repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
cGAN-KD | cGAN-KD-main/UTKFace/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Original Author: Wei Yang
"""
__all__ = ['wrn']
class BasicBlock(nn.Module):
    """Pre-activation WRN basic block: BN-ReLU-Conv x2 with optional dropout,
    plus a 1x1 convolutional shortcut when the channel count or stride changes.
    """

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # Explicit conditional instead of the fragile `cond and A or B` idiom
        # (PEP 308 ternaries/if-else are the idiomatic, non-surprising form).
        if self.equalInOut:
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                          stride=stride, padding=0, bias=False)

    def forward(self, x):
        if not self.equalInOut:
            # Shared pre-activation: the projection shortcut also consumes the
            # activated input (same as the original `x` reassignment).
            x = self.relu1(self.bn1(x))
            out = x
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        shortcut = x if self.equalInOut else self.convShortcut(x)
        return torch.add(shortcut, out)
class NetworkBlock(nn.Module):
    """A stage of `nb_layers` stacked blocks; only the first block changes the
    channel count and applies the stage stride, the rest are stride-1."""

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(nb_layers):
            # Conditional expressions instead of the fragile `cond and A or B`
            # idiom, which silently misbehaves if A is falsy (e.g. stride 0).
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet backbone with an MLP regression head producing one scalar
    per image (final ReLU clamps predictions to be non-negative)."""

    def __init__(self, depth, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        # channel widths for the stem and the three stages
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6   # number of blocks per stage
        block = BasicBlock
        # 1st conv before any network block
        # self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False) #for cifar
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=4, stride=2, padding=1, bias=False) #for imagenet100
        self.bn0 = nn.BatchNorm2d(nChannels[0])
        # 1st block (stride 1)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block (stride 2)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block (stride 2)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and regression head
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Sequential(
            nn.Linear(nChannels[3], 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
        self.nChannels = nChannels[3]
        # He-style init for convs; unit-gamma/zero-beta for BN; zero bias for
        # linear layers (linear weights keep the PyTorch default init)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        # out = self.conv1(x) #for cifar
        out = F.relu(self.bn0(self.conv1(x)))
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # assumes 64x64 input -> 8x8 feature map at this point; TODO confirm
        # before feeding other resolutions
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        out = self.fc(out)
        return out
def wrn(**kwargs):
    """
    Construct a Wide Residual Network with the given keyword arguments.
    """
    return WideResNet(**kwargs)
def wrn_40_2(**kwargs):
    """WRN-40-2 (depth 40, widen factor 2)."""
    return WideResNet(depth=40, widen_factor=2, **kwargs)


def wrn_40_1(**kwargs):
    """WRN-40-1 (depth 40, widen factor 1)."""
    return WideResNet(depth=40, widen_factor=1, **kwargs)


def wrn_16_2(**kwargs):
    """WRN-16-2 (depth 16, widen factor 2)."""
    return WideResNet(depth=16, widen_factor=2, **kwargs)


def wrn_16_1(**kwargs):
    """WRN-16-1 (depth 16, widen factor 1)."""
    return WideResNet(depth=16, widen_factor=1, **kwargs)
if __name__ == '__main__':
    # quick smoke test: forward a random 64x64 batch through WRN-16-1
    import torch
    batch = torch.randn(2, 3, 64, 64)
    model = wrn_16_1()
    preds = model(batch)
    print(preds.shape)
| 4,962 | 32.308725 | 116 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/baseline_cnn.py | print("\n===================================================================================================")
import os
import argparse
import shutil
import timeit
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import random
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib as mpl
from torch import autograd
from torchvision.utils import save_image
import csv
from tqdm import tqdm
import gc
import h5py
### import my stuffs ###
from opts import cnn_opts
from models import *
from utils import *
from train_cnn import train_cnn, test_cnn
#######################################################################################
''' Settings '''
#######################################################################################
args = cnn_opts()
print(args)

#-------------------------------
# seeds: make runs reproducible
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
cudnn.benchmark = False
np.random.seed(args.seed)

#-------------------------------
# CNN settings
## lr decay scheme: e.g. "150_250" -> [150, 250]
lr_decay_epochs = (args.lr_decay_epochs).split("_")
lr_decay_epochs = [int(epoch) for epoch in lr_decay_epochs]

#-------------------------------
# output folders
if args.fake_data_path!="None" and args.nfake>0:
    # training with real+fake data: encode the fake-data source in the path
    fake_data_name = args.fake_data_path.split("/")[-1]
    output_directory = os.path.join(args.root_path, 'output/CNN/{}_useNfake_{}'.format(fake_data_name, args.nfake))
    cnn_info = '{}_lr_{}_decay_{}_finetune_{}'.format(args.cnn_name, args.lr_base, args.weight_decay, args.finetune)
else:
    # vanilla training on real data only
    output_directory = os.path.join(args.root_path, 'output/CNN/vanilla')
    cnn_info = '{}_lr_{}_decay_{}'.format(args.cnn_name, args.lr_base, args.weight_decay)
os.makedirs(output_directory, exist_ok=True)

#######################################################################################
''' Data loader '''
#######################################################################################
print('\n Loading real data...')
hf = h5py.File(os.path.join(args.data_path, 'SteeringAngle_64x64_prop_0.8.h5'), 'r')
images_train = hf['images_train'][:]
labels_train = hf['labels_train'][:]
images_test = hf['images_test'][:]
labels_test = hf['labels_test'][:]
hf.close()

# dataset-wide steering-angle bounds (degrees); used for label (de)normalization
min_label = -88.13
max_label = 97.92
assert labels_train.min()>= min_label and labels_test.min()>=min_label
assert labels_train.max()<= max_label and labels_test.max()<=max_label
def fn_norm_labels(labels):
    '''
    Map unnormalized labels into [0, 1]: shift past the dataset minimum,
    then divide by the shifted maximum.
    '''
    offset = np.abs(min_label)
    return (labels + offset) / (max_label + offset)
def fn_denorm_labels(labels):
    '''
    Invert fn_norm_labels: map normalized labels (numpy array) back to the
    original label scale.
    '''
    offset = np.abs(min_label)
    return labels * (max_label + offset) - offset
## normalize labels to [0,1]
labels_train = fn_norm_labels(labels_train)
labels_test = fn_norm_labels(labels_test)

## number of real images
nreal = len(labels_train)
assert len(labels_train) == len(images_train)

## load fake data if needed
if args.fake_data_path != 'None':
    print("\n Start loading fake data: {}...".format(args.fake_data_path))
    hf = h5py.File(args.fake_data_path, 'r')
    fake_images = hf['fake_images'][:]
    fake_labels = hf['fake_labels'][:]
    hf.close()
    print('\n Fake images: {}, min {}, max {}.'.format(fake_images.shape, fake_images.min(), fake_images.max()))
    print('\n Fake labels: {}, min {}, max {}.'.format(fake_labels.shape, fake_labels.min(), fake_labels.max()))
    # fake images are expected in the raw pixel range, not pre-normalized
    assert np.max(fake_images)>1 and np.min(fake_images)>=0
    # randomly keep only args.nfake of the fake samples
    indx_fake = np.arange(len(fake_labels))
    np.random.shuffle(indx_fake)
    indx_fake = indx_fake[0:args.nfake]
    fake_images = fake_images[indx_fake]
    fake_labels = fake_labels[indx_fake]
    assert len(fake_images)==len(fake_labels)
    # fake labels are stored unnormalized; normalize and clip into [0, 1]
    fake_labels = fn_norm_labels(fake_labels)
    fake_labels = np.clip(fake_labels, 0, 1)
    print("\n Range of normalized fake labels: ", fake_labels.min(), fake_labels.max())
    ## combine fake and real
    images_train = np.concatenate((images_train, fake_images), axis=0)
    labels_train = np.concatenate((labels_train, fake_labels))
## if

## data loader for the test set (training batches are sampled inside train_cnn)
testset = IMGs_dataset(images_test, labels_test, normalize=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size_test, shuffle=False, num_workers=args.num_workers)

## info of training set and test set
print("\n Training set: {}x{}x{}x{}; Testing set: {}x{}x{}x{}.".format(images_train.shape[0], images_train.shape[1], images_train.shape[2], images_train.shape[3], images_test.shape[0], images_test.shape[1], images_test.shape[2], images_test.shape[3]))
#######################################################################################
''' CNN Training '''
#######################################################################################
### model initialization
net = cnn_dict[args.cnn_name]()
num_parameters = count_parameters(net)

### start training
if args.finetune:
    filename_ckpt = os.path.join(output_directory, 'ckpt_{}_epoch_{}_finetune_True_last.pth'.format(args.cnn_name, args.epochs))
    ## load pre-trained model to fine-tune from
    checkpoint = torch.load(args.init_model_path)
    net.load_state_dict(checkpoint['net_state_dict'])
else:
    filename_ckpt = os.path.join(output_directory, 'ckpt_{}_epoch_{}_last.pth'.format(args.cnn_name, args.epochs))
print('\n' + filename_ckpt)

# training is skipped entirely if the final checkpoint already exists
if not os.path.isfile(filename_ckpt):
    print("\n Start training the {} >>>".format(args.cnn_name))
    path_to_ckpt_in_train = output_directory + '/ckpts_in_train/{}'.format(cnn_info)
    os.makedirs(path_to_ckpt_in_train, exist_ok=True)
    train_cnn(net=net, net_name=args.cnn_name, train_images=images_train, train_labels=labels_train, testloader=testloader, epochs=args.epochs, resume_epoch=args.resume_epoch, save_freq=args.save_freq, batch_size=args.batch_size_train, lr_base=args.lr_base, lr_decay_factor=args.lr_decay_factor, lr_decay_epochs=lr_decay_epochs, weight_decay=args.weight_decay, path_to_ckpt = path_to_ckpt_in_train, fn_denorm_labels=fn_denorm_labels)
    # store final model
    torch.save({
        'net_state_dict': net.state_dict(),
    }, filename_ckpt)
    print("\n End training CNN.")
else:
    print("\n Loading pre-trained {}.".format(args.cnn_name))
    checkpoint = torch.load(filename_ckpt)
    net.load_state_dict(checkpoint['net_state_dict'])
#end if

# testing: MAE reported in the original (denormalized) label scale
test_mae = test_cnn(net, testloader, fn_denorm_labels=fn_denorm_labels, verbose=True)
print("\n Test MAE {}.".format(test_mae))

# append results to a log file whose name encodes the final MAE
test_results_logging_fullpath = output_directory + '/test_results_{}_MAE_{:.3f}.txt'.format(cnn_info, test_mae)
if not os.path.isfile(test_results_logging_fullpath):
    test_results_logging_file = open(test_results_logging_fullpath, "w")
    test_results_logging_file.close()
with open(test_results_logging_fullpath, 'a') as test_results_logging_file:
    test_results_logging_file.write("\n===================================================================================================")
    test_results_logging_file.write("\n {}; num paras: {}; seed: {} \n".format(cnn_info, num_parameters, args.seed))
    print(args, file=test_results_logging_file)
    test_results_logging_file.write("\n Test MAE {}.".format(test_mae))

print("\n===================================================================================================")
| 8,254 | 36.017937 | 433 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/eval_metrics.py | """
Compute
Inception Score (IS),
Frechet Inception Discrepency (FID), ref "https://github.com/mseitzer/pytorch-fid/blob/master/fid_score.py"
Maximum Mean Discrepancy (MMD)
for a set of fake images
use numpy array
Xr: high-level features for real images; nr by d array
Yr: labels for real images
Xg: high-level features for fake images; ng by d array
Yg: labels for fake images
IMGSr: real images
IMGSg: fake images
"""
import os
import gc
import numpy as np
# from numpy import linalg as LA
from scipy import linalg
import torch
import torch.nn as nn
from scipy.stats import entropy
from torch.nn import functional as F
from torchvision.utils import save_image
from utils import SimpleProgressBar, IMGs_dataset
##############################################################################
# FID scores
##############################################################################
# compute FID based on extracted features
def FID(Xr, Xg, eps=1e-10):
    '''
    Frechet distance between the Gaussians fitted to the two feature sets:
    d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    '''
    # sample means and their difference
    mu_r = np.mean(Xr, axis=0)
    mu_g = np.mean(Xg, axis=0)
    diff = mu_r - mu_g

    # sample covariances (rows of X are observations -> transpose for np.cov)
    cov_r = np.cov(Xr.transpose())
    cov_g = np.cov(Xg.transpose())

    # matrix square root of the covariance product; may be near-singular
    sqrt_prod, _ = linalg.sqrtm(cov_r.dot(cov_g), disp=False)
    sqrt_prod = sqrt_prod.real
    if not np.isfinite(sqrt_prod).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(cov_r.shape[0]) * eps
        sqrt_prod = linalg.sqrtm((cov_r + offset).dot(cov_g + offset))

    return diff.dot(diff) + np.trace(cov_r + cov_g - 2 * sqrt_prod)
##test
#Xr = np.random.rand(10000,1000)
#Xg = np.random.rand(10000,1000)
#print(FID(Xr, Xg))
# compute FID from raw images
def cal_FID(PreNetFID, IMGSr, IMGSg, batch_size = 500, resize = None):
    '''
    Compute FID from raw images by extracting features with PreNetFID.

    PreNetFID: feature extractor, called as PreNetFID(batch) -> features (on GPU)
    IMGSr, IMGSg: real / generated images, numpy arrays shaped n x C x H x W
    batch_size: reduced automatically if larger than either image set
    resize: if None, do not resize; if resize = (H,W), resize images to 3 x H x W
    '''
    PreNetFID.eval()

    nr = IMGSr.shape[0]
    ng = IMGSg.shape[0]

    nc = IMGSr.shape[1] #IMGSr is nrxNCxIMG_SIZExIMG_SIZE
    img_size = IMGSr.shape[2]

    if batch_size > min(nr, ng):
        batch_size = min(nr, ng)

    # probe the feature dimension with a single image
    with torch.no_grad():
        test_img = torch.from_numpy(IMGSr[0].reshape((1,nc,img_size,img_size))).type(torch.float).cuda()
        if resize is not None:
            test_img = nn.functional.interpolate(test_img, size = resize, scale_factor=None, mode='bilinear', align_corners=False)
        test_features = PreNetFID(test_img)
        d = test_features.shape[1] #length of extracted features

    Xr = np.zeros((nr, d))
    Xg = np.zeros((ng, d))

    def _extract(IMGS, X, n):
        # Fill X (n x d) with features for ALL n images.
        # FIX: the original ran exactly n//batch_size full batches, silently
        # leaving the remainder rows of X as zeros (biasing FID whenever
        # n % batch_size != 0); this loop also covers the tail batch.
        pb = SimpleProgressBar()
        start = 0
        with torch.no_grad():
            while start < n:
                end = min(start + batch_size, n)
                img_tensor = torch.from_numpy(IMGS[start:end]).type(torch.float).cuda()
                if resize is not None:
                    img_tensor = nn.functional.interpolate(img_tensor, size = resize, scale_factor=None, mode='bilinear', align_corners=False)
                X[start:end] = PreNetFID(img_tensor).detach().cpu().numpy()
                start = end
                # FIX: original reported min(max(p, 100), 100), which is
                # always 100; report actual progress instead
                pb.update(min(start / n * 100, 100))
                del img_tensor; gc.collect()
                torch.cuda.empty_cache()

    _extract(IMGSr, Xr, nr)
    _extract(IMGSg, Xg, ng)

    fid_score = FID(Xr, Xg, eps=1e-6)

    return fid_score
##############################################################################
# label_score
# difference between assigned label and predicted label
##############################################################################
def cal_labelscore(PreNet, images, labels_assi, min_label_before_shift, max_label_after_shift, batch_size = 500, resize = None, num_workers=0):
    '''
    Label Score: mean and std of |predicted label - assigned label|, computed
    in the original (denormalized) label scale.

    PreNet: pre-trained CNN; called as PreNet(batch) -> (prediction, features)
    images: fake images (n x nc x img_size x img_size)
    labels_assi: assigned (normalized) labels
    min_label_before_shift / max_label_after_shift: constants to undo label normalization
    resize: if None, do not resize; if resize = (H,W), resize images to 3 x H x W
    '''
    PreNet.eval()

    # assume images are nxncximg_sizeximg_size
    n = images.shape[0]
    nc = images.shape[1] #number of channels
    img_size = images.shape[2]
    labels_assi = labels_assi.reshape(-1)

    eval_trainset = IMGs_dataset(images, labels_assi, normalize=False)
    eval_dataloader = torch.utils.data.DataLoader(eval_trainset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    # over-allocate by one batch so the last (possibly partial) batch always
    # fits; trimmed back to n below
    labels_pred = np.zeros(n+batch_size)

    nimgs_got = 0
    pb = SimpleProgressBar()
    for batch_idx, (batch_images, batch_labels) in enumerate(eval_dataloader):
        batch_images = batch_images.type(torch.float).cuda()
        batch_labels = batch_labels.type(torch.float).cuda()
        batch_size_curr = len(batch_labels)

        batch_labels_pred, _ = PreNet(batch_images)
        labels_pred[nimgs_got:(nimgs_got+batch_size_curr)] = batch_labels_pred.detach().cpu().numpy().reshape(-1)

        nimgs_got += batch_size_curr
        pb.update((float(nimgs_got)/n)*100)

        del batch_images; gc.collect()
        torch.cuda.empty_cache()
    #end for batch_idx

    labels_pred = labels_pred[0:n]

    # undo label normalization before computing absolute errors
    labels_pred = (labels_pred*max_label_after_shift)-np.abs(min_label_before_shift)
    labels_assi = (labels_assi*max_label_after_shift)-np.abs(min_label_before_shift)

    ls_mean = np.mean(np.abs(labels_pred-labels_assi))
    ls_std = np.std(np.abs(labels_pred-labels_assi))

    return ls_mean, ls_std
| 6,666 | 33.365979 | 143 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/train_net_for_label_embed.py |
import torch
import torch.nn as nn
from torchvision.utils import save_image
import numpy as np
import os
import timeit
from PIL import Image
## normalize images
def normalize_images(batch_images):
    """Scale raw [0, 255] pixel values to [-1, 1]."""
    scaled = batch_images / 255.0
    return (scaled - 0.5) / 0.5
#-------------------------------------------------------------
def train_net_embed(net, train_images, train_labels, test_loader=None, epochs=200, resume_epoch = 0, save_freq=40, batch_size=128, lr_base=0.01, lr_decay_factor=0.1, lr_decay_epochs=[150, 180, 210], weight_decay=1e-4, path_to_ckpt = None, fn_denorm_labels=None):
    '''
    Train the image->label embedding network (net_x2y) with an MSE loss.

    train_images: unnormalized images (raw pixel values, max in (1, 255])
    train_labels: normalized labels in [0, 1]
    test_loader: optional; when given, test MAE is reported every epoch
    resume_epoch: if >0 (and path_to_ckpt set), resume from that checkpoint
    fn_denorm_labels: maps normalized labels back to the original scale (MAE)
    Returns (net, train_mse_all) or (net, train_mse_all, test_mae_all).
    NOTE: lr_decay_epochs is a mutable default argument; safe only because it
    is never mutated here.
    '''
    assert train_images.max()>1 and train_images.max()<=255.0 and train_images.min()>=0
    assert train_labels.min()>=0 and train_labels.max()<=1.0

    unique_train_labels = np.sort(np.array(list(set(train_labels)))) ##sorted unique labels
    indx_all = np.arange(len(train_labels))

    ''' learning rate decay '''
    def adjust_learning_rate_1(optimizer, epoch):
        """Multiply lr_base by lr_decay_factor once per passed decay epoch."""
        lr = lr_base
        num_decays = len(lr_decay_epochs)
        for decay_i in range(num_decays):
            if epoch >= lr_decay_epochs[decay_i]:
                lr = lr * lr_decay_factor
            #end if epoch
        #end for decay_i
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    net = net.cuda()
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(net.parameters(), lr = lr_base, momentum= 0.9, weight_decay=weight_decay)

    # resume training; load checkpoint
    if path_to_ckpt is not None and resume_epoch>0:
        save_file = path_to_ckpt + "/embed_x2y_checkpoint_epoch_{}.pth".format(resume_epoch)
        checkpoint = torch.load(save_file)
        net.load_state_dict(checkpoint['net_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        torch.set_rng_state(checkpoint['rng_state'])
    #end if

    train_mse_all = []
    test_mae_all = []

    start_tmp = timeit.default_timer()
    for epoch in range(resume_epoch, epochs):
        net.train()
        train_loss = 0
        adjust_learning_rate_1(optimizer, epoch)
        for _ in range(len(train_labels)//batch_size):
            # draw a random batch (with replacement) from the training set
            batch_train_indx = np.random.choice(indx_all, size=batch_size, replace=True).reshape(-1)

            ### get some real images for training
            batch_train_images = train_images[batch_train_indx]
            batch_train_images = normalize_images(batch_train_images) ## normalize real images
            batch_train_images = torch.from_numpy(batch_train_images).type(torch.float).cuda()
            assert batch_train_images.max().item()<=1.0
            batch_train_labels = train_labels[batch_train_indx]
            batch_train_labels = torch.from_numpy(batch_train_labels).type(torch.float).view(-1,1).cuda()

            # forward pass; net returns (prediction, features) - only the
            # prediction is used for the MSE loss
            outputs, _ = net(batch_train_images)
            loss = criterion(outputs, batch_train_labels)

            # backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_loss += loss.cpu().item()
        #end for batch_idx
        train_loss = train_loss / (len(train_labels)//batch_size)
        train_mse_all.append(train_loss)

        if test_loader is None:
            print('Train net_x2y for embedding: [epoch %d/%d] train_loss:%f Time:%.4f' % (epoch+1, epochs, train_loss, timeit.default_timer()-start_tmp))
        else:
            test_mae = test_cnn(net, test_loader, fn_denorm_labels=fn_denorm_labels, verbose=False)
            test_mae_all.append(test_mae)
            print('Train net_x2y for label embedding: [epoch %d/%d] train_loss:%f test_MAE:%f Time:%.4f' % (epoch+1, epochs, train_loss, test_mae, timeit.default_timer()-start_tmp))

        # save checkpoint every save_freq epochs and at the final epoch
        if path_to_ckpt is not None and (((epoch+1) % save_freq == 0) or (epoch+1==epochs)):
            save_file = path_to_ckpt + "/embed_x2y_checkpoint_epoch_{}.pth".format(epoch+1)
            torch.save({
                    'epoch': epoch,
                    'net_state_dict': net.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'rng_state': torch.get_rng_state()
            }, save_file)
    #end for epoch

    net = net.cpu()

    if test_loader is None:
        return net, train_mse_all
    else:
        return net, train_mse_all, test_mae_all
def test_cnn(net, testloader, fn_denorm_labels=None, verbose=False):
    """Evaluate `net` on `testloader`; return MAE in the denormalized label scale."""
    net = net.cuda()
    net.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
    with torch.no_grad():
        abs_diff_avg = 0
        total = 0
        for _, (images, labels) in enumerate(testloader):
            images = images.type(torch.float).cuda()
            labels = labels.type(torch.float).view(-1).cpu().numpy()
            outputs, _ = net(images)
            outputs = outputs.view(-1).cpu().numpy()
            # map both targets and predictions back to the original label scale
            labels = fn_denorm_labels(labels)
            outputs = fn_denorm_labels(outputs)
            abs_diff_avg += np.sum(np.abs(labels-outputs)) ##compute MAE not MSE!!!
            total += len(labels)
        test_mae = abs_diff_avg/total
    if verbose:
        print('\n Test MAE: {}.'.format(test_mae))
    return test_mae
###################################################################################
class label_dataset(torch.utils.data.Dataset):
    """Minimal dataset that yields scalar labels one at a time."""

    def __init__(self, labels):
        super(label_dataset, self).__init__()
        self.labels = labels
        self.n_samples = len(self.labels)

    def __getitem__(self, index):
        return self.labels[index]

    def __len__(self):
        return self.n_samples
def train_net_y2h(unique_train_labels_norm, net_y2h, net_embed, epochs=500, lr_base=0.01, lr_decay_factor=0.1, lr_decay_epochs=[150, 250, 350], weight_decay=1e-4, batch_size=128):
    '''
    Train net_y2h (label -> embedding) so that the frozen h2y head of
    net_embed reconstructs noisy labels from the embeddings net_y2h produces.

    unique_train_labels_norm: an array of normalized unique labels in [0, 1]
    Returns the trained net_y2h.
    NOTE(review): net_embed is accessed via `net_embed.module.h2y`, so it is
    assumed to be wrapped in nn.DataParallel — confirm at the call site.
    '''
    ''' learning rate decay '''
    def adjust_learning_rate_2(optimizer, epoch):
        """Multiply lr_base by lr_decay_factor once per passed decay epoch."""
        lr = lr_base
        num_decays = len(lr_decay_epochs)
        for decay_i in range(num_decays):
            if epoch >= lr_decay_epochs[decay_i]:
                lr = lr * lr_decay_factor
            #end if epoch
        #end for decay_i
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    assert np.max(unique_train_labels_norm)<=1 and np.min(unique_train_labels_norm)>=0
    trainset = label_dataset(unique_train_labels_norm)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)

    net_embed = net_embed.cuda()
    net_y2h = net_y2h.cuda()

    net_embed.eval()
    net_h2y=net_embed.module.h2y #convert embedding labels to original labels
    optimizer_y2h = torch.optim.SGD(net_y2h.parameters(), lr = lr_base, momentum= 0.9, weight_decay=weight_decay)

    start_tmp = timeit.default_timer()
    for epoch in range(epochs):
        net_y2h.train()
        train_loss = 0
        adjust_learning_rate_2(optimizer_y2h, epoch)
        for _, batch_labels in enumerate(trainloader):
            batch_labels = batch_labels.type(torch.float).view(-1,1).cuda()

            # generate Gaussian noise (std 0.2) to be added to labels
            batch_size_curr = len(batch_labels)
            batch_gamma = np.random.normal(0, 0.2, batch_size_curr)
            batch_gamma = torch.from_numpy(batch_gamma).view(-1,1).type(torch.float).cuda()

            # add noise to labels; clamp so noisy labels stay in [0, 1]
            batch_labels_noise = torch.clamp(batch_labels+batch_gamma, 0.0, 1.0)

            # forward pass: label -> embedding -> reconstructed label
            batch_hiddens_noise = net_y2h(batch_labels_noise)
            batch_rec_labels_noise = net_h2y(batch_hiddens_noise)

            loss = nn.MSELoss()(batch_rec_labels_noise, batch_labels_noise)

            # backward pass
            optimizer_y2h.zero_grad()
            loss.backward()
            optimizer_y2h.step()

            train_loss += loss.cpu().item()
        #end for batch_idx
        train_loss = train_loss / len(trainloader)

        print('\n Train net_y2h: [epoch %d/%d] train_loss:%f Time:%.4f' % (epoch+1, epochs, train_loss, timeit.default_timer()-start_tmp))
    #end for epoch

    return net_y2h
| 10,156 | 38.675781 | 262 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/DiffAugment_pytorch.py | # Differentiable Augmentation for Data-Efficient GAN Training
# Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han
# https://arxiv.org/pdf/2006.10738
import torch
import torch.nn.functional as F
def DiffAugment(x, policy='', channels_first=True):
    """Apply a comma-separated augmentation policy to a batch of images.

    Each policy name maps (via AUGMENT_FNS) to a list of differentiable
    augmentations applied in order. When `channels_first` is False the batch
    is converted to NCHW first and converted back afterwards. An empty
    policy returns `x` untouched.
    """
    if not policy:
        return x
    if not channels_first:
        x = x.permute(0, 3, 1, 2)
    for name in policy.split(','):
        for fn in AUGMENT_FNS[name]:
            x = fn(x)
    if not channels_first:
        x = x.permute(0, 2, 3, 1)
    return x.contiguous()
def rand_brightness(x):
    """Add a per-sample uniform brightness shift drawn from [-0.5, 0.5)."""
    shift = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5
    return x + shift
def rand_saturation(x):
    """Scale each sample's deviation from its per-pixel channel mean by a
    factor drawn uniformly from [0, 2)."""
    channel_mean = x.mean(dim=1, keepdim=True)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2
    return (x - channel_mean) * factor + channel_mean
def rand_contrast(x):
    """Scale each sample's deviation from its scalar mean by a factor drawn
    uniformly from [0.5, 1.5)."""
    sample_mean = x.mean(dim=[1, 2, 3], keepdim=True)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5
    return (x - sample_mean) * factor + sample_mean
def rand_translation(x, ratio=0.125):
    # Randomly shift each sample by up to `ratio` of its spatial size along
    # each axis, filling out-of-range pixels with zeros (implemented via a
    # 1-pixel zero pad plus clamped gather indices).
    shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
    translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(x.size(2), dtype=torch.long, device=x.device),
        torch.arange(x.size(3), dtype=torch.long, device=x.device),
    )
    # the +1 offset accounts for the zero padding added below; clamping keeps
    # every gather index inside the padded image
    grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
    grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
    x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
    x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
    return x
def rand_cutout(x, ratio=0.5):
    # Zero out one randomly positioned rectangle of size `ratio` * (H, W)
    # per sample (rectangles may be clipped at the image border).
    cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
    offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
        torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
    )
    # center the cutout window on the random offset; clamp clips it to the image
    grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
    grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
    # build a per-sample binary mask and zero the selected pixels in all channels
    mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
    mask[grid_batch, grid_x, grid_y] = 0
    x = x * mask.unsqueeze(1)
    return x
# registry mapping policy names (consumed by DiffAugment) to the ordered list
# of augmentation functions applied for that policy
AUGMENT_FNS = {
    'color': [rand_brightness, rand_saturation, rand_contrast],
    'translation': [rand_translation],
    'cutout': [rand_cutout],
}
| 3,025 | 38.298701 | 110 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/generate_synthetic_data.py | print("\n===================================================================================================")
import argparse
import copy
import gc
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib as mpl
import h5py
import os
import random
from tqdm import tqdm, trange
import torch
import torchvision
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torchvision.utils import save_image
import timeit
from PIL import Image
### import my stuffs ###
from opts import gen_synth_data_opts
from utils import SimpleProgressBar, IMGs_dataset, IMGs_dataset_v2, PlotLoss, count_parameters
from models import *
from train_ccgan import train_ccgan, SampCcGAN_given_labels
from train_net_for_label_embed import train_net_embed, train_net_y2h
from train_sparseAE import train_sparseAE
from train_cdre import train_cdre
from eval_metrics import cal_FID, cal_labelscore
#######################################################################################
''' Settings '''
#######################################################################################
args = gen_synth_data_opts()
print(args)

# build a tag describing the subsampling configuration (used in output names)
if args.subsampling:
    subsampling_method = "cDR-RS_presae_epochs_{}_DR_{}_epochs_{}_lambda_{:.3f}".format(args.dre_presae_epochs, args.dre_net, args.dre_epochs, args.dre_lambda)
else:
    subsampling_method = "None"

## filter??
if args.filter:
    subsampling_method = subsampling_method + "_filter_{}_perc_{:.2f}".format(args.samp_filter_precnn_net, args.samp_filter_mae_percentile_threshold)
else:
    subsampling_method = subsampling_method + "_filter_None"

## adjust labels??
subsampling_method = subsampling_method + "_adjust_{}".format(args.adjust)

#-------------------------------
# seeds: make runs reproducible
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
cudnn.benchmark = False
np.random.seed(args.seed)

#-------------------------------
# output folders
output_directory = os.path.join(args.root_path, 'output')
os.makedirs(output_directory, exist_ok=True)

## folders for CcGAN and cDRE and fake data
save_models_folder = os.path.join(output_directory, 'CcGAN/saved_models')
os.makedirs(save_models_folder, exist_ok=True)
save_images_folder = os.path.join(output_directory, 'CcGAN/saved_images')
os.makedirs(save_images_folder, exist_ok=True)
fake_data_folder = os.path.join(output_directory, 'fake_data')
os.makedirs(fake_data_folder, exist_ok=True)

#######################################################################################
''' Data loader '''
#######################################################################################
print('\n Loading real data...')
hf = h5py.File(os.path.join(args.data_path, 'SteeringAngle_64x64_prop_0.8.h5'), 'r')
images_train = hf['images_train'][:]
labels_train = hf['labels_train'][:]
images_test = hf['images_test'][:]
labels_test = hf['labels_test'][:]
hf.close()

# dataset-wide steering-angle bounds (degrees); used for label (de)normalization
min_label = -88.13
max_label = 97.92
assert labels_train.min()>= min_label and labels_test.min()>=min_label
assert labels_train.max()<= max_label and labels_test.max()<=max_label
# some functions
def fn_norm_labels(labels):
    """Map raw labels from [min_label, max_label] (module globals) to [0, 1]."""
    offset = np.abs(min_label)
    return (labels + offset) / (max_label + offset)
def fn_denorm_labels(labels):
    """Inverse of fn_norm_labels: map normalized [0, 1] labels (numpy array)
    back to the raw range [min_label, max_label] (module globals)."""
    offset = np.abs(min_label)
    return labels * (max_label + offset) - offset
# unique normalized training labels
## normalize training labels to [0,1]
labels_train_norm = fn_norm_labels(labels_train)
unique_labels_train_norm = np.sort(np.array(list(set(labels_train_norm))))
labels_test_norm = fn_norm_labels(labels_test)
## set sigma and kappa/nu in CcGAN
# Negative gan_kernel_sigma means "auto": Silverman's rule of thumb on the labels.
if args.gan_kernel_sigma<0:
    std_label = np.std(labels_train_norm)
    args.gan_kernel_sigma = 1.06*std_label*(len(labels_train_norm))**(-1/5)
    print("\n Use rule-of-thumb formula to compute kernel_sigma >>>")
    print("\n The std of {} normalied training labels is {} so the kernel sigma is {}".format(len(labels_train_norm), std_label, args.gan_kernel_sigma))
##end if
# Negative gan_kappa means "auto": scale |kappa| by the largest gap between
# consecutive sorted unique normalized labels.
if args.gan_kappa<0:
    n_unique = len(unique_labels_train_norm)
    diff_list = []
    for i in range(1,n_unique):
        diff_list.append(unique_labels_train_norm[i] - unique_labels_train_norm[i-1])
    kappa_base = np.abs(args.gan_kappa)*np.max(np.array(diff_list))
    print("\n The maximum gap of the normalized training labels is {}".format(max(diff_list)))
    if args.gan_threshold_type=="hard":
        args.gan_kappa = kappa_base
    else:
        # soft threshold stores nu = 1/kappa^2 in the same field
        args.gan_kappa = 1/kappa_base**2
## end if
# Same auto rule for the cDRE kappa (hard-threshold form only).
if args.dre_kappa<0:
    n_unique = len(unique_labels_train_norm)
    diff_list = []
    for i in range(1,n_unique):
        diff_list.append(unique_labels_train_norm[i] - unique_labels_train_norm[i-1])
    kappa_base = np.abs(args.dre_kappa)*np.max(np.array(diff_list))
    args.dre_kappa = kappa_base
assert args.dre_kappa>=0
#######################################################################################
''' Pre-trained CNN for label embedding '''
#######################################################################################
# Two nets: x2y maps an image to a label through an embedding h;
# y2h maps a label into that same embedding space (used to condition the GAN).
net_embed_x2y_filename_ckpt = save_models_folder + '/ckpt_embed_{}_epoch_{}_seed_{}.pth'.format(args.gan_embed_x2y_net_name, args.gan_embed_x2y_epoch, args.seed)
print(net_embed_x2y_filename_ckpt)
net_embed_y2h_filename_ckpt = save_models_folder + '/ckpt_embed_y2h_epoch_{}_seed_{}.pth'.format(args.gan_embed_y2h_epoch, args.seed)
print(net_embed_y2h_filename_ckpt)
testset_embed_x2y = IMGs_dataset(images_test, labels_test_norm, normalize=True)
testloader_embed_x2y = torch.utils.data.DataLoader(testset_embed_x2y, batch_size=100, shuffle=False, num_workers=args.num_workers)
if args.gan_embed_x2y_net_name == "ResNet18":
    net_embed_x2y = ResNet18_embed(dim_embed=args.gan_dim_embed)
elif args.gan_embed_x2y_net_name == "ResNet34":
    net_embed_x2y = ResNet34_embed(dim_embed=args.gan_dim_embed)
elif args.gan_embed_x2y_net_name == "ResNet50":
    net_embed_x2y = ResNet50_embed(dim_embed=args.gan_dim_embed)
else:
    raise Exception("Wrong embedding net name!")
net_embed_x2y = net_embed_x2y.cuda()
net_embed_x2y = nn.DataParallel(net_embed_x2y)
net_embed_y2h = model_y2h(dim_embed=args.gan_dim_embed)
net_embed_y2h = net_embed_y2h.cuda()
net_embed_y2h = nn.DataParallel(net_embed_y2h)
## (1). Train net_embed first: x2h+h2y (skipped if a checkpoint already exists)
if not os.path.isfile(net_embed_x2y_filename_ckpt):
    print("\n Start training CNN for label embedding >>>")
    # lr decay epochs: underscore-separated string, e.g. "80_140"
    net_embed_x2y_lr_decay_epochs = (args.gan_embed_x2y_lr_decay_epochs).split("_")
    net_embed_x2y_lr_decay_epochs = [int(epoch) for epoch in net_embed_x2y_lr_decay_epochs]
    # ckpts in training
    ckpts_in_train_net_embed_x2y = os.path.join(save_models_folder, 'ckpts_in_train_embed_x2y_{}'.format(args.gan_embed_x2y_net_name))
    os.makedirs(ckpts_in_train_net_embed_x2y, exist_ok=True)
    net_test_mae_file_fullpath = os.path.join(ckpts_in_train_net_embed_x2y, 'test_mae_embed_{}_epoch_{}_seed_{}.png'.format(args.gan_embed_x2y_net_name, args.gan_embed_x2y_epoch, args.seed))
    # training function
    net_embed_x2y, train_mse_all, test_mae_all = train_net_embed(net=net_embed_x2y, train_images=images_train, train_labels=labels_train_norm, test_loader=testloader_embed_x2y, epochs=args.gan_embed_x2y_epoch, resume_epoch=args.gan_embed_x2y_resume_epoch, save_freq=40, batch_size=args.gan_embed_x2y_batch_size, lr_base=args.gan_embed_x2y_lr_base, lr_decay_factor=args.gan_embed_x2y_lr_decay_factor, lr_decay_epochs=net_embed_x2y_lr_decay_epochs, weight_decay=1e-4, path_to_ckpt = ckpts_in_train_net_embed_x2y, fn_denorm_labels=fn_denorm_labels)
    PlotLoss(loss=test_mae_all, filename=net_test_mae_file_fullpath)
    # save model
    torch.save({
        'net_state_dict': net_embed_x2y.state_dict(),
    }, net_embed_x2y_filename_ckpt)
else:
    print("\n net_embed ckpt already exists")
    print("\n Loading...")
    checkpoint = torch.load(net_embed_x2y_filename_ckpt)
    net_embed_x2y.load_state_dict(checkpoint['net_state_dict'])
#end not os.path.isfile
## (2). Train y2h
#train a net which maps a label back to the embedding space
if not os.path.isfile(net_embed_y2h_filename_ckpt):
    print("\n Start training net_embed_y2h >>>")
    # lr decay epochs
    net_embed_y2h_lr_decay_epochs = (args.gan_embed_y2h_lr_decay_epochs).split("_")
    net_embed_y2h_lr_decay_epochs = [int(epoch) for epoch in net_embed_y2h_lr_decay_epochs]
    # training function
    net_embed_y2h = train_net_y2h(unique_train_labels_norm=unique_labels_train_norm, net_y2h=net_embed_y2h, net_embed=net_embed_x2y, epochs=args.gan_embed_y2h_epoch, lr_base=args.gan_embed_y2h_lr_base, lr_decay_factor=args.gan_embed_y2h_lr_decay_factor, lr_decay_epochs=net_embed_y2h_lr_decay_epochs, weight_decay=1e-4, batch_size=args.gan_embed_y2h_batch_size)
    # save model
    torch.save({
        'net_state_dict': net_embed_y2h.state_dict(),
    }, net_embed_y2h_filename_ckpt)
else:
    print("\n net_embed_y2h ckpt already exists")
    print("\n Loading...")
    checkpoint = torch.load(net_embed_y2h_filename_ckpt)
    net_embed_y2h.load_state_dict(checkpoint['net_state_dict'])
#end not os.path.isfile
##some simple test after the embedding nets training:
# perturb a few training labels, map y -> h -> y, and print label vs reconstruction.
indx_tmp = np.arange(len(unique_labels_train_norm))
np.random.shuffle(indx_tmp)
indx_tmp = indx_tmp[:10]
labels_tmp = unique_labels_train_norm[indx_tmp].reshape(-1,1)
labels_tmp = torch.from_numpy(labels_tmp).type(torch.float).cuda()
epsilons_tmp = np.random.normal(0, 0.2, len(labels_tmp))
epsilons_tmp = torch.from_numpy(epsilons_tmp).view(-1,1).type(torch.float).cuda()
labels_tmp = torch.clamp(labels_tmp+epsilons_tmp, 0.0, 1.0)
net_embed_x2y.eval()
net_embed_h2y = net_embed_x2y.module.h2y  # .module: unwrap nn.DataParallel
net_embed_y2h.eval()
with torch.no_grad():
    labels_rec_tmp = net_embed_h2y(net_embed_y2h(labels_tmp)).cpu().numpy().reshape(-1,1)
results = np.concatenate((labels_tmp.cpu().numpy(), labels_rec_tmp), axis=1)
print("\n labels vs reconstructed labels")
print(results)
#######################################################################################
''' GAN training '''
#######################################################################################
# Train the CcGAN (or load it if the final checkpoint already exists).
print("\n Start CcGAN training: {}, Sigma is {:.3f}, Kappa is {:.3f}".format(args.gan_threshold_type, args.gan_kernel_sigma, args.gan_kappa))
path_to_ckpt_ccgan = os.path.join(save_models_folder, 'ckpt_{}_loss_{}_niters_{}_seed_{}_{}_sigma{:.3f}_kappa{:.3f}.pth'.format(args.gan_arch, args.gan_loss_type, args.gan_niters, args.seed, args.gan_threshold_type, args.gan_kernel_sigma, args.gan_kappa))
print(path_to_ckpt_ccgan)
start = timeit.default_timer()
if not os.path.isfile(path_to_ckpt_ccgan):
    ## images generated during training
    images_in_train_ccgan = os.path.join(save_images_folder, 'images_in_train_{}'.format(args.gan_arch))
    os.makedirs(images_in_train_ccgan, exist_ok=True)
    # ckpts in training
    ckpts_in_train_ccgan = os.path.join(save_models_folder, 'ckpts_in_train_{}'.format(args.gan_arch))
    os.makedirs(ckpts_in_train_ccgan, exist_ok=True)
    # init models
    if args.gan_arch == 'SNGAN':
        netG = SNGAN_Generator(z_dim=args.gan_dim_g, gene_ch=args.gan_gene_ch, dim_embed=args.gan_dim_embed).cuda()
        netD = SNGAN_Discriminator(disc_ch=args.gan_disc_ch, dim_embed=args.gan_dim_embed).cuda()
    elif args.gan_arch == 'SAGAN':
        netG = SAGAN_Generator(z_dim=args.gan_dim_g, gene_ch=args.gan_gene_ch, dim_embed=args.gan_dim_embed).cuda()
        netD = SAGAN_Discriminator(disc_ch=args.gan_disc_ch, dim_embed=args.gan_dim_embed).cuda()
    else:
        raise Exception('Wrong CcGAN name!')
    netG = nn.DataParallel(netG)
    netD = nn.DataParallel(netD)
    # training function
    netG, netD = train_ccgan(kernel_sigma=args.gan_kernel_sigma, kappa=args.gan_kappa, train_images=images_train, train_labels=labels_train_norm, netG=netG, netD=netD, net_y2h = net_embed_y2h, save_images_folder = images_in_train_ccgan, path_to_ckpt = ckpts_in_train_ccgan, clip_label=False)
    # store model
    torch.save({
        'netG_state_dict': netG.state_dict(),
        'netD_state_dict': netD.state_dict(),
    }, path_to_ckpt_ccgan)
else:
    # only the generator is needed downstream, so the discriminator is not rebuilt
    print("Loading pre-trained generator >>>")
    checkpoint = torch.load(path_to_ckpt_ccgan)
    if args.gan_arch == 'SNGAN':
        netG = SNGAN_Generator(z_dim=args.gan_dim_g, gene_ch=args.gan_gene_ch, dim_embed=args.gan_dim_embed).cuda()
    elif args.gan_arch == 'SAGAN':
        netG = SAGAN_Generator(z_dim=args.gan_dim_g, gene_ch=args.gan_gene_ch, dim_embed=args.gan_dim_embed).cuda()
    else:
        raise Exception('Wrong CcGAN name!')
    netG = nn.DataParallel(netG)
    netG.load_state_dict(checkpoint['netG_state_dict'])
## end if
stop = timeit.default_timer()
print("CcGAN training finished; Time elapses: {}s".format(stop - start))
def fn_sampleGAN_given_label(nfake, given_label, netG=netG, net_y2h=net_embed_y2h, batch_size = 100, to_numpy=True, denorm=True, verbose=False):
    ''' label: normalized label in [0,1] '''
    ''' output labels are still normalized '''
    # Draw `nfake` generator samples, all conditioned on the single normalized
    # `given_label`. Returns (images, labels). With denorm=True images are
    # uint8 in [0,255] (generator output assumed in [-1,1] -- TODO confirm).
    assert 0<=given_label<=1.0
    netG = netG.cuda()
    net_y2h = net_y2h.cuda()
    netG.eval()
    net_y2h.eval()
    if batch_size>nfake:
        batch_size = nfake
    fake_images = []
    with torch.no_grad():
        if verbose:
            pb = SimpleProgressBar()
        n_img_got = 0
        while n_img_got < nfake:
            y = np.ones(batch_size) * given_label
            y = torch.from_numpy(y).type(torch.float).view(-1,1).cuda()
            z = torch.randn(batch_size, args.gan_dim_g, dtype=torch.float).cuda()
            batch_fake_images = netG(z, net_y2h(y))
            if denorm:
                # map [-1,1] -> [0,255] and cast to uint8
                batch_fake_images = (batch_fake_images*0.5+0.5)*255.0
                batch_fake_images = batch_fake_images.type(torch.uint8)
            fake_images.append(batch_fake_images.cpu())
            n_img_got += len(batch_fake_images)
            if verbose:
                pb.update(min(float(n_img_got)/nfake, 1)*100)
    fake_images = torch.cat(fake_images, dim=0)
    fake_labels = torch.ones(nfake) * given_label #use assigned label
    if to_numpy:
        fake_images = fake_images.numpy()
        fake_labels = fake_labels.numpy()
    # move nets back to CPU to free GPU memory between calls
    netG = netG.cpu()
    net_y2h = net_y2h.cpu()
    # the last batch may overshoot nfake, so trim
    return fake_images[0:nfake], fake_labels[0:nfake]
#######################################################################################
''' cDRE training '''
#######################################################################################
# Only needed when fake samples will be subsampled via conditional density ratios.
if args.subsampling:
    ##############################################
    ''' Pre-trained CNN for feature extraction '''
    print("\n -----------------------------------------------------------------------------------------")
    print("\n Pre-trained CNN for feature extraction")
    # filename
    filename_presae_ckpt = save_models_folder + '/ckpt_PreSAEForDRE_epoch_{}_sparsity_{:.3f}_regre_{:.3f}_seed_{}.pth'.format(args.dre_presae_epochs, args.dre_presae_lambda_sparsity, args.dre_presae_lambda_regression, args.seed)
    print('\n ' + filename_presae_ckpt)
    # training: fit a sparse autoencoder (encoder + decoder + label-prediction
    # head); its encoder becomes the cDRE feature extractor.
    if not os.path.isfile(filename_presae_ckpt):
        save_sae_images_InTrain_folder = save_images_folder + '/PreSAEForDRE_reconstImages_sparsity_{:.3f}_regre_{:.3f}_InTrain_{}'.format(args.dre_presae_lambda_sparsity, args.dre_presae_lambda_regression, args.seed)
        os.makedirs(save_sae_images_InTrain_folder, exist_ok=True)
        # initialize net
        dre_presae_encoder_net = encoder_extract(ch=args.dre_presae_ch, dim_bottleneck=args.img_size*args.img_size*args.num_channels)
        dre_presae_decoder_net = decoder_extract(ch=args.dre_presae_ch, dim_bottleneck=args.img_size*args.img_size*args.num_channels)
        dre_presae_predict_net = decoder_predict(dim_bottleneck=args.img_size*args.img_size*args.num_channels)
        dre_presae_encoder_net = nn.DataParallel(dre_presae_encoder_net)
        dre_presae_decoder_net = nn.DataParallel(dre_presae_decoder_net)
        dre_presae_predict_net = nn.DataParallel(dre_presae_predict_net)
        count_parameters(dre_presae_encoder_net)
        count_parameters(dre_presae_decoder_net)
        count_parameters(dre_presae_predict_net)
        print("\n Start training sparseAE model for feature extraction in the DRE >>>")
        dre_presae_encoder_net, dre_presae_decoder_net, dre_presae_predict_net = train_sparseAE(unique_labels=unique_labels_train_norm, train_images=images_train, train_labels=labels_train_norm, net_encoder=dre_presae_encoder_net, net_decoder=dre_presae_decoder_net, net_predict=dre_presae_predict_net, save_sae_images_folder=save_sae_images_InTrain_folder, path_to_ckpt=save_models_folder)
        # store model (decoder is only needed while training the SAE, so not saved)
        torch.save({
            'encoder_net_state_dict': dre_presae_encoder_net.state_dict(),
            'predict_net_state_dict': dre_presae_predict_net.state_dict(),
            # 'decoder_net_state_dict': dre_presae_decoder_net.state_dict(),
        }, filename_presae_ckpt)
        print("\n End training CNN.")
    else:
        print("\n Loading pre-trained sparseAE for feature extraction in DRE.")
        dre_presae_encoder_net = encoder_extract(ch=args.dre_presae_ch, dim_bottleneck=args.img_size*args.img_size*args.num_channels)
        dre_presae_predict_net = decoder_predict(dim_bottleneck=args.img_size*args.img_size*args.num_channels)
        dre_presae_encoder_net = nn.DataParallel(dre_presae_encoder_net)
        dre_presae_predict_net = nn.DataParallel(dre_presae_predict_net)
        checkpoint = torch.load(filename_presae_ckpt)
        dre_presae_encoder_net.load_state_dict(checkpoint['encoder_net_state_dict'])
        dre_presae_predict_net.load_state_dict(checkpoint['predict_net_state_dict'])
    #end if
    ##############################################
    ''' cDRE Training '''
    print("\n -----------------------------------------------------------------------------------------")
    print("\n cDRE training")
    ### dr model filename
    drefile_fullpath = save_models_folder + "/ckpt_cDR-RS_presae_epochs_{}_sparsity_{:.3f}_regre_{:.3f}_DR_{}_lambda_{:.3f}_kappa_{:.3f}_epochs_{}_seed_{}.pth".format(args.dre_presae_epochs, args.dre_presae_lambda_sparsity, args.dre_presae_lambda_regression, args.dre_net, args.dre_lambda, args.dre_kappa, args.dre_epochs, args.seed)
    print('\n' + drefile_fullpath)
    path_to_ckpt_in_train = save_models_folder + '/ckpt_cDR-RS_presae_epochs_{}_sparsity_{:.3f}_regre_{:.3f}_DR_{}_lambda_{:.3f}_kappa_{:.3f}_seed_{}'.format(args.dre_presae_epochs, args.dre_presae_lambda_sparsity, args.dre_presae_lambda_regression, args.dre_net, args.dre_lambda, args.dre_kappa, args.seed)
    os.makedirs(path_to_ckpt_in_train, exist_ok=True)
    dre_loss_file_fullpath = save_models_folder + '/train_loss_cDR-RS_presae_epochs_{}_sparsity_{:.3f}_regre_{:.3f}_DR_{}_epochs_{}_lambda_{:.3f}_kappa_{:.3f}_seed_{}.png'.format(args.dre_presae_epochs, args.dre_presae_lambda_sparsity, args.dre_presae_lambda_regression, args.dre_net, args.dre_epochs, args.dre_lambda, args.dre_kappa, args.seed)
    ### dre training: density-ratio model operates on SAE features + label embedding
    if args.dre_net in ["MLP3","MLP5"]:
        dre_net = cDR_MLP(args.dre_net, p_dropout=0.5, init_in_dim = args.num_channels*args.img_size*args.img_size, dim_embed = args.gan_dim_embed)
    else:
        raise Exception('Wrong DR name!')
    num_parameters_DR = count_parameters(dre_net)
    dre_net = nn.DataParallel(dre_net)
    #if DR model exists, then load the pretrained model; otherwise, start training the model.
    if not os.path.isfile(drefile_fullpath):
        print("\n Begin Training conditional DR in Feature Space: >>>")
        dre_net, avg_train_loss = train_cdre(kappa=args.dre_kappa, unique_labels=unique_labels_train_norm, train_images=images_train, train_labels=labels_train_norm, dre_net=dre_net, dre_precnn_net=dre_presae_encoder_net, netG=netG, net_y2h=net_embed_y2h, path_to_ckpt=path_to_ckpt_in_train)
        # save model
        torch.save({
            'net_state_dict': dre_net.state_dict(),
        }, drefile_fullpath)
        PlotLoss(avg_train_loss, dre_loss_file_fullpath)
    else:
        # if already trained, load pre-trained DR model
        checkpoint_dre_net = torch.load(drefile_fullpath)
        dre_net.load_state_dict(checkpoint_dre_net['net_state_dict'])
    ##end if not
    # Compute density ratio: function for computing a bunch of images in a numpy array
    def comp_cond_density_ratio(imgs, labels, dre_precnn_net=dre_presae_encoder_net, dre_net=dre_net, net_y2h=net_embed_y2h, batch_size=args.samp_batch_size):
        # imgs: torch tensor of UN-normalized images (max > 1, i.e. [0,255] range);
        # labels: normalized labels in [0,1].
        # Returns a numpy array of per-image conditional density ratios.
        assert imgs.max()>1
        assert labels.min()>=0 and labels.max()<=1.0
        dre_precnn_net = dre_precnn_net.cuda()
        dre_net = dre_net.cuda()
        net_y2h = net_y2h.cuda()
        dre_precnn_net.eval()
        dre_net.eval()
        net_y2h.eval()
        #imgs: a torch tensor
        n_imgs = len(imgs)
        if batch_size>n_imgs:
            batch_size = n_imgs
        # NOTE(review): the next two asserts duplicate the checks at the top of
        # the function; harmless but redundant.
        assert imgs.max().item()>1.0 ##make sure all images are not normalized
        assert labels.max()<=1.0 and labels.min()>=0 ##make sure all labels are normalized to [0,1]
        ##make sure the last iteration has enough samples: pad with the first
        ##batch, then trim the ratios back to n_imgs at the end.
        imgs = torch.cat((imgs, imgs[0:batch_size]), dim=0)
        labels = torch.cat((labels, labels[0:batch_size]), dim=0)
        density_ratios = []
        # print("\n Begin computing density ratio for images >>")
        with torch.no_grad():
            n_imgs_got = 0
            while n_imgs_got < n_imgs:
                batch_images = imgs[n_imgs_got:(n_imgs_got+batch_size)]
                batch_images = (batch_images/255.0-0.5)/0.5 ## normalize to [-1,1]
                batch_labels = labels[n_imgs_got:(n_imgs_got+batch_size)]
                batch_images = batch_images.type(torch.float).cuda()
                batch_labels = batch_labels.type(torch.float).view(-1,1).cuda()
                batch_labels = net_y2h(batch_labels)
                batch_features = dre_precnn_net(batch_images)
                batch_ratios = dre_net(batch_features, batch_labels)
                density_ratios.append(batch_ratios.cpu().detach())
                n_imgs_got += batch_size
            ### while n_imgs_got
        density_ratios = torch.cat(density_ratios)
        density_ratios = density_ratios[0:n_imgs].numpy()
        return density_ratios
    # Enhanced sampler based on the trained DR model
    # Rejection Sampling:"Discriminator Rejection Sampling"; based on https://github.com/shinseung428/DRS_Tensorflow/blob/master/config.py
    def fn_enhancedSampler_given_label(nfake, given_label, batch_size=args.samp_batch_size, verbose=True):
        ''' given_label is normalized '''
        # Rejection-sample nfake generator images at one label: each image is
        # accepted with probability ratio/M_bar, where M_bar is the running
        # maximum density ratio (initialized during a burn-in stage).
        assert 0<=given_label<=1.0
        ## Burn-in Stage: estimate the maximum density ratio M_bar
        n_burnin = args.samp_burnin_size
        burnin_imgs, burnin_labels = fn_sampleGAN_given_label(n_burnin, given_label, batch_size = batch_size, to_numpy=False, denorm=True)
        burnin_densityratios = comp_cond_density_ratio(burnin_imgs, burnin_labels)
        # print((burnin_densityratios.min(),np.median(burnin_densityratios),burnin_densityratios.max()))
        M_bar = np.max(burnin_densityratios)
        del burnin_imgs, burnin_densityratios; gc.collect()
        ## Rejection sampling
        enhanced_imgs = []
        if verbose:
            pb = SimpleProgressBar()
            # pbar = tqdm(total=nfake)
        num_imgs = 0
        while num_imgs < nfake:
            batch_imgs, batch_labels = fn_sampleGAN_given_label(batch_size, given_label, batch_size = batch_size, to_numpy=False, denorm=True)
            batch_ratios = comp_cond_density_ratio(batch_imgs, batch_labels)
            batch_imgs = batch_imgs.numpy() #convert to numpy array
            # keep M_bar as the max ratio observed so far
            M_bar = np.max([M_bar, np.max(batch_ratios)])
            #threshold: accept sample i iff psi_i <= ratio_i / M_bar
            batch_p = batch_ratios/M_bar
            batch_psi = np.random.uniform(size=batch_size).reshape(-1,1)
            indx_accept = np.where(batch_psi<=batch_p)[0]
            if len(indx_accept)>0:
                enhanced_imgs.append(batch_imgs[indx_accept])
            num_imgs+=len(indx_accept)
            del batch_imgs, batch_ratios; gc.collect()
            if verbose:
                pb.update(np.min([float(num_imgs)*100/nfake,100]))
                # pbar.update(len(indx_accept))
        # pbar.close()
        enhanced_imgs = np.concatenate(enhanced_imgs, axis=0)
        enhanced_imgs = enhanced_imgs[0:nfake]
        return enhanced_imgs, given_label*np.ones(nfake)
#######################################################################################
''' Sampling '''
#######################################################################################
#--------------------------------------------------------------------------------------
''' Synthetic Data Generation '''
print('\n Start sampling ...')
dump_fake_images_filename = os.path.join(fake_data_folder, 'steeringangle_fake_images_{}_{}_Nlabel_{}_NFakePerLabel_{}_seed_{}.h5'.format(args.gan_arch, subsampling_method, args.samp_num_fake_labels, args.samp_nfake_per_label, args.seed))
print(dump_fake_images_filename)
if not os.path.isfile(dump_fake_images_filename):
    fake_data_h5file_fullpath = os.path.join(fake_data_folder, args.unfiltered_fake_dataset_filename)
    print(fake_data_h5file_fullpath)
    # Reuse an existing unfiltered fake dataset when we only need to filter/adjust it.
    if os.path.isfile(fake_data_h5file_fullpath) and (args.filter or args.adjust):
        print("\n Loading exiting unfiltered fake data >>>")
        hf = h5py.File(fake_data_h5file_fullpath, 'r')
        fake_images = hf['fake_images'][:]
        fake_labels = hf['fake_labels'][:] #unnormalized
        hf.close()
    else:
        # NOTE(review): this compares the JOINED path to "None"; after
        # os.path.join it can never equal "None", so the warning always prints.
        # Likely `args.unfiltered_fake_dataset_filename != "None"` was intended
        # -- confirm before changing.
        if fake_data_h5file_fullpath!="None":
            print("\r Assigned unfilered fake dataset does not exist!!!")
        # choose conditioning labels: an even grid, or every test label by default
        if args.samp_num_fake_labels>0:
            target_labels_norm = np.linspace(0.0, 1.0, args.samp_num_fake_labels)
        else:
            target_labels_norm = labels_test_norm
        if args.subsampling:
            print("\n Generating {} fake images for each of {} distinct labels with subsampling: {}.".format(args.samp_nfake_per_label, len(target_labels_norm), subsampling_method))
            fake_images = []
            fake_labels = []
            for i in trange(len(target_labels_norm)):
                fake_images_i, fake_labels_i = fn_enhancedSampler_given_label(args.samp_nfake_per_label, target_labels_norm[i], batch_size=args.samp_batch_size, verbose=False)
                ### append
                fake_images.append(fake_images_i)
                fake_labels.append(fake_labels_i)
            ##end for i
        else:
            print("\n Generating {} fake images for each of {} distinct labels without subsampling.".format(args.samp_nfake_per_label, len(target_labels_norm)))
            fake_images = []
            fake_labels = []
            for i in trange(len(target_labels_norm)):
                fake_images_i, fake_labels_i = fn_sampleGAN_given_label(nfake=args.samp_nfake_per_label, given_label=target_labels_norm[i], batch_size=args.samp_batch_size, verbose=False)
                ### append
                fake_images.append(fake_images_i)
                fake_labels.append(fake_labels_i)
            ##end for i
        fake_images = np.concatenate(fake_images, axis=0)
        fake_labels = np.concatenate(fake_labels, axis=0)
        ### denormalize labels back to the raw angle range
        fake_labels = np.clip(fake_labels, 0, 1)
        fake_labels = fn_denorm_labels(fake_labels)
        assert len(fake_images) == args.samp_nfake_per_label*len(target_labels_norm)
        assert len(fake_labels) == args.samp_nfake_per_label*len(target_labels_norm)
        assert fake_images.max()>1
        assert fake_labels.max()>1
    ##end if
    ##end if os
    #--------------------------------------------------------------------------------------
    ''' Filtered and Adjusted by a pre-trained CNN '''
    if args.filter or args.adjust:
        print("\n -----------------------------------------------------------------------------------------")
        print("\n Start Filtering Synthetic Data >>>")
        ## dataset
        assert fake_images.max()>1
        assert fake_labels.max()>1
        dataset_filtering = IMGs_dataset(fake_images, fake_labels, normalize=True)
        dataloader_filtering = torch.utils.data.DataLoader(dataset_filtering, batch_size=args.samp_filter_batch_size, shuffle=False, num_workers=args.num_workers)
        ## load pre-trained cnn
        filter_precnn_net = cnn_dict[args.samp_filter_precnn_net]().cuda()
        checkpoint = torch.load(args.samp_filter_precnn_net_ckpt_path)
        filter_precnn_net.load_state_dict(checkpoint['net_state_dict'])
        ## evaluate on fake data: per-image MAE between assigned and predicted labels
        fake_mae_loss = []
        fake_labels_pred = []
        filter_precnn_net.eval()
        pbar = tqdm(total=len(fake_images))
        with torch.no_grad():
            total = 0
            loss_all = 0
            for batch_idx, (images, labels) in enumerate(dataloader_filtering):
                images = images.type(torch.float).cuda()
                labels = labels.type(torch.float) #unnormalized label
                labels_pred = filter_precnn_net(images)
                # CNN predicts normalized labels -- TODO confirm against its training
                labels_pred = torch.clip(labels_pred, 0, 1)
                labels_pred = fn_denorm_labels(labels_pred.cpu()) #denormalize
                labels = labels.view(-1)
                labels_pred = labels_pred.view(-1)
                loss = torch.abs(labels_pred-labels)
                loss_all += loss.sum().item()
                total += labels.size(0)
                fake_labels_pred.append(labels_pred.numpy())
                fake_mae_loss.append(loss.numpy())
                pbar.update(len(images))
        print('\n Test MAE of {} on the {} fake images: {}.'.format(args.samp_filter_precnn_net, len(fake_images), loss_all / total))
        fake_mae_loss = np.concatenate(fake_mae_loss, axis=0)
        fake_labels_pred = np.concatenate(fake_labels_pred, axis=0)
        # keep only samples whose MAE is below the chosen percentile cutoff
        mae_cutoff_point = np.quantile(fake_mae_loss, q=args.samp_filter_mae_percentile_threshold)
        indx_sel = np.where(fake_mae_loss<mae_cutoff_point)[0]
        fake_images = fake_images[indx_sel]
        if args.adjust:
            fake_labels = fake_labels_pred[indx_sel] #adjust the labels of fake data by using the pre-trained CNN
        else:
            fake_labels = fake_labels[indx_sel]
        ## histogram of MAEs
        fig = plt.figure()
        ax = plt.subplot(111)
        n, bins, patches = plt.hist(fake_mae_loss, 100, density=False, facecolor='g', alpha=0.75)
        plt.axvline(x=mae_cutoff_point, c='grey')
        plt.xlabel('MAE')
        plt.ylabel('Frequency')
        plt.title('Histogram of MAE')
        plt.grid(True)
        #plt.show()
        plt.savefig(os.path.join(fake_data_folder, 'histogram_of_fake_data_MAE_with_subsampling_{}_MAEFilter_{}.png'.format(subsampling_method, args.samp_filter_mae_percentile_threshold)))
    #--------------------------------------------------------------------------------------
    ''' Dump synthetic data to h5 file '''
    with h5py.File(dump_fake_images_filename, "w") as f:
        f.create_dataset('fake_images', data = fake_images, dtype='uint8', compression="gzip", compression_opts=6)
        f.create_dataset('fake_labels', data = fake_labels, dtype='float')
else:
    # fake dataset already exists for this configuration: just load it
    print('\n Start loading generated fake data...')
    with h5py.File(dump_fake_images_filename, "r") as f:
        fake_images = f['fake_images'][:]
        fake_labels = f['fake_labels'][:]
# if not os.path.isfile(dump_fake_images_filename):
#     with h5py.File(dump_fake_images_filename, "w") as f:
#         f.create_dataset('fake_images', data = fake_images, dtype='uint8', compression="gzip", compression_opts=6)
#         f.create_dataset('fake_labels', data = fake_labels, dtype='float')
# else:
#     print('\n Start loading generated fake data...')
#     with h5py.File(dump_fake_images_filename, "r") as f:
#         fake_images = f['fake_images'][:]
#         fake_labels = f['fake_labels'][:]
print("\n The dim of the fake dataset: ", fake_images.shape)
print("\n The range of generated fake dataset: MIN={}, MAX={}.".format(fake_labels.min(), fake_labels.max()))
### visualize data distribution
unique_fake_labels = np.sort(np.array(list(set(fake_labels))))
frequencies = []
for i in range(len(unique_fake_labels)):
    indx_i = np.where(fake_labels==unique_fake_labels[i])[0]
    frequencies.append(len(indx_i))
frequencies = np.array(frequencies).astype(int)
# plot data in grouped manner of bar type
fig, ax = plt.subplots(1,1, figsize=(6,4))
ax.grid(color='lightgrey', linestyle='--', zorder=0)
ax.bar(unique_fake_labels, frequencies, align='center', color='tab:green', zorder=3)
ax.set_xlabel("Angle")
ax.set_ylabel("Frequency")
plt.tight_layout()
plt.savefig(os.path.join(fake_data_folder, "steeringangle_fake_images_{}_{}_Nlabel_{}_NFakePerLabel_{}_data_dist.pdf".format(args.gan_arch, subsampling_method, args.samp_num_fake_labels, args.samp_nfake_per_label)))
plt.close()
print('\n Frequence of angles: MIN={}, MEAN={}, MAX={}, SUM={}.'.format(np.min(frequencies),np.mean(frequencies),np.max(frequencies),np.sum(frequencies)))
print("\n===================================================================================================")
| 34,078 | 45.940771 | 545 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/utils.py | """
Some helpful functions
"""
import numpy as np
import torch
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
import matplotlib as mpl
from torch.nn import functional as F
import sys
import PIL
from PIL import Image
# ### import my stuffs ###
# from models import *
# ################################################################################
# Progress Bar
class SimpleProgressBar():
    """Minimal in-place text progress bar written to stdout."""

    def __init__(self, width=50):
        self.last_x = -1   # last integer percentage drawn; -1 means nothing drawn yet
        self.width = width # number of characters in the bar

    def update(self, x):
        """Redraw the bar at `x` percent (0..100); no-op if the integer percent is unchanged."""
        assert 0 <= x <= 100  # `x`: progress in percent ( between 0 and 100)
        percent = int(x)
        if percent == self.last_x:
            return
        self.last_x = percent
        filled = int(self.width * (x / 100.0))
        bar = '#' * filled + '.' * (self.width - filled)
        sys.stdout.write( '\r%d%% [%s]' % (percent, bar))
        sys.stdout.flush()
        if x == 100:
            print('')
################################################################################
# torch dataset from numpy array
class IMGs_dataset(torch.utils.data.Dataset):
    """Wrap a numpy image array (optionally with labels) as a torch Dataset.

    With normalize=True, [0,255]-range images are mapped to [-1,1].
    __getitem__ yields `image` when labels is None, else `(image, label)`.
    """

    def __init__(self, images, labels=None, normalize=False):
        super(IMGs_dataset, self).__init__()
        self.images = images
        self.n_images = len(images)
        self.labels = labels
        if labels is not None and len(self.images) != len(self.labels):
            raise Exception('images (' + str(len(self.images)) +') and labels ('+str(len(self.labels))+') do not have the same length!!!')
        self.normalize = normalize

    def __getitem__(self, index):
        sample = self.images[index]
        if self.normalize:
            # [0,255] -> [0,1] -> [-1,1]
            sample = (sample/255.0 - 0.5)/0.5
        if self.labels is None:
            return sample
        return (sample, self.labels[index])

    def __len__(self):
        return self.n_images
class IMGs_dataset_v2(torch.utils.data.Dataset):
    """Dataset of (image, label, ground-truth label) triples.

    `labels` may differ from `labels_gt` (e.g. adjusted vs. original labels).
    With normalize=True, [0,255]-range images are mapped to [-1,1].
    """

    def __init__(self, images, labels, labels_gt, normalize=False):
        super(IMGs_dataset_v2, self).__init__()
        assert len(images) == len(labels)
        assert len(labels) == len(labels_gt)
        self.images = images
        self.n_images = len(images)
        self.labels = labels
        self.labels_gt = labels_gt
        self.normalize = normalize

    def __getitem__(self, index):
        sample = self.images[index]
        if self.normalize:
            # [0,255] -> [0,1] -> [-1,1]
            sample = (sample/255.0 - 0.5)/0.5
        return (sample, self.labels[index], self.labels_gt[index])

    def __len__(self):
        return self.n_images
################################################################################
def PlotLoss(loss, filename):
    """Plot a per-epoch training-loss curve and save it to `filename`.

    Parameters
    ----------
    loss : sequence of float
        One value per epoch; plotted against epochs 1..len(loss).
    filename : str
        Output image path (format inferred by matplotlib from the extension).
    """
    x_axis = np.arange(start = 1, stop = len(loss)+1)
    plt.switch_backend('agg')  # headless backend: safe on servers without a display
    mpl.style.use('seaborn')
    fig = plt.figure()
    ax = plt.subplot(111)
    ax.plot(x_axis, np.array(loss))
    plt.xlabel("epoch")
    plt.ylabel("training loss")
    plt.legend()
    #ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), shadow=True, ncol=3)
    #plt.title('Training Loss')
    plt.savefig(filename)
    # Fix: close the figure; previously each call leaked an open figure, so
    # repeated calls (one per training run/epoch plot) accumulated memory.
    plt.close(fig)
################################################################################
# compute entropy of class labels; labels is a numpy array
def compute_entropy(labels, base=None):
    """Shannon entropy of the empirical distribution of `labels` (numpy array).

    Uses the natural logarithm unless `base` is given.
    """
    _, counts = np.unique(labels, return_counts=True)
    probs = counts / counts.sum()
    log_base = np.e if base is None else base
    return -(probs * np.log(probs)/np.log(log_base)).sum()
def predict_class_labels(net, images, batch_size=500, verbose=False, num_workers=0):
    """Predict integer class labels for `images` using classifier `net` (CUDA).

    `images` is a numpy array already in the value range `net` expects
    (normalize=False below). Assumes `net(x)` returns a tuple whose first
    element is the logits -- TODO confirm against the model definitions.
    Returns a numpy array of length len(images) with predicted class indices.
    """
    net = net.cuda()
    net.eval()
    n = len(images)
    if batch_size>n:
        batch_size=n
    dataset_pred = IMGs_dataset(images, normalize=False)
    dataloader_pred = torch.utils.data.DataLoader(dataset_pred, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    # over-allocate by one batch, then trim to n at the end
    class_labels_pred = np.zeros(n+batch_size)
    with torch.no_grad():
        nimgs_got = 0
        if verbose:
            pb = SimpleProgressBar()
        for batch_idx, batch_images in enumerate(dataloader_pred):
            batch_images = batch_images.type(torch.float).cuda()
            batch_size_curr = len(batch_images)
            outputs,_ = net(batch_images)
            # argmax over the class dimension
            _, batch_class_labels_pred = torch.max(outputs.data, 1)
            class_labels_pred[nimgs_got:(nimgs_got+batch_size_curr)] = batch_class_labels_pred.detach().cpu().numpy().reshape(-1)
            nimgs_got += batch_size_curr
            if verbose:
                pb.update((float(nimgs_got)/n)*100)
        #end for batch_idx
    class_labels_pred = class_labels_pred[0:n]
    return class_labels_pred
################################################################################
# Convenience function to count the number of parameters in a module
def count_parameters(module, verbose=True):
    """Return the total number of scalar parameters in `module`; print it when verbose."""
    total = 0
    for p in module.parameters():
        total += p.data.nelement()
    if verbose:
        print('Number of parameters: {}'.format(total))
    return total
| 5,139 | 29.595238 | 143 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/train_cdre.py | '''
Functions for Training Class-conditional Density-ratio model
'''
import torch
import torch.nn as nn
import numpy as np
import os
import timeit
import gc
from utils import *
from opts import gen_synth_data_opts
''' Settings '''
args = gen_synth_data_opts()
# some parameters in the opts
dim_gan = args.gan_dim_g  # dimension of the GAN latent (noise) vector
dre_net_name = args.dre_net  # architecture name of the density-ratio network (used in ckpt filenames)
dre_epochs = args.dre_epochs  # number of cDRE training epochs
dre_lr_base = args.dre_lr_base  # initial learning rate
dre_lr_decay_factor = args.dre_lr_decay_factor  # multiplicative LR decay factor
dre_lr_decay_epochs = (args.dre_lr_decay_epochs).split("_")  # e.g. "80_150" -> ["80", "150"]
dre_lr_decay_epochs = [int(epoch) for epoch in dre_lr_decay_epochs]  # epochs at which LR decays
dre_lambda = args.dre_lambda  # weight of the (E[DR_fake]-1)^2 penalty in the Softplus loss
dre_resume_epoch = args.dre_resume_epoch  # resume training from this epoch (0 = from scratch)
dre_batch_size = args.dre_batch_size  # mini-batch size for cDRE training
dre_optimizer = args.dre_optimizer  # "SGD" selects SGD; anything else selects Adam
dre_save_freq = args.dre_save_freq  # save a checkpoint every this many epochs
## normalize images
def normalize_images(batch_images):
    """Map uint8-range images in [0, 255] to float images in [-1, 1]."""
    scaled = batch_images / 255.0
    return (scaled - 0.5) / 0.5
# training function
def train_cdre(kappa, unique_labels, train_images, train_labels, dre_net, dre_precnn_net, netG, net_y2h, net_filter=None, reg_niters=10, path_to_ckpt=None):
    """Train the label-conditional density-ratio estimation (cDRE) network in feature space.

    Args:
        kappa: vicinity radius in [0, 1]; when > 0 and `net_filter` is given,
            fake images whose predicted labels fall outside the vicinity of
            the sampled label are regenerated / filtered out.
        unique_labels: normalized (in [0, 1]) unique training labels.
        train_images: unnormalized training images in (1, 255].
        train_labels: normalized (in [0, 1]) training labels.
        dre_net: density-ratio network to train (takes features + label embedding).
        dre_precnn_net: frozen feature extractor applied to images.
        netG: frozen conditional generator producing fake images.
        net_y2h: frozen label-embedding network.
        net_filter: optional regression net predicting labels from fake images
            (the predicting branch of the sparse AE), used when kappa > 0.
        reg_niters: max number of regeneration rounds for filtered fakes.
        path_to_ckpt: folder for loading/saving checkpoints; None disables.

    Returns:
        (dre_net, avg_train_loss): the trained net moved back to CPU and the
        per-epoch average training losses.
    """
    ##data; train_images are unnormalized, train_labels are normalized
    ## unique_labels: normalized unique labels
    assert train_images.max()>1.0 and train_images.max()<=255.0
    assert train_labels.max()<=1.0 and train_labels.min()>=0
    assert 0<=kappa<=1
    indx_all = np.arange(len(train_labels))
    ''' learning rate decay '''
    def adjust_learning_rate(optimizer, epoch):
        # step schedule: multiply base LR by the decay factor once per passed milestone
        lr = dre_lr_base
        num_decays = len(dre_lr_decay_epochs)
        for decay_i in range(num_decays):
            if epoch >= dre_lr_decay_epochs[decay_i]:
                lr = lr * dre_lr_decay_factor
            #end if epoch
        #end for decay_i
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    #end def adjust lr
    # nets: only dre_net is trained; the others are frozen feature/generation helpers
    dre_precnn_net = dre_precnn_net.cuda()
    netG = netG.cuda()
    net_y2h = net_y2h.cuda()
    dre_net = dre_net.cuda()
    dre_precnn_net.eval()
    netG.eval()
    net_y2h.eval()
    if net_filter is not None and kappa>1e-30: #the predicting branch of the sparse AE
        print("\n Do filtering in cDRE training with kappa {}.".format(kappa))
        net_filter = net_filter.cuda()
        net_filter.eval()
    # define optimizer
    if dre_optimizer=="SGD":
        optimizer = torch.optim.SGD(dre_net.parameters(), lr = dre_lr_base, momentum= 0.9, weight_decay=1e-4)
    else:
        optimizer = torch.optim.Adam(dre_net.parameters(), lr = dre_lr_base, betas=(0.5, 0.999), weight_decay=1e-4)
    if path_to_ckpt is not None and dre_resume_epoch>0:
        print("Loading ckpt to resume training dre_net >>>")
        ckpt_fullpath = path_to_ckpt + "/cDRE_{}_checkpoint_epoch_{}.pth".format(dre_net_name, dre_resume_epoch)
        checkpoint = torch.load(ckpt_fullpath)
        dre_net.load_state_dict(checkpoint['net_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        #load d_loss and g_loss
        logfile_fullpath = path_to_ckpt + "/cDRE_{}_train_loss_epoch_{}.npz".format(dre_net_name, dre_resume_epoch)
        if os.path.isfile(logfile_fullpath):
            avg_train_loss = list(np.load(logfile_fullpath))
        else:
            avg_train_loss = []
    else:
        avg_train_loss = []
    start_time = timeit.default_timer()
    for epoch in range(dre_resume_epoch, dre_epochs):
        adjust_learning_rate(optimizer, epoch)
        train_loss = 0
        for batch_idx in range(len(train_labels)//dre_batch_size):
            dre_net.train()
            #################################################
            ''' generate target labels '''
            # label-balanced sampling: draw labels uniformly from the unique labels,
            # then sample (with replacement) real images carrying those labels
            batch_target_labels = np.random.choice(unique_labels, size=dre_batch_size, replace=True)
            batch_unique_labels, batch_unique_label_counts = np.unique(batch_target_labels, return_counts=True)
            batch_real_indx = []
            for j in range(len(batch_unique_labels)):
                indx_j = np.where(train_labels==batch_unique_labels[j])[0]
                indx_j = np.random.choice(indx_j, size=batch_unique_label_counts[j])
                batch_real_indx.append(indx_j)
            batch_real_indx = np.concatenate(batch_real_indx)
            batch_real_indx = batch_real_indx.reshape(-1)
            # batch_real_indx = np.random.choice(indx_all, size=dre_batch_size, replace=True).reshape(-1)
            #################################################
            ''' density ratios of real images '''
            ## get some real images for training
            batch_real_images = train_images[batch_real_indx]
            batch_real_images = normalize_images(batch_real_images) ## normalize real images
            batch_real_images = torch.from_numpy(batch_real_images).type(torch.float).cuda()
            assert batch_real_images.max().item()<=1.0
            batch_real_labels = train_labels[batch_real_indx]
            batch_real_labels = torch.from_numpy(batch_real_labels).type(torch.float).view(-1,1).cuda()
            #################################################
            ''' density ratios of fake images '''
            ## generate fake labels first
            if kappa>1e-30:
                # sample each fake label uniformly from the kappa-vicinity of the
                # matching real label; remember the vicinity bounds for filtering
                batch_fake_labels = np.zeros(dre_batch_size)
                vicinity_start = torch.zeros(dre_batch_size).cuda()
                vicinity_end = torch.zeros(dre_batch_size).cuda()
                for j in range(dre_batch_size):
                    # start_j = max(0, batch_real_labels[j].item()-kappa)
                    # end_j = min(1, batch_real_labels[j].item()+kappa)
                    start_j = batch_real_labels[j].item()-kappa
                    end_j = batch_real_labels[j].item()+kappa
                    assert batch_real_labels[j].item()>=start_j and batch_real_labels[j].item()<=end_j
                    batch_fake_labels[j] = np.random.uniform(low=start_j, high=end_j, size=1)
                    vicinity_start[j] = start_j
                    vicinity_end[j] = end_j
                batch_fake_labels = torch.from_numpy(batch_fake_labels).type(torch.float).view(-1,1).cuda()
                ## then, generate fake images
                ## drop fake images with predicted labels not in the vicinity
                with torch.no_grad():
                    z = torch.randn(dre_batch_size, dim_gan, dtype=torch.float).cuda()
                    batch_fake_images = netG(z, net_y2h(batch_fake_labels))
                    batch_fake_images = batch_fake_images.detach()
                    batch_fake_labels_pred = net_filter(batch_fake_images)
                    indx_drop_1 = batch_fake_labels_pred.view(-1)<vicinity_start
                    indx_drop_2 = batch_fake_labels_pred.view(-1)>vicinity_end
                    indx_drop = torch.cat((indx_drop_1.view(-1,1), indx_drop_2.view(-1,1)), dim=1)
                    indx_drop = torch.any(indx_drop, 1)
                    ## regenerate fake images whose labels are not in the vicinity; at most niter_tmp rounds
                    niter_tmp = 0
                    while indx_drop.sum().item()>0 and niter_tmp<=reg_niters:
                        batch_size_tmp = indx_drop.sum().item()
                        batch_fake_labels_tmp = batch_fake_labels[indx_drop]
                        assert len(batch_fake_labels_tmp)==batch_size_tmp
                        ##update corresponding fake images
                        z = torch.randn(batch_size_tmp, dim_gan, dtype=torch.float).cuda()
                        batch_fake_images_tmp = netG(z, net_y2h(batch_fake_labels_tmp))
                        batch_fake_images[indx_drop] = batch_fake_images_tmp
                        batch_fake_labels_pred[indx_drop] = net_filter(batch_fake_images_tmp)
                        ##update indices of dropped images
                        indx_drop_1 = batch_fake_labels_pred.view(-1)<vicinity_start
                        indx_drop_2 = batch_fake_labels_pred.view(-1)>vicinity_end
                        indx_drop = torch.cat((indx_drop_1.view(-1,1), indx_drop_2.view(-1,1)),dim=1)
                        indx_drop = torch.any(indx_drop, 1)
                        niter_tmp+=1
                        # print(niter_tmp, indx_drop.sum().item(), dre_batch_size)
                    ###end while
                    # after the regeneration budget is spent, keep only in-vicinity fakes
                    indx_keep = (batch_fake_labels_pred.view(-1)>=vicinity_start)*(batch_fake_labels_pred.view(-1)<=vicinity_end)
                    assert indx_keep.sum().item()>0
                    batch_fake_images = batch_fake_images[indx_keep]
                    batch_real_images = batch_real_images[indx_keep] ##if do not do subsampling for real images too, the cDRE training does not converge
                    batch_real_labels = batch_real_labels[indx_keep] ##note that, here is batch_real_labels not batch_fake_labels!!!!
            else:
                # no filtering: condition the generator directly on the real labels
                with torch.no_grad():
                    z = torch.randn(dre_batch_size, dim_gan, dtype=torch.float).cuda()
                    batch_fake_images = netG(z, net_y2h(batch_real_labels))
                    batch_fake_images = batch_fake_images.detach()
            ## extract features from real and fake images
            with torch.no_grad():
                batch_features_real = dre_precnn_net(batch_real_images)
                batch_features_real = batch_features_real.detach()
                batch_features_fake = dre_precnn_net(batch_fake_images)
                batch_features_fake = batch_features_fake.detach()
            del batch_real_images, batch_fake_images; gc.collect()
            ## density ratios for real images
            DR_real = dre_net(batch_features_real, net_y2h(batch_real_labels))
            ## density ratios for fake images
            DR_fake = dre_net(batch_features_fake, net_y2h(batch_real_labels)) ##Please note that use batch_real_labels here !!!!
            #################################################
            #Softplus loss; the penalty pushes the mean fake density ratio toward 1
            softplus_fn = torch.nn.Softplus(beta=1,threshold=20)
            sigmoid_fn = torch.nn.Sigmoid()
            SP_div = torch.mean(sigmoid_fn(DR_fake) * DR_fake) - torch.mean(softplus_fn(DR_fake)) - torch.mean(sigmoid_fn(DR_real))
            penalty = dre_lambda * (torch.mean(DR_fake) - 1)**2
            loss = SP_div + penalty
            #backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.cpu().item()
            print("cDRE+{}+lambda{}: [step {}/{}] [epoch {}/{}] [train loss {:.5f}] [fake batch {}/{}] [Time {:.4f}]".format(dre_net_name, dre_lambda, batch_idx+1, len(train_labels)//dre_batch_size, epoch+1, dre_epochs, train_loss/(batch_idx+1), len(batch_features_fake), dre_batch_size, timeit.default_timer()-start_time))
            # #################################################
            # ### debugging
            # dre_net.eval()
            # with torch.no_grad():
            #     DR_real2 = dre_net(batch_features_real, net_y2h(batch_real_labels))
            #     DR_fake2 = dre_net(batch_features_fake, net_y2h(batch_fake_labels))
            # print("[Iter {}/{}], [epoch {}/{}], Debug (train):{:.4f}/{:.4f}".format(batch_idx, len(train_labels)//dre_batch_size, epoch+1, dre_epochs, DR_real.mean(),DR_fake.mean()))
            # print("[Iter {}/{}], [epoch {}/{}], Debug (eval):{:.4f}/{:.4f}".format(batch_idx, len(train_labels)//dre_batch_size, epoch+1, dre_epochs, DR_real2.mean(),DR_fake2.mean()))
        # end for batch_idx
        # print("cDRE+{}+lambda{}: [epoch {}/{}] [train loss {}] [Time {}]".format(dre_net_name, dre_lambda, epoch+1, dre_epochs, train_loss/(batch_idx+1), timeit.default_timer()-start_time))
        avg_train_loss.append(train_loss/(batch_idx+1))
        # save checkpoint
        if path_to_ckpt is not None and ((epoch+1) % dre_save_freq == 0 or (epoch+1)==dre_epochs):
            ckpt_fullpath = path_to_ckpt + "/cDRE_{}_checkpoint_epoch_{}.pth".format(dre_net_name, epoch+1)
            torch.save({
                    'epoch': epoch,
                    'net_state_dict': dre_net.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
            }, ckpt_fullpath)
            # save loss
            logfile_fullpath = path_to_ckpt + "/cDRE_{}_train_loss_epoch_{}.npz".format(dre_net_name, epoch+1)
            np.savez(logfile_fullpath, np.array(avg_train_loss))
    #end for epoch
    #back to memory
    dre_precnn_net = dre_precnn_net.cpu()
    netG = netG.cpu()
    net_y2h = net_y2h.cpu()
    dre_net = dre_net.cpu()
    if net_filter is not None:
        net_filter = net_filter.cpu()
    return dre_net, avg_train_loss
#end for def
| 12,763 | 46.099631 | 323 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/train_cnn.py | ''' For CNN training and testing. '''
import os
import timeit
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
## normalize images
def normalize_images(batch_images):
    """Rescale pixel values from [0, 255] to [-1, 1]."""
    unit_scaled = batch_images / 255.0
    return (unit_scaled - 0.5) / 0.5
''' function for cnn training '''
def train_cnn(net, net_name, train_images, train_labels, testloader, epochs, resume_epoch=0, save_freq=40, batch_size=128, lr_base=0.01, lr_decay_factor=0.1, lr_decay_epochs=(150, 250), weight_decay=1e-4, path_to_ckpt = None, fn_denorm_labels=None):
    '''
    Train a regression CNN with SGD + MSE loss and report test MAE each epoch.

    train_images: unnormalized images with pixel values in (1, 255].
    train_labels: labels normalized to [0, 1].
    testloader: DataLoader yielding (normalized images, normalized labels).
    epochs / resume_epoch: total epochs and the epoch to resume from (0 = scratch).
    save_freq: checkpoint every `save_freq` epochs (requires path_to_ckpt).
    lr_base / lr_decay_factor / lr_decay_epochs: step LR schedule parameters.
        NOTE: the default for lr_decay_epochs was a mutable list; it is only
        read, so the tuple default is backward-compatible.
    fn_denorm_labels: maps normalized labels back to the original scale
        (used by test_cnn for the test MAE).
    Returns the trained net (still on GPU).
    '''
    assert train_images.max()>1 and train_images.max()<=255.0 and train_images.min()>=0
    assert train_labels.min()>=0 and train_labels.max()<=1.0
    indx_all = np.arange(len(train_labels))
    ''' learning rate decay '''
    def adjust_learning_rate(optimizer, epoch):
        """Decrease the LR by the decay factor once per passed milestone epoch."""
        lr = lr_base
        num_decays = len(lr_decay_epochs)
        for decay_i in range(num_decays):
            if epoch >= lr_decay_epochs[decay_i]:
                lr = lr * lr_decay_factor
            #end if epoch
        #end for decay_i
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    net = net.cuda()
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(net.parameters(), lr = lr_base, momentum= 0.9, weight_decay=weight_decay)
    # optionally resume from a checkpoint (restores net, optimizer and RNG state)
    if path_to_ckpt is not None and resume_epoch>0:
        save_file = path_to_ckpt + "/{}_checkpoint_epoch_{}.pth".format(net_name, resume_epoch)
        checkpoint = torch.load(save_file)
        net.load_state_dict(checkpoint['net_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        torch.set_rng_state(checkpoint['rng_state'])
    #end if
    train_loss_all = []
    test_mae_all = []
    start_time = timeit.default_timer()
    for epoch in range(resume_epoch, epochs):
        net.train()
        train_loss = 0
        adjust_learning_rate(optimizer, epoch)
        for batch_idx in range(len(train_labels)//batch_size):
            ## draw a random mini-batch (with replacement) from the training set
            batch_train_indx = np.random.choice(indx_all, size=batch_size, replace=True).reshape(-1)
            batch_train_images = train_images[batch_train_indx]
            batch_train_images = normalize_images(batch_train_images) ## normalize real images
            batch_train_images = torch.from_numpy(batch_train_images).type(torch.float).cuda()
            assert batch_train_images.max().item()<=1.0
            batch_train_labels = train_labels[batch_train_indx]
            batch_train_labels = torch.from_numpy(batch_train_labels).type(torch.float).view(-1,1).cuda()
            #Forward pass
            outputs = net(batch_train_images)
            loss = criterion(outputs, batch_train_labels)
            #backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.cpu().item()
        #end for batch_idx
        train_loss = train_loss / (len(train_labels)//batch_size)
        train_loss_all.append(train_loss)
        # evaluate on the held-out test set every epoch
        test_mae = test_cnn(net, testloader, fn_denorm_labels=fn_denorm_labels, verbose=False)
        test_mae_all.append(test_mae)
        print('%s: [epoch %d/%d] train_loss:%.3f, test_mae:%.3f Time: %.4f' % (net_name, epoch+1, epochs, train_loss, test_mae, timeit.default_timer()-start_time))
        # save checkpoint
        if path_to_ckpt is not None and ((epoch+1) % save_freq == 0 or (epoch+1) == epochs) :
            save_file = path_to_ckpt + "/{}_checkpoint_epoch_{}.pth".format(net_name, epoch+1)
            torch.save({
                    'net_state_dict': net.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'rng_state': torch.get_rng_state()
            }, save_file)
    #end for epoch
    return net
def test_cnn(net, testloader, fn_denorm_labels=None, verbose=False):
    """Evaluate `net` on `testloader`; return the MAE in the original label scale.

    fn_denorm_labels maps normalized labels/predictions back to the raw scale.
    """
    net = net.cuda()
    net.eval()  # eval mode: batchnorm uses moving mean/variance instead of batch stats
    total_abs_err = 0
    num_samples = 0
    with torch.no_grad():
        for images, labels in testloader:
            images = images.type(torch.float).cuda()
            true_vals = fn_denorm_labels(labels.type(torch.float).view(-1).cpu().numpy())
            pred_vals = fn_denorm_labels(net(images).view(-1).cpu().numpy())
            total_abs_err += np.sum(np.abs(true_vals - pred_vals))
            num_samples += len(true_vals)
    test_mae = total_abs_err / num_samples
    if verbose:
        print('\n Test MAE: {}.'.format(test_mae))
    return test_mae
| 5,752 | 37.353333 | 249 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/train_sparseAE.py |
import torch
import torch.nn as nn
from torchvision.utils import save_image
import numpy as np
import os
import timeit
from utils import SimpleProgressBar
from opts import gen_synth_data_opts
''' Settings '''
args = gen_synth_data_opts()
# some parameters in the opts
epochs = args.dre_presae_epochs  # number of sparse-AE training epochs
base_lr = args.dre_presae_lr_base  # initial learning rate
lr_decay_epochs = args.dre_presae_lr_decay_freq  # decay LR every this many epochs
lr_decay_factor = args.dre_presae_lr_decay_factor  # multiplicative LR decay factor
lambda_sparsity = args.dre_presae_lambda_sparsity  # weight of the mean-activation sparsity penalty
lambda_regression = args.dre_presae_lambda_regression  # weight of the label-regression loss
resume_epoch = args.dre_presae_resume_epoch  # resume training from this epoch (0 = from scratch)
weigth_decay = args.dre_presae_weight_decay  # optimizer weight decay (NOTE: name typo kept; it is module-public)
batch_size = args.dre_presae_batch_size_train  # mini-batch size for training
## normalize images
def normalize_images(batch_images):
    """Convert images from the [0, 255] range to the [-1, 1] range."""
    images01 = batch_images / 255.0
    return (images01 - 0.5) / 0.5
# decay learning rate every args.dre_lr_decay_epochs epochs
def adjust_learning_rate(epoch, epochs, optimizer, base_lr, lr_decay_epochs, lr_decay_factor):
    """Set the optimizer LR to base_lr decayed once per completed `lr_decay_epochs` interval."""
    lr = base_lr
    num_stages = epochs // lr_decay_epochs
    for stage in range(1, num_stages + 1):
        if epoch >= stage * lr_decay_epochs:
            lr *= lr_decay_factor
    for group in optimizer.param_groups:
        group['lr'] = lr
def train_sparseAE(unique_labels, train_images, train_labels, net_encoder, net_decoder, net_predict, save_sae_images_folder, path_to_ckpt=None):
    '''
    Train a sparse autoencoder (encoder + decoder) jointly with a regression
    head (`net_predict`) that predicts the normalized label from the encoder
    features.

    train_images: unnormalized
    train_labels: normalized to [0,1]
    unique_labels: normalized unique training labels (used for label-balanced sampling)
    save_sae_images_folder: folder where reconstruction grids are periodically saved
    path_to_ckpt: checkpoint folder; None disables checkpointing/resuming
    Returns (net_encoder, net_decoder, net_predict) moved back to CPU.
    '''
    assert train_images.max()>1.0 and train_images.max()<=255.0
    assert train_labels.max()<=1.0 and train_labels.min()>=0
    # nets
    net_encoder = net_encoder.cuda()
    net_decoder = net_decoder.cuda()
    net_predict = net_predict.cuda()
    # define optimizer (a single optimizer over all three sub-networks)
    params = list(net_encoder.parameters()) + list(net_decoder.parameters()) + list(net_predict.parameters())
    optimizer = torch.optim.SGD(params, lr = base_lr, momentum= 0.9, weight_decay=weigth_decay)
    # optimizer = torch.optim.Adam(params, lr = base_lr, betas=(0, 0.999), weight_decay=weigth_decay)
    # criterion
    criterion = nn.MSELoss()
    if path_to_ckpt is not None and resume_epoch>0:
        print("Loading ckpt to resume training sparseAE >>>")
        ckpt_fullpath = path_to_ckpt + "/PreSAEForDRE_checkpoint_intrain/PreSAEForDRE_checkpoint_epoch_{}_sparsity_{:.3f}_regre_{:.3f}.pth".format(resume_epoch, lambda_sparsity, lambda_regression)
        checkpoint = torch.load(ckpt_fullpath)
        net_encoder.load_state_dict(checkpoint['net_encoder_state_dict'])
        net_decoder.load_state_dict(checkpoint['net_decoder_state_dict'])
        net_predict.load_state_dict(checkpoint['net_predict_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        torch.set_rng_state(checkpoint['rng_state'])
        gen_iterations = checkpoint['gen_iterations']
    else:
        gen_iterations = 0
    start_time = timeit.default_timer()
    for epoch in range(resume_epoch, epochs):
        adjust_learning_rate(epoch, epochs, optimizer, base_lr, lr_decay_epochs, lr_decay_factor)
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        for batch_idx in range(len(train_labels)//batch_size):
            net_encoder.train()
            net_decoder.train()
            net_predict.train()
            #################################################
            ''' generate target labels '''
            # label-balanced sampling: draw target labels uniformly from the unique
            # labels, then sample (with replacement) real images carrying them
            batch_target_labels = np.random.choice(unique_labels, size=batch_size, replace=True)
            batch_unique_labels, batch_unique_label_counts = np.unique(batch_target_labels, return_counts=True)
            batch_real_indx = []
            for j in range(len(batch_unique_labels)):
                indx_j = np.where(train_labels==batch_unique_labels[j])[0]
                indx_j = np.random.choice(indx_j, size=batch_unique_label_counts[j])
                batch_real_indx.append(indx_j)
            batch_real_indx = np.concatenate(batch_real_indx)
            batch_real_indx = batch_real_indx.reshape(-1)
            #################################################
            ## get some real images for training
            batch_real_images = train_images[batch_real_indx]
            batch_real_images = normalize_images(batch_real_images) ## normalize real images
            batch_real_images = torch.from_numpy(batch_real_images).type(torch.float).cuda()
            assert batch_real_images.max().item()<=1.0
            batch_real_labels = train_labels[batch_real_indx]
            batch_real_labels = torch.from_numpy(batch_real_labels).type(torch.float).cuda()
            #################################################
            ## forward pass
            batch_features = net_encoder(batch_real_images)
            batch_recons_images = net_decoder(batch_features)
            batch_pred_labels = net_predict(batch_features)
            '''
            based on https://debuggercafe.com/sparse-autoencoders-using-l1-regularization-with-pytorch/
            '''
            # loss1: reconstruction MSE + mean-activation sparsity penalty on the features
            loss1 = criterion(batch_recons_images, batch_real_images) + lambda_sparsity * batch_features.mean()
            # loss2: label-regression MSE on the predicting head
            loss2 = criterion(batch_pred_labels.view(-1), batch_real_labels.view(-1))
            loss = loss1 + loss2*lambda_regression
            #backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.cpu().item()
            train_loss1 += loss1.cpu().item()
            train_loss2 += loss2.cpu().item()
            gen_iterations += 1
            if gen_iterations % 100 == 0:
                # periodically dump a grid of reconstructions for visual inspection
                net_encoder.eval()
                net_decoder.eval()
                n_row=min(10, int(np.sqrt(batch_size)))
                with torch.no_grad():
                    batch_recons_images = net_decoder(net_encoder(batch_real_images[0:n_row**2]))
                    batch_recons_images = batch_recons_images.detach().cpu()
                save_image(batch_recons_images.data, save_sae_images_folder + '/{}.png'.format(gen_iterations), nrow=n_row, normalize=True)
            if gen_iterations % 20 == 0:
                print("\r SparseAE+sparsity{:.3f}+regre{:.3f}: [step {}] [epoch {}/{}] [train loss {:.4f}={:.4f}+{:.4f}] [Time {:.4f}]".format(lambda_sparsity, lambda_regression, gen_iterations, epoch+1, epochs, train_loss/(batch_idx+1), train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), timeit.default_timer()-start_time) )
        # end for batch_idx
        if path_to_ckpt is not None and (epoch+1) % 50 == 0:
            save_file = path_to_ckpt + "/PreSAEForDRE_checkpoint_intrain/PreSAEForDRE_checkpoint_epoch_{}_sparsity_{:.3f}_regre_{:.3f}.pth".format(epoch+1, lambda_sparsity, lambda_regression)
            os.makedirs(os.path.dirname(save_file), exist_ok=True)
            torch.save({
                    'gen_iterations': gen_iterations,
                    'net_encoder_state_dict': net_encoder.state_dict(),
                    'net_decoder_state_dict': net_decoder.state_dict(),
                    'net_predict_state_dict': net_predict.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'rng_state': torch.get_rng_state()
            }, save_file)
    #end for epoch
    net_encoder = net_encoder.cpu()
    net_decoder = net_decoder.cpu()
    net_predict = net_predict.cpu()
    return net_encoder, net_decoder, net_predict
| 7,406 | 41.815029 | 328 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/train_ccgan.py | import torch
import numpy as np
import os
import timeit
from PIL import Image
from torchvision.utils import save_image
from utils import *
from opts import gen_synth_data_opts
from DiffAugment_pytorch import DiffAugment
''' Settings '''
args = gen_synth_data_opts()
# some parameters in opts
loss_type = args.gan_loss_type  # "vanilla" (log loss) or "hinge"
niters = args.gan_niters  # total number of generator iterations
resume_niters = args.gan_resume_niters  # resume training from this iteration (0 = from scratch)
d_niters = args.gan_d_niters  # number of discriminator updates per generator update
dim_gan = args.gan_dim_g  # dimension of the latent noise vector
lr_g = args.gan_lr_g  # generator learning rate
lr_d = args.gan_lr_d  # discriminator learning rate
save_niters_freq = args.gan_save_niters_freq  # checkpoint every this many iterations
batch_size_disc = args.gan_batch_size_disc  # batch size for D updates
batch_size_gene = args.gan_batch_size_gene  # batch size for G updates
batch_size_max = max(batch_size_disc, batch_size_gene)
threshold_type = args.gan_threshold_type  # "hard" or "soft" vicinity weighting
nonzero_soft_weight_threshold = args.gan_nonzero_soft_weight_threshold  # weight cutoff defining the soft vicinity
use_DiffAugment = args.gan_DiffAugment  # whether to apply DiffAugment to discriminator inputs
policy = args.gan_DiffAugment_policy  # DiffAugment policy string
def train_ccgan(kernel_sigma, kappa, train_images, train_labels, netG, netD, net_y2h, save_images_folder, path_to_ckpt = None, clip_label=False):
    '''
    Train a CcGAN (continuous conditional GAN) with hard/soft vicinity weighting.

    kernel_sigma: std of the Gaussian noise added to sampled target labels.
    kappa: vicinity parameter (radius for "hard", decay rate for "soft").
    train_images: unnormalized images in (1, 255]; normalized to [-1,1] per batch.
    train_labels: labels normalized to [0, 1].
    netG / netD / net_y2h: generator, discriminator and label-embedding networks.
    save_images_folder: folder where sample grids are saved every 100 iters.
    path_to_ckpt: folder for loading/saving checkpoints; None disables.
    clip_label: clip noisy target labels back into [0, 1].
    Returns (netG, netD), still on GPU.

    Fixes vs. previous version: DataLoader iterators are advanced with
    next(...) (the `.next()` method was removed in PyTorch >= 1.9), and an
    unknown loss_type now raises ValueError instead of a confusing NameError.
    '''
    assert train_images.max()>1.0 and train_images.min()>=0 and train_images.max()<=255.0
    assert train_labels.min()>=0 and train_labels.max()<=1.0
    unique_train_labels = np.sort(np.array(list(set(train_labels))))
    netG = netG.cuda()
    netD = netD.cuda()
    net_y2h = net_y2h.cuda()
    net_y2h.eval()
    optimizerG = torch.optim.Adam(netG.parameters(), lr=lr_g, betas=(0.5, 0.999))
    optimizerD = torch.optim.Adam(netD.parameters(), lr=lr_d, betas=(0.5, 0.999))
    if path_to_ckpt is not None and resume_niters>0:
        save_file = path_to_ckpt + "/CcGAN_checkpoint_niters_{}.pth".format(resume_niters)
        checkpoint = torch.load(save_file)
        netG.load_state_dict(checkpoint['netG_state_dict'])
        netD.load_state_dict(checkpoint['netD_state_dict'])
        optimizerG.load_state_dict(checkpoint['optimizerG_state_dict'])
        optimizerD.load_state_dict(checkpoint['optimizerD_state_dict'])
        torch.set_rng_state(checkpoint['rng_state'])
    #end if
    # printed images with labels between the 5-th quantile and 95-th quantile of training labels
    n_row = 10; n_col = n_row
    z_fixed = torch.randn(n_row*n_col, dim_gan, dtype=torch.float).cuda()
    start_label = np.quantile(train_labels, 0.05)
    end_label = np.quantile(train_labels, 0.95)
    selected_labels = np.linspace(start_label, end_label, num=n_row)
    y_fixed = np.zeros(n_row*n_col)
    for i in range(n_row):
        curr_label = selected_labels[i]
        for j in range(n_col):
            y_fixed[i*n_col+j] = curr_label
    print(y_fixed)
    y_fixed = torch.from_numpy(y_fixed).type(torch.float).view(-1,1).cuda()
    start_time = timeit.default_timer()
    for niter in range(resume_niters, niters):
        for _ in range(d_niters):
            ''' Train Discriminator '''
            ## randomly draw batch_size_disc y's from unique_train_labels
            batch_target_labels_in_dataset = np.random.choice(unique_train_labels, size=batch_size_disc, replace=True)
            ## add Gaussian noise; we estimate image distribution conditional on these labels
            batch_epsilons = np.random.normal(0, kernel_sigma, batch_size_disc)
            batch_target_labels = batch_target_labels_in_dataset + batch_epsilons
            if clip_label:
                batch_target_labels = np.clip(batch_target_labels, 0.0, 1.0)
            ## for each target label: pick one real image whose label lies in the
            ## vicinity, and draw one fake label from that same vicinity
            batch_real_indx = np.zeros(batch_size_disc, dtype=int)
            batch_fake_labels = np.zeros(batch_size_disc)
            for j in range(batch_size_disc):
                ## index for real images
                if threshold_type == "hard":
                    indx_real_in_vicinity = np.where(np.abs(train_labels-batch_target_labels[j])<= kappa)[0]
                else:
                    # reverse the weight function for SVDL
                    indx_real_in_vicinity = np.where((train_labels-batch_target_labels[j])**2 <= -np.log(nonzero_soft_weight_threshold)/kappa)[0]
                ## if the max gap between two consecutive ordered unique labels is
                ## large, the vicinity may be empty: resample the target label until
                ## it contains at least one real image
                while len(indx_real_in_vicinity)<1:
                    batch_epsilons_j = np.random.normal(0, kernel_sigma, 1)
                    batch_target_labels[j] = batch_target_labels_in_dataset[j] + batch_epsilons_j
                    if clip_label:
                        batch_target_labels = np.clip(batch_target_labels, 0.0, 1.0)
                    # index for real images
                    if threshold_type == "hard":
                        indx_real_in_vicinity = np.where(np.abs(train_labels-batch_target_labels[j])<= kappa)[0]
                    else:
                        # reverse the weight function for SVDL
                        indx_real_in_vicinity = np.where((train_labels-batch_target_labels[j])**2 <= -np.log(nonzero_soft_weight_threshold)/kappa)[0]
                #end while len(indx_real_in_vicinity)<1
                assert len(indx_real_in_vicinity)>=1
                batch_real_indx[j] = np.random.choice(indx_real_in_vicinity, size=1)[0]
                ## labels for fake images generation
                if threshold_type == "hard":
                    lb = batch_target_labels[j] - kappa
                    ub = batch_target_labels[j] + kappa
                else:
                    lb = batch_target_labels[j] - np.sqrt(-np.log(nonzero_soft_weight_threshold)/kappa)
                    ub = batch_target_labels[j] + np.sqrt(-np.log(nonzero_soft_weight_threshold)/kappa)
                lb = max(0.0, lb); ub = min(ub, 1.0)
                assert lb<=ub
                assert lb>=0 and ub>=0
                assert lb<=1 and ub<=1
                batch_fake_labels[j] = np.random.uniform(lb, ub, size=1)[0]
            #end for j
            ## draw the real image batch from the training set
            batch_real_images = train_images[batch_real_indx]
            assert batch_real_images.max()>1
            batch_real_labels = train_labels[batch_real_indx]
            batch_real_labels = torch.from_numpy(batch_real_labels).type(torch.float).cuda()
            ## normalize real images via the dataset wrapper
            trainset = IMGs_dataset(batch_real_images, labels=None, normalize=True)
            train_dataloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size_disc, shuffle=False)
            train_dataloader = iter(train_dataloader)
            ## FIX: DataLoader iterators implement __next__, not .next();
            ## `train_dataloader.next()` raises AttributeError on PyTorch >= 1.9
            batch_real_images = next(train_dataloader)
            assert len(batch_real_images) == batch_size_disc
            batch_real_images = batch_real_images.type(torch.float).cuda()
            assert batch_real_images.max().item()<=1
            ## generate the fake image batch
            batch_fake_labels = torch.from_numpy(batch_fake_labels).type(torch.float).cuda()
            z = torch.randn(batch_size_disc, dim_gan, dtype=torch.float).cuda()
            batch_fake_images = netG(z, net_y2h(batch_fake_labels))
            ## target labels on gpu
            batch_target_labels = torch.from_numpy(batch_target_labels).type(torch.float).cuda()
            ## weight vector: Gaussian vicinity weights for SVDL, uniform for HVDL
            if threshold_type == "soft":
                real_weights = torch.exp(-kappa*(batch_real_labels-batch_target_labels)**2).cuda()
                fake_weights = torch.exp(-kappa*(batch_fake_labels-batch_target_labels)**2).cuda()
            else:
                real_weights = torch.ones(batch_size_disc, dtype=torch.float).cuda()
                fake_weights = torch.ones(batch_size_disc, dtype=torch.float).cuda()
            # forward pass
            if use_DiffAugment:
                real_dis_out = netD(DiffAugment(batch_real_images, policy=policy), net_y2h(batch_target_labels))
                fake_dis_out = netD(DiffAugment(batch_fake_images.detach(), policy=policy), net_y2h(batch_target_labels))
            else:
                real_dis_out = netD(batch_real_images, net_y2h(batch_target_labels))
                fake_dis_out = netD(batch_fake_images.detach(), net_y2h(batch_target_labels))
            if loss_type == "vanilla":
                real_dis_out = torch.nn.Sigmoid()(real_dis_out)
                fake_dis_out = torch.nn.Sigmoid()(fake_dis_out)
                d_loss_real = - torch.log(real_dis_out+1e-20)
                d_loss_fake = - torch.log(1-fake_dis_out+1e-20)
            elif loss_type == "hinge":
                d_loss_real = torch.nn.ReLU()(1.0 - real_dis_out)
                d_loss_fake = torch.nn.ReLU()(1.0 + fake_dis_out)
            else:
                # previously an unknown loss_type fell through to a NameError; fail loudly instead
                raise ValueError("Unsupported gan_loss_type: {}".format(loss_type))
            d_loss = torch.mean(real_weights.view(-1) * d_loss_real.view(-1)) + torch.mean(fake_weights.view(-1) * d_loss_fake.view(-1))
            optimizerD.zero_grad()
            d_loss.backward()
            optimizerD.step()
        ##end D update
        ''' Train Generator '''
        netG.train()
        # generate fake images
        ## randomly draw batch_size_gene y's from unique_train_labels
        batch_target_labels_in_dataset = np.random.choice(unique_train_labels, size=batch_size_gene, replace=True)
        ## add Gaussian noise; we estimate image distribution conditional on these labels
        batch_epsilons = np.random.normal(0, kernel_sigma, batch_size_gene)
        batch_target_labels = batch_target_labels_in_dataset + batch_epsilons
        if clip_label:
            batch_target_labels = np.clip(batch_target_labels, 0.0, 1.0)
        batch_target_labels = torch.from_numpy(batch_target_labels).type(torch.float).cuda()
        z = torch.randn(batch_size_gene, dim_gan, dtype=torch.float).cuda()
        batch_fake_images = netG(z, net_y2h(batch_target_labels))
        # loss
        if use_DiffAugment:
            dis_out = netD(DiffAugment(batch_fake_images, policy=policy), net_y2h(batch_target_labels))
        else:
            dis_out = netD(batch_fake_images, net_y2h(batch_target_labels))
        if loss_type == "vanilla":
            dis_out = torch.nn.Sigmoid()(dis_out)
            g_loss = - torch.mean(torch.log(dis_out+1e-20))
        elif loss_type == "hinge":
            g_loss = - dis_out.mean()
        else:
            raise ValueError("Unsupported gan_loss_type: {}".format(loss_type))
        # backward
        optimizerG.zero_grad()
        g_loss.backward()
        optimizerG.step()
        # print loss
        if (niter+1) % 20 == 0:
            print ("CcGAN: [Iter %d/%d] [D loss: %.4f] [G loss: %.4f] [real prob: %.3f] [fake prob: %.3f] [Time: %.4f]" % (niter+1, niters, d_loss.item(), g_loss.item(), real_dis_out.mean().item(), fake_dis_out.mean().item(), timeit.default_timer()-start_time))
        if (niter+1) % 100 == 0:
            # save a fixed-noise sample grid spanning the label range
            netG.eval()
            with torch.no_grad():
                gen_imgs = netG(z_fixed, net_y2h(y_fixed))
                gen_imgs = gen_imgs.detach().cpu()
            save_image(gen_imgs.data, save_images_folder + '/{}.png'.format(niter+1), nrow=n_row, normalize=True)
        if path_to_ckpt is not None and ((niter+1) % save_niters_freq == 0 or (niter+1) == niters):
            save_file = path_to_ckpt + "/CcGAN_checkpoint_niters_{}.pth".format(niter+1)
            os.makedirs(os.path.dirname(save_file), exist_ok=True)
            torch.save({
                    'netG_state_dict': netG.state_dict(),
                    'netD_state_dict': netD.state_dict(),
                    'optimizerG_state_dict': optimizerG.state_dict(),
                    'optimizerD_state_dict': optimizerD.state_dict(),
                    'rng_state': torch.get_rng_state()
            }, save_file)
    #end for niter
    return netG, netD
def SampCcGAN_given_labels(netG, net_y2h, labels, batch_size = 100, to_numpy=True, verbose=True):
    """Sample one fake image per entry of `labels` from the CcGAN generator.

    labels: a numpy array of labels normalized to [0, 1].
    Returns (fake_images, fake_labels); images are a numpy array when
    `to_numpy`, otherwise a CPU tensor. Both nets are moved back to CPU.
    """
    assert labels.min() >= 0 and labels.max() <= 1.0
    nfake = len(labels)
    batch_size = min(batch_size, nfake)
    # pad the label array so the final batch is always full; extras are trimmed below
    fake_labels = np.concatenate((labels, labels[0:batch_size]))
    image_batches = []
    netG = netG.cuda()
    netG.eval()
    net_y2h = net_y2h.cuda()
    net_y2h.eval()
    with torch.no_grad():
        progress = SimpleProgressBar() if verbose else None
        got = 0
        while got < nfake:
            z = torch.randn(batch_size, dim_gan, dtype=torch.float).cuda()
            y = torch.from_numpy(fake_labels[got:(got + batch_size)]).type(torch.float).view(-1, 1).cuda()
            image_batches.append(netG(z, net_y2h(y)).cpu())
            got += batch_size
            if progress is not None:
                progress.update(min(float(got) / nfake, 1) * 100)
        ##end while
    # drop the padded extra entries
    fake_images = torch.cat(image_batches, dim=0)[0:nfake]
    fake_labels = fake_labels[0:nfake]
    if to_numpy:
        fake_images = fake_images.numpy()
    netG = netG.cpu()
    net_y2h = net_y2h.cpu()
    return fake_images, fake_labels
cGAN-KD | cGAN-KD-main/SteeringAngle/models/shufflenetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel-shuffle layer used by ShuffleNet-style architectures.

    Splits the channel axis into `groups` groups and interleaves them so
    information can mix across group-convolution boundaries.
    """
    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        """[N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]."""
        batch, channels, height, width = x.size()
        grouped = x.view(batch, self.groups, channels // self.groups, height, width)
        shuffled = grouped.permute(0, 2, 1, 3, 4)
        return shuffled.reshape(batch, channels, height, width)
class SplitBlock(nn.Module):
    """Split a feature map along the channel axis at a fixed ratio."""
    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        """Return (first `ratio` fraction of channels, remaining channels)."""
        split_at = int(x.size(1) * self.ratio)
        left = x[:, :split_at, :, :]
        right = x[:, split_at:, :, :]
        return left, right
class BasicBlock(nn.Module):
    """ShuffleNetV2 stride-1 unit: split channels, transform one half, concat, shuffle.

    When ``is_last`` is True, ``forward`` also returns the pre-activation
    tensor (the concat of the untouched half with the transformed half
    *before* its final ReLU), which distillation code can consume.
    """
    def __init__(self, in_channels, split_ratio=0.5, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.split = SplitBlock(split_ratio)
        in_channels = int(in_channels * split_ratio)  # width of the transformed branch
        # 1x1 pointwise -> 3x3 depthwise -> 1x1 pointwise, each followed by BN.
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        self.conv3 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        # x1 passes through untouched; x2 goes through the conv stack.
        x1, x2 = self.split(x)
        out = F.relu(self.bn1(self.conv1(x2)))
        out = self.bn2(self.conv2(out))
        preact = self.bn3(self.conv3(out))
        out = F.relu(preact)
        # out = F.relu(self.bn3(self.conv3(out)))
        preact = torch.cat([x1, preact], 1)
        out = torch.cat([x1, out], 1)
        out = self.shuffle(out)
        if self.is_last:
            return out, preact
        else:
            return out
class DownBlock(nn.Module):
    """ShuffleNetV2 stride-2 (downsampling) unit.

    Both branches halve the spatial resolution; their outputs are
    concatenated (channel count becomes ``out_channels``) and shuffled.
    """
    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2
        # left branch: depthwise stride-2 conv -> 1x1 projection
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # right branch: 1x1 -> depthwise stride-2 -> 1x1
        self.conv3 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(mid_channels)
        self.conv4 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
        self.bn4 = nn.BatchNorm2d(mid_channels)
        self.conv5 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(mid_channels)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        # left branch (no ReLU after the depthwise conv, per the paper)
        out1 = self.bn1(self.conv1(x))
        out1 = F.relu(self.bn2(self.conv2(out1)))
        # right branch
        out2 = F.relu(self.bn3(self.conv3(x)))
        out2 = self.bn4(self.conv4(out2))
        out2 = F.relu(self.bn5(self.conv5(out2)))
        # concat the two halves, then shuffle channels across them
        out = torch.cat([out1, out2], 1)
        out = self.shuffle(out)
        return out
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 backbone adapted for 64x64 inputs and scalar regression.

    The stem is a stride-2 4x4 conv (halves resolution), and the classifier
    is replaced by an MLP head ending in ReLU, so the predicted value is
    non-negative (labels are presumably normalized to be >= 0 -- confirm
    with the training code).
    """
    def __init__(self, net_size):
        super(ShuffleNetV2, self).__init__()
        # `configs` (module-level dict) maps net_size to widths/depths.
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
        #                        stride=1, padding=1, bias=False)
        self.conv1 = nn.Conv2d(3, 24, kernel_size=4, padding=1, stride=2, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        # Regression head: final ReLU clamps the output to be non-negative.
        self.linear = nn.Sequential(
            nn.Linear(out_channels[3], 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
    def _make_layer(self, out_channels, num_blocks):
        # One stride-2 DownBlock followed by `num_blocks` stride-1 BasicBlocks;
        # the last BasicBlock has is_last=True, so the stage returns (out, preact).
        layers = [DownBlock(self.in_channels, out_channels)]
        for i in range(num_blocks):
            layers.append(BasicBlock(out_channels, is_last=(i == num_blocks - 1)))
            self.in_channels = out_channels
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        # out = F.max_pool2d(out, 3, stride=2, padding=1)
        # Each stage returns (features, preact); the preact is discarded here.
        out, _ = self.layer1(out)
        out, _ = self.layer2(out)
        out, _ = self.layer3(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
# Width-multiplier presets for ShuffleNetV2: per-stage output channel widths
# (last entry is the 1x1 conv before the head) and per-stage block counts.
configs = {
    0.2: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 3, 3)
    },
    0.3: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 7, 3)
    },
    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },
    1: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2: {
        'out_channels': (224, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
def ShuffleV2(**kwargs):
    """Build the width-multiplier-1.0 ShuffleNetV2 regression model."""
    return ShuffleNetV2(net_size=1, **kwargs)
if __name__ == '__main__':
net = ShuffleV2()
x = torch.randn(4, 3, 64, 64)
import time
a = time.time()
out = net(x)
b = time.time()
print(b - a)
print(out.shape)
| 6,654 | 32.442211 | 107 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/SAGAN.py | '''
SAGAN arch
Adapted from https://github.com/voletiv/self-attention-GAN-pytorch/blob/master/sagan_models.py
'''
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import spectral_norm
from torch.nn.init import xavier_uniform_
def init_weights(m):
    """Xavier-initialise Linear/Conv2d weights and zero their biases.

    Intended for use with ``Module.apply``. Matches exact types only
    (deliberately not ``isinstance``), so subclasses are untouched.
    """
    if type(m) in (nn.Linear, nn.Conv2d):
        xavier_uniform_(m.weight)
        if m.bias is not None:
            m.bias.data.fill_(0.)
def snconv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
    """Conv2d whose weight is wrapped with spectral normalisation."""
    conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     dilation=dilation, groups=groups, bias=bias)
    return spectral_norm(conv)
def snlinear(in_features, out_features, bias=True):
    """Linear layer whose weight is wrapped with spectral normalisation."""
    fc = nn.Linear(in_features=in_features, out_features=out_features, bias=bias)
    return spectral_norm(fc)
class Self_Attn(nn.Module):
    """SAGAN self-attention layer with spectrally-normalised 1x1 convs.

    Queries/keys use C/8 channels and values C/2; the phi and g paths are
    max-pooled 2x so the attention map is (H*W) x (H*W/4). The result is a
    learned residual: out = x + sigma * attn_g, with sigma initialised to 0.
    """
    def __init__(self, in_channels):
        super(Self_Attn, self).__init__()
        self.in_channels = in_channels
        self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_attn = snconv2d(in_channels=in_channels//2, out_channels=in_channels, kernel_size=1, stride=1, padding=0)
        self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
        self.softmax = nn.Softmax(dim=-1)
        self.sigma = nn.Parameter(torch.zeros(1))
    def forward(self, x):
        """
        inputs :
            x : input feature maps (B x C x H x W)
        returns :
            out : x + sigma * (self-attention over x), same shape as x
        """
        _, ch, h, w = x.size()
        # Theta (query) path: B x C/8 x (H*W)
        theta = self.snconv1x1_theta(x)
        theta = theta.view(-1, ch//8, h*w)
        # Phi (key) path, pooled 2x: B x C/8 x (H*W/4)
        phi = self.snconv1x1_phi(x)
        phi = self.maxpool(phi)
        phi = phi.view(-1, ch//8, h*w//4)
        # Attention map: B x (H*W) x (H*W/4), softmax over the key axis
        attn = torch.bmm(theta.permute(0, 2, 1), phi)
        attn = self.softmax(attn)
        # g (value) path, pooled 2x: B x C/2 x (H*W/4)
        g = self.snconv1x1_g(x)
        g = self.maxpool(g)
        g = g.view(-1, ch//2, h*w//4)
        # Weighted values, reshaped back to a feature map and projected to C channels
        attn_g = torch.bmm(g, attn.permute(0, 2, 1))
        attn_g = attn_g.view(-1, ch//2, h, w)
        attn_g = self.snconv1x1_attn(attn_g)
        # Residual with learned gate sigma (starts at 0 => identity at init)
        out = x + self.sigma*attn_g
        return out
'''
Generator
'''
class ConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, dim_embed):
super().__init__()
self.num_features = num_features
self.bn = nn.BatchNorm2d(num_features, momentum=0.001, affine=False)
self.embed_gamma = nn.Linear(dim_embed, num_features, bias=False)
self.embed_beta = nn.Linear(dim_embed, num_features, bias=False)
def forward(self, x, y):
out = self.bn(x)
gamma = self.embed_gamma(y).view(-1, self.num_features, 1, 1)
beta = self.embed_beta(y).view(-1, self.num_features, 1, 1)
out = out + gamma*out + beta
return out
class GenBlock(nn.Module):
    """Generator residual block that upsamples 2x (nearest-neighbour).

    Main path: CBN -> ReLU -> upsample -> 3x3 conv -> CBN -> ReLU -> 3x3 conv.
    Skip path: upsample -> 1x1 conv. The two are summed.
    """
    def __init__(self, in_channels, out_channels, dim_embed):
        super(GenBlock, self).__init__()
        self.cond_bn1 = ConditionalBatchNorm2d(in_channels, dim_embed)
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d1 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.cond_bn2 = ConditionalBatchNorm2d(out_channels, dim_embed)
        self.snconv2d2 = snconv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.snconv2d0 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x, labels):
        # `labels` is the conditioning embedding fed to both conditional BN layers.
        x0 = x
        x = self.cond_bn1(x, labels)
        x = self.relu(x)
        x = F.interpolate(x, scale_factor=2, mode='nearest') # upsample
        x = self.snconv2d1(x)
        x = self.cond_bn2(x, labels)
        x = self.relu(x)
        x = self.snconv2d2(x)
        x0 = F.interpolate(x0, scale_factor=2, mode='nearest') # upsample skip path too
        x0 = self.snconv2d0(x0)
        out = x + x0
        return out
class SAGAN_Generator(nn.Module):
    """SAGAN generator: z (n x z_dim) + label embedding -> 64x64 image in [-1, 1].

    Four upsampling GenBlocks (4x4 -> 64x64) with self-attention at 32x32;
    output channels `nc`, squashed by tanh.
    """
    def __init__(self, z_dim=256, nc=3, gene_ch=128, dim_embed=128):
        super(SAGAN_Generator, self).__init__()
        self.z_dim = z_dim
        self.gene_ch = gene_ch
        self.snlinear0 = snlinear(in_features=z_dim, out_features=gene_ch*16*4*4)
        self.block1 = GenBlock(gene_ch*16, gene_ch*8, dim_embed)
        self.block2 = GenBlock(gene_ch*8, gene_ch*4, dim_embed)
        self.block3 = GenBlock(gene_ch*4, gene_ch*2, dim_embed)
        self.self_attn = Self_Attn(gene_ch*2)
        self.block4 = GenBlock(gene_ch*2, gene_ch, dim_embed)
        self.bn = nn.BatchNorm2d(gene_ch, eps=1e-5, momentum=0.0001, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d1 = snconv2d(in_channels=gene_ch, out_channels=nc, kernel_size=3, stride=1, padding=1)
        self.tanh = nn.Tanh()
        # Weight init (Xavier for Linear/Conv2d, zero biases)
        self.apply(init_weights)
    def forward(self, z, labels):
        # z: n x z_dim; labels: n x dim_embed conditioning embeddings
        out = self.snlinear0(z) # project to gene_ch*16 * 4 * 4
        out = out.view(-1, self.gene_ch*16, 4, 4) # 4 x 4
        out = self.block1(out, labels) # 8 x 8
        out = self.block2(out, labels) # 16 x 16
        out = self.block3(out, labels) # 32 x 32
        out = self.self_attn(out) # 32 x 32
        out = self.block4(out, labels) # 64 x 64
        out = self.bn(out)
        out = self.relu(out)
        out = self.snconv2d1(out)
        out = self.tanh(out)
        return out
'''
Discriminator
'''
class DiscOptBlock(nn.Module):
    """Optimised first discriminator block (BigGAN/SAGAN style).

    Unlike DiscBlock there is no leading ReLU (the input is raw pixels), and
    the skip path pools *before* its 1x1 conv. Always downsamples 2x.
    """
    def __init__(self, in_channels, out_channels):
        super(DiscOptBlock, self).__init__()
        self.snconv2d1 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d2 = snconv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.downsample = nn.AvgPool2d(2)
        self.snconv2d0 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        x0 = x
        x = self.snconv2d1(x)
        x = self.relu(x)
        x = self.snconv2d2(x)
        x = self.downsample(x)
        # skip path: pool first, then project channels
        x0 = self.downsample(x0)
        x0 = self.snconv2d0(x0)
        out = x + x0
        return out
class DiscBlock(nn.Module):
    """Discriminator residual block with optional 2x average-pool downsampling.

    The 1x1 skip conv is applied when downsampling or when the channel count
    changes; otherwise the skip is the identity.
    """
    def __init__(self, in_channels, out_channels):
        super(DiscBlock, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d1 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.snconv2d2 = snconv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.downsample = nn.AvgPool2d(2)
        self.ch_mismatch = False
        if in_channels != out_channels:
            self.ch_mismatch = True
        # NOTE: snconv2d0 is created unconditionally but only used when
        # downsampling or when channels mismatch (see forward).
        self.snconv2d0 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x, downsample=True):
        x0 = x
        x = self.relu(x)
        x = self.snconv2d1(x)
        x = self.relu(x)
        x = self.snconv2d2(x)
        if downsample:
            x = self.downsample(x)
        # project/pool the skip path only when shapes would otherwise differ
        if downsample or self.ch_mismatch:
            x0 = self.snconv2d0(x0)
            if downsample:
                x0 = self.downsample(x0)
        out = x + x0
        return out
class SAGAN_Discriminator(nn.Module):
    """Projection discriminator for 64x64 inputs (SAGAN style).

    Score = linear(features) + <features, embed(labels)> (projection term),
    where `labels` is a dim_embed conditioning vector, not a class index.
    """
    def __init__(self, nc=3, disc_ch=128, dim_embed=128):
        super(SAGAN_Discriminator, self).__init__()
        self.disc_ch = disc_ch
        self.opt_block1 = DiscOptBlock(nc, disc_ch)
        self.self_attn = Self_Attn(disc_ch)
        self.block1 = DiscBlock(disc_ch, disc_ch*2)
        self.block2 = DiscBlock(disc_ch*2, disc_ch*4)
        self.block3 = DiscBlock(disc_ch*4, disc_ch*8)
        self.block4 = DiscBlock(disc_ch*8, disc_ch*16)
        self.relu = nn.ReLU(inplace=True)
        self.snlinear1 = snlinear(in_features=disc_ch*16*4*4, out_features=1)
        self.sn_embedding1 = snlinear(dim_embed, disc_ch*16*4*4, bias=False)
        # Weight init (Xavier via init_weights, then re-init the projection embedding)
        self.apply(init_weights)
        xavier_uniform_(self.sn_embedding1.weight)
    def forward(self, x, labels):
        # 64x64 input
        out = self.opt_block1(x) # 32 x 32
        out = self.self_attn(out) # 32 x 32
        out = self.block1(out) # 16 x 16
        out = self.block2(out) # 8 x 8
        out = self.block3(out) # 4 x 4
        out = self.block4(out, downsample=False) # 4 x 4
        out = self.relu(out) # n x disc_ch*16 x 4 x 4
        out = out.view(-1, self.disc_ch*16*4*4) # n x (disc_ch*16*4*4)
        output1 = torch.squeeze(self.snlinear1(out)) # n
        # Projection term: inner product of features with label embedding
        h_labels = self.sn_embedding1(labels) # n x (disc_ch*16*4*4)
        proj = torch.mul(out, h_labels) # n x (disc_ch*16*4*4)
        output2 = torch.sum(proj, dim=[1]) # n
        # Out
        output = output1 + output2 # n
        return output
if __name__ == "__main__":
netG = SAGAN_Generator(z_dim=256, gene_ch=128, dim_embed=128).cuda()
netD = SAGAN_Discriminator(disc_ch=128, dim_embed=128).cuda()
netG = nn.DataParallel(netG)
netD = nn.DataParallel(netD)
N=8
z = torch.randn(N, 256).cuda()
y = torch.randn(N, 128).cuda()
x = netG(z,y)
o = netD(x,y)
print(x.size())
print(o.size())
| 10,076 | 33.748276 | 129 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/efficientnet.py | '''EfficientNet in PyTorch.
Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    return x.sigmoid().mul(x)
def drop_connect(x, drop_ratio):
    """Stochastic-depth mask applied *in place*.

    Zeroes whole samples of the batch with probability `drop_ratio` and
    rescales survivors by 1/keep_prob so the expectation is unchanged.
    Note: mutates and returns `x` itself.
    """
    keep_prob = 1.0 - drop_ratio
    sample_mask = torch.empty([x.shape[0], 1, 1, 1],
                              dtype=x.dtype, device=x.device).bernoulli_(keep_prob)
    return x.div_(keep_prob).mul_(sample_mask)
class SE(nn.Module):
    '''Squeeze-and-Excitation block with Swish.

    Global-average-pools to 1x1, bottlenecks to se_channels, expands back,
    and gates the input channel-wise with a sigmoid.
    '''
    def __init__(self, in_channels, se_channels):
        super(SE, self).__init__()
        self.se1 = nn.Conv2d(in_channels, se_channels,
                             kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_channels, in_channels,
                             kernel_size=1, bias=True)
    def forward(self, x):
        # squeeze: B x C x 1 x 1
        out = F.adaptive_avg_pool2d(x, (1, 1))
        # excite: bottleneck -> expand -> per-channel gate in (0, 1)
        out = swish(self.se1(out))
        out = self.se2(out).sigmoid()
        out = x * out
        return out
class Block(nn.Module):
    '''MBConv block: expansion + depthwise + pointwise + squeeze-excitation.

    Uses an MV2-style skip connection when stride is 1 and channel counts
    match; drop-connect is applied on the residual branch during training.
    '''
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio=1,
                 se_ratio=0.,
                 drop_rate=0.):
        super(Block, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio
        # Expansion (1x1 conv widening channels by expand_ratio)
        channels = expand_ratio * in_channels
        self.conv1 = nn.Conv2d(in_channels,
                               channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        # Depthwise conv (padding keeps spatial size for k=3 and k=5)
        self.conv2 = nn.Conv2d(channels,
                               channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(1 if kernel_size == 3 else 2),
                               groups=channels,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        # SE layers (bottleneck width derived from the *input* channels)
        se_channels = int(in_channels * se_ratio)
        self.se = SE(channels, se_channels)
        # Output projection (linear 1x1, no activation)
        self.conv3 = nn.Conv2d(channels,
                               out_channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        # Skip connection if in and out shapes are the same (MV-V2 style)
        self.has_skip = (stride == 1) and (in_channels == out_channels)
    def forward(self, x):
        # expand_ratio == 1 means no expansion conv is applied
        out = x if self.expand_ratio == 1 else swish(self.bn1(self.conv1(x)))
        out = swish(self.bn2(self.conv2(out)))
        out = self.se(out)
        out = self.bn3(self.conv3(out))
        if self.has_skip:
            # drop_connect only during training; note it mutates `out` in place
            if self.training and self.drop_rate > 0:
                out = drop_connect(out, self.drop_rate)
            out = out + x
        return out
class EfficientNet(nn.Module):
    """EfficientNet backbone adapted for 64x64 inputs and scalar regression.

    Differences from the classification reference: stride-1 stem (small
    inputs) and an MLP head ending in ReLU, so predictions are non-negative.
    ``cfg`` must provide 'expansion', 'out_channels', 'num_blocks',
    'kernel_size', 'stride', 'dropout_rate' and 'drop_connect_rate'.
    """
    def __init__(self, cfg):
        super(EfficientNet, self).__init__()
        self.cfg = cfg
        self.conv1 = nn.Conv2d(3,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_channels=32)
        self.linear = nn.Sequential(
            nn.Linear(cfg['out_channels'][-1], 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
    def _make_layers(self, in_channels):
        layers = []
        cfg = [self.cfg[k] for k in ['expansion', 'out_channels', 'num_blocks', 'kernel_size',
                                     'stride']]
        b = 0
        blocks = sum(self.cfg['num_blocks'])
        for expansion, out_channels, num_blocks, kernel_size, stride in zip(*cfg):
            strides = [stride] + [1] * (num_blocks - 1)
            for stride in strides:
                # Drop-connect rate ramps up linearly with block depth,
                # as in the reference implementation.
                drop_rate = self.cfg['drop_connect_rate'] * b / blocks
                layers.append(
                    Block(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          expansion,
                          se_ratio=0.25,
                          drop_rate=drop_rate))
                in_channels = out_channels
                # BUG FIX: `b` was never incremented, so drop_rate was
                # always 0 and stochastic depth was effectively disabled.
                b += 1
        return nn.Sequential(*layers)
    def forward(self, x):
        out = swish(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        # Head dropout is applied only in training mode.
        dropout_rate = self.cfg['dropout_rate']
        if self.training and dropout_rate > 0:
            out = F.dropout(out, p=dropout_rate)
        out = self.linear(out)
        return out
def EfficientNetB0():
    """EfficientNet-B0 stage configuration (adapted here for 64x64 inputs)."""
    cfg_b0 = {
        'num_blocks': [1, 2, 2, 3, 3, 4, 1],
        'expansion': [1, 6, 6, 6, 6, 6, 6],
        'out_channels': [16, 24, 40, 80, 112, 192, 320],
        'kernel_size': [3, 3, 5, 3, 5, 5, 3],
        'stride': [1, 2, 2, 2, 1, 2, 1],
        'dropout_rate': 0.2,
        'drop_connect_rate': 0.2,
    }
    return EfficientNet(cfg_b0)
def test():
    # Smoke test: one forward pass of EfficientNet-B0 on a 64x64 batch.
    net = EfficientNetB0()
    x = torch.randn(2, 3, 64, 64)
    y = net(x)
    print(y.shape)
if __name__ == '__main__':
test() | 5,970 | 31.275676 | 106 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/ResNet_embed.py | '''
ResNet-based model to map an image from pixel space to a features space.
Need to be pretrained on the dataset.
if isometric_map = True, there is an extra step (elf.classifier_1 = nn.Linear(512, 32*32*3)) to increase the dimension of the feature map from 512 to 32*32*3. This selection is for desity-ratio estimation in feature space.
codes are based on
@article{
zhang2018mixup,
title={mixup: Beyond Empirical Risk Minimization},
author={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},
journal={International Conference on Learning Representations},
year={2018},
url={https://openreview.net/forum?id=r1Ddp1-Rb},
}
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
NC = 3
IMG_SIZE = 64
DIM_EMBED = 128
#------------------------------------------------------------------------------
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        # 1x1 projection on the skip path only when shapes change.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet_embed(nn.Module):
    """ResNet backbone mapping a 64x64 image to a dim_embed feature vector.

    forward returns (out, features): `features` is the embedding (after the
    x2h_res MLP) and `out` is a non-negative scalar regression prediction
    computed from it by the h2y head.
    """
    def __init__(self, block, num_blocks, nc=NC, dim_embed=DIM_EMBED):
        super(ResNet_embed, self).__init__()
        self.in_planes = 64
        self.main = nn.Sequential(
            nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False),  # h=h
            # nn.Conv2d(nc, 64, kernel_size=4, stride=2, padding=1, bias=False), # h=h/2
            nn.BatchNorm2d(64),
            nn.ReLU(),
            # self._make_layer(block, 64, num_blocks[0], stride=1),  # h=h
            self._make_layer(block, 64, num_blocks[0], stride=2),  # h=h/2 32
            self._make_layer(block, 128, num_blocks[1], stride=2), # h=h/2 16
            self._make_layer(block, 256, num_blocks[2], stride=2), # h=h/2 8
            self._make_layer(block, 512, num_blocks[3], stride=2), # h=h/2 4
            # nn.AvgPool2d(kernel_size=4)
            nn.AdaptiveAvgPool2d((1, 1))
        )
        # project pooled features (512) into the embedding space
        self.x2h_res = nn.Sequential(
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, dim_embed),
            nn.BatchNorm1d(dim_embed),
            nn.ReLU(),
        )
        # regression head on top of the embedding; ReLU => non-negative output
        self.h2y = nn.Sequential(
            nn.Linear(dim_embed, 1),
            nn.ReLU()
        )
    def _make_layer(self, block, planes, num_blocks, stride):
        # first block may downsample; the rest keep resolution
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        features = self.main(x)
        features = features.view(features.size(0), -1)
        features = self.x2h_res(features)
        out = self.h2y(features)
        return out, features
def ResNet18_embed(dim_embed=DIM_EMBED):
    """ResNet-18 embedding network (BasicBlock, depths 2-2-2-2)."""
    return ResNet_embed(BasicBlock, [2,2,2,2], dim_embed=dim_embed)
def ResNet34_embed(dim_embed=DIM_EMBED):
    """ResNet-34 embedding network (BasicBlock, depths 3-4-6-3)."""
    return ResNet_embed(BasicBlock, [3,4,6,3], dim_embed=dim_embed)
def ResNet50_embed(dim_embed=DIM_EMBED):
    """ResNet-50 embedding network (Bottleneck, depths 3-4-6-3)."""
    return ResNet_embed(Bottleneck, [3,4,6,3], dim_embed=dim_embed)
#------------------------------------------------------------------------------
# map labels to the embedding space
class model_y2h(nn.Module):
    """MLP that lifts a scalar label into the dim_embed embedding space.

    Uses GroupNorm instead of BatchNorm so it also works with batch size 1.
    """
    def __init__(self, dim_embed=DIM_EMBED):
        super(model_y2h, self).__init__()
        self.main = nn.Sequential(
            nn.Linear(1, dim_embed),
            # nn.BatchNorm1d(dim_embed),
            nn.GroupNorm(8, dim_embed),
            nn.ReLU(),
            nn.Linear(dim_embed, dim_embed),
            # nn.BatchNorm1d(dim_embed),
            nn.GroupNorm(8, dim_embed),
            nn.ReLU(),
            nn.Linear(dim_embed, dim_embed),
            # nn.BatchNorm1d(dim_embed),
            nn.GroupNorm(8, dim_embed),
            nn.ReLU(),
            nn.Linear(dim_embed, dim_embed),
            # nn.BatchNorm1d(dim_embed),
            nn.GroupNorm(8, dim_embed),
            nn.ReLU(),
            nn.Linear(dim_embed, dim_embed),
            nn.ReLU()
        )
    def forward(self, y):
        # +1e-8 keeps the input strictly positive -- presumably to avoid a
        # degenerate exactly-zero label; confirm against the training code.
        y = y.view(-1, 1) +1e-8
        # y = torch.exp(y.view(-1, 1))
        return self.main(y)
if __name__ == "__main__":
net = ResNet34_embed(dim_embed=128).cuda()
x = torch.randn(16,NC,IMG_SIZE,IMG_SIZE).cuda()
out, features = net(x)
print(out.size())
print(features.size())
net_y2h = model_y2h()
| 6,302 | 32.526596 | 222 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/autoencoder_extract.py | import torch
from torch import nn
class encoder_extract(nn.Module):
    """Conv encoder: 64x64x3 image -> flat bottleneck feature of size dim_bottleneck.

    Four stride-2 stages (64 -> 32 -> 16 -> 8 -> 4), then a fully-connected
    projection from ch*4 x 4 x 4 to the bottleneck.
    """
    def __init__(self, dim_bottleneck=64*64*3, ch=32):
        super(encoder_extract, self).__init__()
        self.ch = ch
        self.dim_bottleneck = dim_bottleneck
        self.conv = nn.Sequential(
            nn.Conv2d(3, ch, kernel_size=4, stride=2, padding=1), #h=h/2; 32
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, ch, kernel_size=4, stride=2, padding=1), #h=h/2; 16
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, ch*2, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch*2),
            nn.ReLU(True),
            nn.Conv2d(ch*2, ch*2, kernel_size=4, stride=2, padding=1), #h=h/2; 8
            nn.BatchNorm2d(ch*2),
            nn.ReLU(True),
            nn.Conv2d(ch*2, ch*4, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch*4),
            nn.ReLU(True),
            nn.Conv2d(ch*4, ch*4, kernel_size=4, stride=2, padding=1), #h=h/2; 4
            nn.BatchNorm2d(ch*4),
            nn.ReLU(True),
            nn.Conv2d(ch*4, ch*4, kernel_size=3, stride=1, padding=1), #h=h; 4
            nn.BatchNorm2d(ch*4),
            nn.ReLU(True),
        )
        self.fc = nn.Sequential(
            nn.Linear(ch*4*4*4, dim_bottleneck),
            nn.ReLU()
        )
    def forward(self, x):
        feature = self.conv(x)
        feature = feature.view(-1, self.ch*4*4*4)
        feature = self.fc(feature)
        return feature
class decoder_extract(nn.Module):
    """Deconv decoder: bottleneck feature -> 64x64x3 image in [-1, 1] (tanh).

    Mirrors encoder_extract: FC back to ch*4 x 4 x 4, then four 2x
    transposed-conv upsampling stages (4 -> 8 -> 16 -> 32 -> 64).
    """
    def __init__(self, dim_bottleneck=64*64*3, ch=32):
        super(decoder_extract, self).__init__()
        self.ch = ch
        self.dim_bottleneck = dim_bottleneck
        self.fc = nn.Sequential(
            nn.Linear(dim_bottleneck, ch*4*4*4),
            nn.BatchNorm1d(ch*4*4*4),
            nn.ReLU(True)
        )
        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(ch*4, ch*4, kernel_size=4, stride=2, padding=1), #h=2h; 8
            nn.BatchNorm2d(ch*4),
            nn.ReLU(True),
            nn.Conv2d(ch*4, ch*2, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch*2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ch*2, ch*2, kernel_size=4, stride=2, padding=1), #h=2h; 16
            nn.BatchNorm2d(ch*2),
            nn.ReLU(True),
            nn.Conv2d(ch*2, ch, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1), #h=2h; 32
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1), #h=2h; 64
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1), #h=h
            nn.BatchNorm2d(ch),
            nn.ReLU(True),
            nn.Conv2d(ch, 3, kernel_size=1, stride=1, padding=0), #h=h
            nn.Tanh()
        )
    def forward(self, feature):
        feature = self.fc(feature)
        feature = feature.view(-1, self.ch*4, 4, 4)
        out = self.deconv(feature)
        return out
class decoder_predict(nn.Module):
def __init__(self, dim_bottleneck=64*64*3):
super(decoder_predict, self).__init__()
self.dim_bottleneck = dim_bottleneck
self.predict = nn.Sequential(
nn.Linear(self.dim_bottleneck, 1024),
nn.BatchNorm1d(1024),
nn.ReLU(),
nn.Linear(1024, 512),
nn.BatchNorm1d(512),
nn.ReLU(),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.ReLU(),
nn.Linear(256, 1),
nn.ReLU(),
)
def forward(self, feature):
return self.predict(feature)
if __name__=="__main__":
#test
net_encoder = encoder_extract(dim_bottleneck=64*64*3, ch=64).cuda()
net_decoder = decoder_extract(dim_bottleneck=64*64*3, ch=64).cuda()
net_predict = decoder_predict(dim_bottleneck=64*64*3).cuda()
net_encoder = nn.DataParallel(net_encoder)
net_decoder = nn.DataParallel(net_decoder)
net_predict = nn.DataParallel(net_predict)
x = torch.randn(10, 3, 64,64).cuda()
f = net_encoder(x)
xh = net_decoder(f)
yh = net_predict(f)
print(f.size())
print(xh.size())
print(yh.size())
def get_parameter_number(net):
total_num = sum(p.numel() for p in net.parameters())
trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
return {'Total': total_num, 'Trainable': trainable_num}
print(get_parameter_number(net_encoder))
print(get_parameter_number(net_decoder))
print(get_parameter_number(net_predict))
| 5,073 | 30.320988 | 89 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/resnet.py | from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias (BN follows it)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """Residual basic block (two 3x3 convs).

    When ``is_last`` is True, ``forward`` returns (out, preact), where
    preact is the sum before the final ReLU -- used for distillation.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        # downsample (if any) aligns the skip path's shape with `out`
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        preact = out
        out = F.relu(out)
        if self.is_last:
            return out, preact
        else:
            return out
class Bottleneck(nn.Module):
    """Residual bottleneck block (1x1 reduce -> 3x3 -> 1x1 expand x4).

    When ``is_last`` is True, ``forward`` returns (out, preact), where
    preact is the sum before the final ReLU -- used for distillation.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # downsample (if any) aligns the skip path's shape with `out`
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        preact = out
        out = F.relu(out)
        if self.is_last:
            return out, preact
        else:
            return out
class ResNet(nn.Module):
    """CIFAR-style ResNet adapted for 64x64 inputs and scalar regression.

    All three stages use stride 2 (64 -> 32 -> 16 -> 8, matching
    AvgPool2d(8)), and the classifier is an MLP ending in ReLU, so the
    prediction is non-negative. Each stage's final block has is_last=True
    and returns (out, preact); the preact is discarded in forward.
    """
    def __init__(self, depth, num_filters, block_name='BasicBlock'):
        super(ResNet, self).__init__()
        # Model type specifies number of layers: depth = 6n+2 (basic) or 9n+2 (bottleneck)
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = num_filters[0]
        self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=3, stride=1, padding=1, bias=False) #h=h
        self.bn1 = nn.BatchNorm2d(num_filters[0])
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, num_filters[1], n, stride=2)
        self.layer2 = self._make_layer(block, num_filters[2], n, stride=2)
        self.layer3 = self._make_layer(block, num_filters[3], n, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        # Regression head; final ReLU clamps the output to be non-negative.
        self.fc = nn.Sequential(
            nn.Linear(num_filters[3] * block.expansion, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
        # Kaiming init for convs, constant init for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        # 1x1 projection on the skip path of the first block when shapes change
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = list([])
        layers.append(block(self.inplanes, planes, stride, downsample, is_last=(blocks == 1)))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, is_last=(i == blocks-1)))
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out, _ = self.layer1(out)  # 32x32
        out, _ = self.layer2(out)  # 16x16
        out, _ = self.layer3(out)  # 8x8
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
# Factory helpers: ResNet(depth, [stem, stage1, stage2, stage3] channel widths).
# The "x4" variants quadruple the channel widths at the same depth.
def resnet8(**kwargs):
    return ResNet(8, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet14(**kwargs):
    return ResNet(14, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet20(**kwargs):
    return ResNet(20, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet32(**kwargs):
    return ResNet(32, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet44(**kwargs):
    return ResNet(44, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet56(**kwargs):
    return ResNet(56, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet110(**kwargs):
    return ResNet(110, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet8x4(**kwargs):
    return ResNet(8, [32, 64, 128, 256], 'basicblock', **kwargs)
def resnet32x4(**kwargs):
    return ResNet(32, [32, 64, 128, 256], 'basicblock', **kwargs)
if __name__ == '__main__':
    # Smoke test: run a forward pass on a random 64x64 RGB batch.
    import torch

    dummy = torch.randn(2, 3, 64, 64)
    model = resnet8x4()
    print(model(dummy).size())
| 6,698 | 29.175676 | 116 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/vgg.py | '''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
from torch.autograd import Variable
cfg = {
'VGG8': [64, 'M', 128, 'M', 256, 'M', 512, 'M', 512, 'M'],
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
NC=3
class vgg(nn.Module):
    """VGG-style convolutional regressor.

    The convolutional trunk is built from the module-level ``cfg`` spec for
    ``vgg_name``; the head is a 3-layer MLP whose final ReLU makes the
    scalar output non-negative.
    """
    def __init__(self, vgg_name):
        super(vgg, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        # 4*4*128 == 2048 == 2*2*512, the flattened size after five 2x2
        # max-pools on a 64x64 input — TODO(review): confirm for other sizes.
        self.fc = nn.Sequential(
            nn.Linear(4*4*128, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
    def _make_layers(self, cfg):
        # Translate the spec list: ints are conv widths, 'M' is a 2x2 max-pool.
        layers = []
        in_channels = NC
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.ReLU(inplace=True)]
                in_channels = x
        # 1x1 stride-1 average pool: a no-op kept for architectural parity.
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)
def vgg8():
    """Build the 8-layer VGG variant."""
    return vgg('VGG8')


def vgg11():
    """Build the 11-layer VGG variant."""
    return vgg('VGG11')


def vgg13():
    """Build the 13-layer VGG variant."""
    return vgg('VGG13')


def vgg16():
    """Build the 16-layer VGG variant."""
    return vgg('VGG16')


def vgg19():
    """Build the 19-layer VGG variant."""
    return vgg('VGG19')
if __name__ == "__main__":
    # Smoke test: forward a random 64x64 batch (requires CUDA).
    net = vgg8().cuda()
    net = nn.DataParallel(net)
    x = torch.randn(4,3,64,64)
    print(net(x).size())
| 2,119 | 24.853659 | 117 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/shufflenetv1.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
To fit 128x128 images, I modified the first conv layer and add an extra max_pool2d after it (Following Table 5 of "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design")
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
def __init__(self, groups):
super(ShuffleBlock, self).__init__()
self.groups = groups
def forward(self, x):
'''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'''
N,C,H,W = x.size()
g = self.groups
return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).reshape(N,C,H,W)
class Bottleneck(nn.Module):
    """ShuffleNet v1 unit: grouped 1x1 -> shuffle -> 3x3 depthwise -> grouped 1x1.

    Stride-2 units concatenate an average-pooled shortcut with the branch
    output (channel growth); stride-1 units use an additive residual.
    """
    def __init__(self, in_planes, out_planes, stride, groups, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.stride = stride
        mid_planes = int(out_planes/4)
        # The 24-channel stem output is not grouped (standard ShuffleNet rule).
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        # Depthwise 3x3 (groups == channels).
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        # Concatenate on downsampling, add otherwise.
        preact = torch.cat([out, res], 1) if self.stride == 2 else out+res
        out = F.relu(preact)
        # out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out+res)
        return out
class ShuffleNet(nn.Module):
    """ShuffleNet v1 backbone with a 3-layer MLP regression head.

    ``cfg`` supplies ``out_planes`` (per-stage widths), ``num_blocks``
    (units per stage) and ``groups`` (group count for grouped convs).
    The final ReLU makes the scalar output non-negative.
    """
    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False) #original
        self.conv1 = nn.Conv2d(3, 24, kernel_size=4, padding=1, stride=2, bias=False) #h=h//2
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Sequential(
            nn.Linear(out_planes[2], 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
    def _make_layer(self, out_planes, num_blocks, groups):
        """Stack one stage; the first unit downsamples and concatenates."""
        layers = []
        for i in range(num_blocks):
            stride = 2 if i == 0 else 1
            # The stride-2 unit concatenates the shortcut, so its branch only
            # needs to produce out_planes - in_planes new channels.
            cat_planes = self.in_planes if i == 0 else 0
            layers.append(Bottleneck(self.in_planes, out_planes-cat_planes,
                                     stride=stride,
                                     groups=groups,
                                     is_last=(i == num_blocks - 1)))
            self.in_planes = out_planes
        return nn.Sequential(*layers)
    def forward(self, x, is_feat=False, preact=False):
        # NOTE(review): is_feat/preact are accepted but unused in this body.
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ShuffleV1(**kwargs):
    """ShuffleNet v1 with the standard groups=3 configuration."""
    config = dict(out_planes=[240, 480, 960], num_blocks=[4, 8, 4], groups=3)
    return ShuffleNet(config, **kwargs)
if __name__ == '__main__':
    # Smoke test: time one forward pass on a random 64x64 batch (CPU).
    x = torch.randn(2, 3, 64, 64)
    net = ShuffleV1()
    import time
    a = time.time()
    # NOTE(review): forward() accepts is_feat/preact but ignores them here.
    out = net(x, is_feat=True, preact=True)
    b = time.time()
    print(b - a)
    print(out.shape)
| 4,440 | 33.968504 | 190 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/SNGAN.py | '''
https://github.com/christiancosgrove/pytorch-spectral-normalization-gan
chainer: https://github.com/pfnet-research/sngan_projection
'''
# ResNet generator and discriminator
import torch
from torch import nn
import torch.nn.functional as F
# from spectral_normalization import SpectralNorm
import numpy as np
from torch.nn.utils import spectral_norm
class ConditionalBatchNorm2d(nn.Module):
    """BatchNorm2d whose affine parameters are predicted from an embedding.

    gamma and beta are linear projections of ``y``; the result is
    ``bn(x) * (1 + gamma) + beta`` — note the residual-style scale below,
    so zero-initialized projections start as plain batch norm.
    """
    def __init__(self, num_features, dim_embed):
        super().__init__()
        self.num_features = num_features
        # affine=False: scale/shift come entirely from the embedding.
        self.bn = nn.BatchNorm2d(num_features, affine=False)
        self.embed_gamma = nn.Linear(dim_embed, num_features, bias=False)
        self.embed_beta = nn.Linear(dim_embed, num_features, bias=False)
    def forward(self, x, y):
        out = self.bn(x)
        gamma = self.embed_gamma(y).view(-1, self.num_features, 1, 1)
        beta = self.embed_beta(y).view(-1, self.num_features, 1, 1)
        out = out + out*gamma + beta
        return out
class ResBlockGenerator(nn.Module):
    """Generator residual block with 2x spatial up-sampling.

    With a label embedding ``y`` the conditional path (conditional batch
    norm) is used; with ``y is None`` the unconditional ``self.model`` path
    (plain BatchNorm) runs. Both paths share the same conv weights.
    """
    def __init__(self, in_channels, out_channels, dim_embed, bias=True):
        super(ResBlockGenerator, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1, bias=bias)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1, bias=bias)
        nn.init.xavier_uniform_(self.conv1.weight.data, np.sqrt(2))
        nn.init.xavier_uniform_(self.conv2.weight.data, np.sqrt(2))
        self.condbn1 = ConditionalBatchNorm2d(in_channels, dim_embed)
        self.condbn2 = ConditionalBatchNorm2d(out_channels, dim_embed)
        self.relu = nn.ReLU()
        self.upsample = nn.Upsample(scale_factor=2)
        # unconditional case
        self.model = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            self.conv1,
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            self.conv2
        )
        self.bypass_conv = nn.Conv2d(in_channels,out_channels, 1, 1, padding=0, bias=bias) #h=h
        nn.init.xavier_uniform_(self.bypass_conv.weight.data, 1.0)
        # Shortcut: upsample then 1x1 conv to match channels.
        self.bypass = nn.Sequential(
            nn.Upsample(scale_factor=2),
            self.bypass_conv,
        )
    def forward(self, x, y):
        if y is not None:
            # Conditional path: CBN -> ReLU -> upsample -> conv, twice.
            out = self.condbn1(x, y)
            out = self.relu(out)
            out = self.upsample(out)
            out = self.conv1(out)
            out = self.condbn2(out, y)
            out = self.relu(out)
            out = self.conv2(out)
            out = out + self.bypass(x)
        else:
            out = self.model(x) + self.bypass(x)
        return out
class ResBlockDiscriminator(nn.Module):
    """Spectrally-normalized discriminator residual block.

    ``stride != 1`` halves the resolution on both branches via average
    pooling; ``stride == 1`` keeps the spatial size.
    """
    def __init__(self, in_channels, out_channels, stride=1):
        super(ResBlockDiscriminator, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1, bias=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1, bias=True)
        nn.init.xavier_uniform_(self.conv1.weight.data, np.sqrt(2))
        nn.init.xavier_uniform_(self.conv2.weight.data, np.sqrt(2))
        if stride == 1:
            self.model = nn.Sequential(
                nn.ReLU(),
                spectral_norm(self.conv1),
                nn.ReLU(),
                spectral_norm(self.conv2)
            )
        else:
            self.model = nn.Sequential(
                nn.ReLU(),
                spectral_norm(self.conv1),
                nn.ReLU(),
                spectral_norm(self.conv2),
                nn.AvgPool2d(2, stride=stride, padding=0)
            )
        # 1x1 shortcut to match channel count (and resolution when striding).
        self.bypass_conv = nn.Conv2d(in_channels,out_channels, 1, 1, padding=0, bias=True)
        nn.init.xavier_uniform_(self.bypass_conv.weight.data, 1.0)
        if stride != 1:
            self.bypass = nn.Sequential(
                spectral_norm(self.bypass_conv),
                nn.AvgPool2d(2, stride=stride, padding=0)
            )
        else:
            self.bypass = nn.Sequential(
                spectral_norm(self.bypass_conv),
            )
    def forward(self, x):
        return self.model(x) + self.bypass(x)
# special ResBlock just for the first layer of the discriminator
class FirstResBlockDiscriminator(nn.Module):
    """First discriminator block: no pre-activation on the raw image.

    Always downsamples by 2; the shortcut pools first, then projects.
    """
    def __init__(self, in_channels, out_channels, stride=1):
        super(FirstResBlockDiscriminator, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1, bias=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1, bias=True)
        self.bypass_conv = nn.Conv2d(in_channels, out_channels, 1, 1, padding=0, bias=True)
        nn.init.xavier_uniform_(self.conv1.weight.data, np.sqrt(2))
        nn.init.xavier_uniform_(self.conv2.weight.data, np.sqrt(2))
        nn.init.xavier_uniform_(self.bypass_conv.weight.data, 1.0)
        # we don't want to apply ReLU activation to raw image before convolution transformation.
        self.model = nn.Sequential(
            spectral_norm(self.conv1),
            nn.ReLU(),
            spectral_norm(self.conv2),
            nn.AvgPool2d(2)
        )
        self.bypass = nn.Sequential(
            nn.AvgPool2d(2),
            spectral_norm(self.bypass_conv),
        )
    def forward(self, x):
        return self.model(x) + self.bypass(x)
class SNGAN_Generator(nn.Module):
    """SNGAN generator: noise z + label embedding y -> (nc, 64, 64) image.

    A dense layer lifts z to a 4x4 feature map which four conditional
    residual blocks upsample to 64x64; Tanh bounds the output to [-1, 1].
    """
    def __init__(self, z_dim=256, nc=3, gene_ch=128, dim_embed=128):
        super(SNGAN_Generator, self).__init__()
        self.z_dim = z_dim
        self.dim_embed = dim_embed
        self.gene_ch = gene_ch

        self.dense = nn.Linear(self.z_dim, 4 * 4 * gene_ch*16, bias=True)
        # Final RGB projection. Kept in a local so `self.final` is assigned
        # exactly once below (the original assigned the bare conv to
        # `self.final` first, then shadowed it with the Sequential).
        # State-dict keys are unchanged: the conv lives at index 2 of the
        # Sequential either way.
        final_conv = nn.Conv2d(gene_ch, nc, 3, stride=1, padding=1, bias=True)
        nn.init.xavier_uniform_(self.dense.weight.data, 1.)
        nn.init.xavier_uniform_(final_conv.weight.data, 1.)

        # Conditional residual up-sampling stages: 4 -> 8 -> 16 -> 32 -> 64.
        self.genblock0 = ResBlockGenerator(gene_ch*16, gene_ch*8, dim_embed=dim_embed) #4--->8
        self.genblock1 = ResBlockGenerator(gene_ch*8, gene_ch*4, dim_embed=dim_embed) #8--->16
        self.genblock2 = ResBlockGenerator(gene_ch*4, gene_ch*2, dim_embed=dim_embed) #16--->32
        self.genblock3 = ResBlockGenerator(gene_ch*2, gene_ch, dim_embed=dim_embed) #32--->64

        self.final = nn.Sequential(
            nn.BatchNorm2d(gene_ch),
            nn.ReLU(),
            final_conv,
            nn.Tanh()
        )

    def forward(self, z, y): #y is embedded in the feature space
        """Generate images from noise ``z`` and label embeddings ``y``."""
        z = z.view(z.size(0), z.size(1))
        out = self.dense(z)
        out = out.view(-1, self.gene_ch*16, 4, 4)
        out = self.genblock0(out, y)
        out = self.genblock1(out, y)
        out = self.genblock2(out, y)
        out = self.genblock3(out, y)
        out = self.final(out)
        return out
class SNGAN_Discriminator(nn.Module):
    """Spectrally-normalized conditional discriminator for 64x64 images.

    The logit combines a linear head on the pooled features with an
    inner product between those features and the embedded label
    (projection-style conditioning, cf. the inner-product term below).
    """
    def __init__(self, nc=3, disc_ch=128, dim_embed=128):
        super(SNGAN_Discriminator, self).__init__()
        self.dim_embed = dim_embed
        self.disc_ch = disc_ch
        self.discblock1 = nn.Sequential(
            FirstResBlockDiscriminator(nc, disc_ch, stride=2), #64--->32
            ResBlockDiscriminator(disc_ch, disc_ch*2, stride=2), #32--->16
            ResBlockDiscriminator(disc_ch*2, disc_ch*4, stride=2), #16--->8
        )
        self.discblock2 = ResBlockDiscriminator(disc_ch*4, disc_ch*8, stride=2) #8--->4
        self.discblock3 = nn.Sequential(
            ResBlockDiscriminator(disc_ch*8, disc_ch*16, stride=1), #4--->4;
            nn.ReLU(),
        )
        self.linear1 = nn.Linear(disc_ch*16*4*4, 1, bias=True)
        nn.init.xavier_uniform_(self.linear1.weight.data, 1.)
        self.linear1 = spectral_norm(self.linear1)
        self.linear2 = nn.Linear(self.dim_embed, disc_ch*16*4*4, bias=False)
        nn.init.xavier_uniform_(self.linear2.weight.data, 1.)
        self.linear2 = spectral_norm(self.linear2)
    def forward(self, x, y):
        output = self.discblock1(x)
        output = self.discblock2(output)
        output = self.discblock3(output)
        output = output.view(-1, self.disc_ch*16*4*4)
        # Projection term: <features, embed(y)> per sample.
        output_y = torch.sum(output*self.linear2(y), 1, keepdim=True)
        output = self.linear1(output) + output_y
        return output.view(-1, 1)
if __name__ == "__main__":
    # Smoke test: generate images from random noise/embeddings and score
    # them with the discriminator (requires CUDA).
    netG = SNGAN_Generator(z_dim=256, gene_ch=128, dim_embed=128).cuda()
    netD = SNGAN_Discriminator(disc_ch=128, dim_embed=128).cuda()
    # netG = nn.DataParallel(netG)
    # netD = nn.DataParallel(netD)
    N=4
    z = torch.randn(N, 256).cuda()
    y = torch.randn(N, 128).cuda()
    x = netG(z,y)
    o = netD(x,y)
    print(x.size())
    print(o.size())
| 8,633 | 34.240816 | 96 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/densenet.py | '''DenseNet in PyTorch.
To fit 128x128 images, I modified the first conv layer and add an extra max_pool2d after it.
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
NC=3
IMG_SIZE = 64
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1 then BN-ReLU-3x3.

    The output concatenates the new ``growth_rate`` feature maps with the
    input (dense connectivity).
    """
    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4*growth_rate)
        self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.conv2(F.relu(self.bn2(out)))
        # Dense connection: stack new features in front of the input.
        out = torch.cat([out,x], 1)
        return out
class Transition(nn.Module):
    """DenseNet transition layer: 1x1 conv to compress channels, then 2x2 pool."""
    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
    def forward(self, x):
        out = self.conv(F.relu(self.bn(x)))
        out = F.avg_pool2d(out, 2)
        return out
class DenseNet(nn.Module):
    """DenseNet backbone with an MLP regression head.

    Four dense blocks separated by compressing transitions; the head's
    final ReLU makes the scalar output non-negative.
    """
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        num_planes = 2*growth_rate
        # self.conv1 = nn.Conv2d(NC, num_planes, kernel_size=3, padding=1, bias=False)
        # Stride-2 stem halves the input resolution (64 -> 32).
        self.conv1 = nn.Sequential(
            nn.Conv2d(NC, num_planes, kernel_size=4, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(num_planes),
            nn.ReLU(True),
            # nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate
        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Sequential(
            nn.Linear(num_planes, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
    def _make_dense_layers(self, block, in_planes, nblock):
        """Stack `nblock` dense units; each adds growth_rate channels."""
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DenseNet121():
    """DenseNet-121: dense blocks [6, 12, 24, 16], growth rate 32."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32)


def DenseNet169():
    """DenseNet-169: dense blocks [6, 12, 32, 32], growth rate 32."""
    return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32)


def DenseNet201():
    """DenseNet-201: dense blocks [6, 12, 48, 32], growth rate 32."""
    return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32)


def DenseNet161():
    """DenseNet-161: dense blocks [6, 12, 36, 24], growth rate 48."""
    return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48)
def test_densenet():
    """Run a random batch through DenseNet-121 and print the output shape."""
    model = DenseNet121()
    batch = torch.randn(2, NC, IMG_SIZE, IMG_SIZE)
    print(model(Variable(batch)).shape)


if __name__ == "__main__":
    test_densenet()
| 4,332 | 31.335821 | 96 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/resnetv2.py | '''
codes are based on
@article{
zhang2018mixup,
title={mixup: Beyond Empirical Risk Minimization},
author={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},
journal={International Conference on Learning Representations},
year={2018},
url={https://openreview.net/forum?id=r1Ddp1-Rb},
}
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
NC = 3
IMG_SIZE = 64
class BasicBlock(nn.Module):
    """Post-activation ResNet basic block (two 3x3 convs + shortcut)."""
    expansion = 1
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        # 1x1 projection when resolution or width changes.
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class Bottleneck(nn.Module):
    """Post-activation ResNet bottleneck (1x1 -> 3x3 -> 1x1, 4x expansion)."""
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        self.shortcut = nn.Sequential()
        # 1x1 projection when resolution or width changes.
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone (64x64 input) with an MLP regression head.

    Four stride-2 stages reduce 64x64 to 4x4; the head's final ReLU makes
    the scalar output non-negative.
    """
    def __init__(self, block, num_blocks, nc=NC):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.block1 = nn.Sequential(
            nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            self._make_layer(block, 64, num_blocks[0], stride=2), # h=h/2 32
        )
        self.block2 = self._make_layer(block, 128, num_blocks[1], stride=2) # h=h/2 16
        self.block3 = self._make_layer(block, 256, num_blocks[2], stride=2) # h=h/2 8
        self.block4 = self._make_layer(block, 512, num_blocks[3], stride=2) # h=h/2 4
        self.pool = nn.AvgPool2d(kernel_size=4)
        linear_layers = [
            nn.Linear(512*block.expansion, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        ]
        self.linear = nn.Sequential(*linear_layers)
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack one stage; only the first unit applies `stride`."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        ft1 = self.block1(x)
        ft2 = self.block2(ft1)
        ft3 = self.block3(ft2)
        ft4 = self.block4(ft3)
        out = self.pool(ft4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNet18():
    """ResNet-18 (BasicBlock x [2, 2, 2, 2])."""
    return ResNet(BasicBlock, [2, 2, 2, 2])


def ResNet34():
    """ResNet-34 (BasicBlock x [3, 4, 6, 3])."""
    return ResNet(BasicBlock, [3, 4, 6, 3])


def ResNet50():
    """ResNet-50 (Bottleneck x [3, 4, 6, 3])."""
    return ResNet(Bottleneck, [3, 4, 6, 3])


def ResNet101():
    """ResNet-101 (Bottleneck x [3, 4, 23, 3])."""
    return ResNet(Bottleneck, [3, 4, 23, 3])


def ResNet152():
    """ResNet-152 (Bottleneck x [3, 8, 36, 3])."""
    return ResNet(Bottleneck, [3, 8, 36, 3])
if __name__ == "__main__":
    # Smoke test: data-parallel forward on a random batch (requires CUDA).
    net = ResNet34().cuda()
    net = nn.DataParallel(net)
    x = torch.randn(16,NC,IMG_SIZE,IMG_SIZE).cuda()
    out = net(x)
    print(out.size())
| 4,623 | 30.455782 | 102 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/cDR_MLP.py | '''
Conditional Density Ration Estimation via Multilayer Perceptron
Multilayer Perceptron : trained to model density ratio in a feature space
Its input is the output of a pretrained Deep CNN, say ResNet-34
'''
import torch
import torch.nn as nn
IMG_SIZE=64
NC=3
cfg = {"MLP3": [512,256,128],
"MLP5": [1024,512,256,128,64]}
class cDR_MLP(nn.Module):
    """Conditional density-ratio MLP.

    Takes a flattened input of ``init_in_dim`` plus a ``dim_embed`` label
    embedding (concatenated in forward) and emits one non-negative
    density-ratio estimate (final ReLU).
    """
    def __init__(self, MLP_name, p_dropout=0.5, init_in_dim = IMG_SIZE**2*NC, dim_embed = 128):
        super(cDR_MLP, self).__init__()
        self.init_in_dim = init_in_dim
        self.p_dropout=p_dropout
        self.dim_embed = dim_embed
        layers = self._make_layers(cfg[MLP_name])
        layers += [nn.Linear(cfg[MLP_name][-1], 1)]
        layers += [nn.ReLU()]
        self.main = nn.Sequential(*layers)
    def _make_layers(self, cfg):
        # NOTE: this parameter shadows the module-level `cfg` dict.
        layers = []
        in_dim = self.init_in_dim #initial input dimension
        for x in cfg:
            # The first layer is widened by dim_embed to absorb the label
            # embedding concatenated in forward(). NOTE(review): this
            # first-layer test breaks if any hidden width equals
            # init_in_dim — confirm widths stay distinct.
            if in_dim == self.init_in_dim:
                layers += [nn.Linear(in_dim+self.dim_embed, x),
                           nn.GroupNorm(8, x),
                           nn.ReLU(inplace=True),
                           nn.Dropout(self.p_dropout) # do we really need dropout?
                           ]
            else:
                layers += [nn.Linear(in_dim, x),
                           nn.GroupNorm(8, x),
                           nn.ReLU(inplace=True),
                           nn.Dropout(self.p_dropout) # do we really need dropout?
                           ]
            in_dim = x
        return layers
    def forward(self, x, labels):
        # Prepend the label embedding to the flattened input.
        x = torch.cat((labels, x), -1)
        out = self.main(x)
        return out
if __name__ == "__main__":
    # Smoke test: forward random flattened images + embeddings (requires CUDA).
    net = cDR_MLP('MLP5').cuda()
    x = torch.randn((5,IMG_SIZE**2*NC)).cuda()
    labels = torch.randn((5, 128)).cuda()
    out = net(x, labels)
    print(out.size())
    def get_parameter_number(net):
        # Count total and trainable parameters of `net`.
        total_num = sum(p.numel() for p in net.parameters())
        trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
        return {'Total': total_num, 'Trainable': trainable_num}
print(get_parameter_number(net)) | 2,157 | 29.394366 | 95 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/mobilenet.py | import torch
from torch import nn
# from .utils import load_state_dict_from_url
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = ['MobileNetV2', 'mobilenet_v2']
model_urls = {
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
}
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class ConvBNReLU(nn.Sequential):
    """Conv2d -> norm -> ReLU6 with 'same'-style padding for odd kernels."""
    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, norm_layer=None):
        padding = (kernel_size - 1) // 2
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
            norm_layer(out_planes),
            nn.ReLU6(inplace=True)
        )
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual: expand -> depthwise -> linear project.

    The residual connection is only used when the block keeps both the
    spatial size (stride 1) and the channel count.
    """
    def __init__(self, inp, oup, stride, expand_ratio, norm_layer=None):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup
        layers = []
        if expand_ratio != 1:
            # pw
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
        layers.extend([
            # dw
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer),
            # pw-linear (no activation after the projection)
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        ])
        self.conv = nn.Sequential(*layers)
    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone with an MLP regression head (scalar output)."""
    def __init__(self,
                 num_classes=1,
                 width_mult=1.0,
                 inverted_residual_setting=None,
                 round_nearest=8,
                 block=None,
                 norm_layer=None):
        """
        MobileNet V2 main class
        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
        """
        super(MobileNetV2, self).__init__()
        if block is None:
            block = InvertedResidual
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))
        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # Only the first unit of each group applies the group stride.
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)
        # building classifier
        # NOTE(review): `num_classes` is accepted but never used — this head
        # always emits a single non-negative value (regression).
        self.classifier = nn.Sequential(
            nn.Linear(self.last_channel, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
    def _forward_impl(self, x):
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1 => must use reshape with x.shape[0]
        x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1)
        x = self.classifier(x)
        return x
    def forward(self, x):
        return self._forward_impl(x)
def mobilenet_v2(pretrained=False, progress=True, **kwargs):
    """
    Constructs a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # NOTE(review): the torchvision checkpoint's classifier keys do not match
    # the MLP regression head defined above, so strict load_state_dict with
    # pretrained=True likely fails — confirm before relying on it.
    model = MobileNetV2(**kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
if __name__ == "__main__":
    # Smoke test: forward a random batch and report parameter count (CUDA).
    net = mobilenet_v2().cuda()
    x = torch.randn(16,3,64,64).cuda()
    out = net(x)
    print(out.size())
    def count_parameters(module):
        # Print the total number of parameters in `module`.
        print('Number of parameters: {}'.format(sum([p.data.nelement() for p in module.parameters()])))
    count_parameters(net)
| 7,609 | 35.238095 | 116 | py |
cGAN-KD | cGAN-KD-main/SteeringAngle/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Original Author: Wei Yang
"""
__all__ = ['wrn']
class BasicBlock(nn.Module):
    """Pre-activation wide-resnet basic block (BN-ReLU-conv twice).

    When input and output widths differ, the shortcut is a 1x1 conv applied
    to the pre-activated input; otherwise it is the identity.
    """
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # `cond and A or None` evaluates to the conv only when widths differ.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        if not self.equalInOut:
            # Share the pre-activation between branch and shortcut.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """One Wide-ResNet stage: `nb_layers` blocks in sequence.

    The first block maps in_planes -> out_planes with the requested stride;
    the remaining blocks keep out_planes channels at stride 1.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        """Build the sequential stage of `nb_layers` `block` instances."""
        layers = []
        for i in range(nb_layers):
            # The original used the fragile `cond and a or b` idiom, which
            # silently yields `b` whenever `a` is falsy; use real conditional
            # expressions instead (identical behavior for positive widths).
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-widen_factor) with an MLP regression head.

    Stride-2 stem for 64x64 inputs, three network blocks, global average
    pooling, then a 3-layer MLP whose final ReLU keeps the scalar output
    non-negative.
    """
    def __init__(self, depth, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        # self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False) #for cifar
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=4, stride=2, padding=1, bias=False) #for imagenet100
        self.bn0 = nn.BatchNorm2d(nChannels[0])
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Sequential(
            nn.Linear(nChannels[3], 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.ReLU(),
        )
        self.nChannels = nChannels[3]
        # He init for convs, constant init for BN, zero bias for linears.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        # out = self.conv1(x) #for cifar
        out = F.relu(self.bn0(self.conv1(x)))
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        out = self.fc(out)
        return out
def wrn(**kwargs):
    """Construct a Wide Residual Network from keyword arguments."""
    return WideResNet(**kwargs)
def wrn_40_2(**kwargs):
    """WRN-40-2: depth 40, widening factor 2."""
    return WideResNet(depth=40, widen_factor=2, **kwargs)
def wrn_40_1(**kwargs):
    """WRN-40-1: depth 40, widening factor 1."""
    return WideResNet(depth=40, widen_factor=1, **kwargs)
def wrn_16_2(**kwargs):
    """WRN-16-2: depth 16, widening factor 2."""
    return WideResNet(depth=16, widen_factor=2, **kwargs)
def wrn_16_1(**kwargs):
    """WRN-16-1: depth 16, widening factor 1."""
    return WideResNet(depth=16, widen_factor=1, **kwargs)
if __name__ == '__main__':
    import torch
    # Smoke test: one forward pass on a random 64x64 batch of two images.
    batch = torch.randn(2, 3, 64, 64)
    model = wrn_16_1()
    print(model(batch).shape)
print("\n ===================================================================================================")
#----------------------------------------
import argparse
import os
import timeit
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.nn import functional as F
import random
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use('Agg')
from torch import autograd
from torchvision.utils import save_image
from tqdm import tqdm, trange
import gc
from itertools import groupby
import multiprocessing
import h5py
import pickle
import copy
import shutil
#----------------------------------------
from opts import gen_synth_data_opts
from utils import *
from models import *
from train_cnn import train_cnn, test_cnn
from train_cdre import train_cdre
from eval_metrics import compute_FID, compute_IS
#######################################################################################
''' Settings '''
#######################################################################################
# Parse all sampling/DRE/evaluation settings from the command line.
args = gen_synth_data_opts()
print(args)
## subsampling?
# Build a tag string encoding the full sampling configuration; it is embedded
# in every output filename so different settings never overwrite each other.
if args.subsampling:
    subsampling_method = "sampling_cDR-RS_precnn_{}_lambda_{:.3f}_DR_{}_lambda_{:.3f}".format(args.dre_precnn_net, args.dre_precnn_lambda, args.dre_net, args.dre_lambda)
else:
    subsampling_method = "sampling_None"
## filter??
if args.filter:
    subsampling_method = subsampling_method + "_filter_{}_perc_{:.2f}".format(args.samp_filter_precnn_net, args.samp_filter_ce_percentile_threshold)
else:
    subsampling_method = subsampling_method + "_filter_None"
## adjust labels??
subsampling_method = subsampling_method + "_adjust_{}".format(args.adjust)
# path_torch_home = os.path.join(args.root_path, 'torch_cache')
# os.makedirs(path_torch_home, exist_ok=True)
# os.environ['TORCH_HOME'] = path_torch_home
#-------------------------------
# GAN and DRE
# lr-decay epochs arrive as an underscore-separated string (split + int-cast here).
dre_precnn_lr_decay_epochs = (args.dre_precnn_lr_decay_epochs).split("_")
dre_precnn_lr_decay_epochs = [int(epoch) for epoch in dre_precnn_lr_decay_epochs]
#-------------------------------
# seeds
# Seed every RNG used below (python, torch, numpy) for reproducibility.
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
cudnn.benchmark = False
np.random.seed(args.seed)
#-------------------------------
# output folders
precnn_models_directory = os.path.join(args.root_path, 'output/precnn_models')
os.makedirs(precnn_models_directory, exist_ok=True)
output_directory = os.path.join(args.root_path, 'output/Setting_{}'.format(args.gan_net))
os.makedirs(output_directory, exist_ok=True)
save_models_folder = os.path.join(output_directory, 'saved_models')
os.makedirs(save_models_folder, exist_ok=True)
save_traincurves_folder = os.path.join(output_directory, 'training_curves')
os.makedirs(save_traincurves_folder, exist_ok=True)
save_evalresults_folder = os.path.join(output_directory, 'eval_results')
os.makedirs(save_evalresults_folder, exist_ok=True)
dump_fake_images_folder = os.path.join(args.root_path, 'fake_data')
os.makedirs(dump_fake_images_folder, exist_ok=True)
#######################################################################################
''' Load Data '''
#######################################################################################
## generate subset
# Load the ImageNet-100 (128x128) train/valid split from one HDF5 file into
# memory; images are NCHW with pixel values in [0, 255] (3 channels asserted below).
trainset_h5py_file = os.path.join(args.data_path, 'ImageNet_128x128_100Class.h5')
hf = h5py.File(trainset_h5py_file, 'r')
images_train = hf['images_train'][:]
labels_train = hf['labels_train'][:]
images_test = hf['images_valid'][:]
labels_test = hf['labels_valid'][:]
hf.close()
### compute the mean and std for normalization
### Note that: In GAN-based KD, use computed mean and stds to normalize images for precnn training is better than using [0.5,0.5,0.5]
assert images_train.shape[1]==3
train_means = []
train_stds = []
# per-channel mean/std over the training set, computed on [0,1]-scaled values
for i in range(3):
    images_i = images_train[:,i,:,:]
    images_i = images_i/255.0
    train_means.append(np.mean(images_i))
    train_stds.append(np.std(images_i))
## for i
# train_means = [0.5,0.5,0.5]
# train_stds = [0.5,0.5,0.5]
print("\n Training set shape: {}x{}x{}x{}; Testing set shape: {}x{}x{}x{}.".format(images_train.shape[0], images_train.shape[1], images_train.shape[2], images_train.shape[3], images_test.shape[0], images_test.shape[1], images_test.shape[2], images_test.shape[3]))
''' transformations '''
# Augmentation + dataset-statistics normalization for pre-CNN training.
transform_precnn_train = transforms.Compose([
            # transforms.RandomCrop((args.img_size, args.img_size), padding=4), ##may harm the performance
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
            ])
# DRE inputs must be normalized exactly like the GAN's training data ([-1,1]).
transform_dre = transforms.Compose([
            # transforms.RandomCrop((args.img_size, args.img_size), padding=4), ## note that some GAN training does not involve cropping!!!
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5]), ##do not use other normalization constants!!!
            ])
# test set for cnn
transform_precnn_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
            ])
testset_precnn = IMGs_dataset(images_test, labels_test, transform=transform_precnn_test)
testloader_precnn = torch.utils.data.DataLoader(testset_precnn, batch_size=100, shuffle=False, num_workers=args.num_workers)
#######################################################################################
''' Load pre-trained GAN to Memory (not GPU) '''
#######################################################################################
# Load the pre-trained conditional generator weights.  The module is kept on
# CPU here and moved to GPU only inside the sampling functions.
ckpt_g = torch.load(args.gan_ckpt_path)
if args.gan_net=="BigGANdeep":
    netG = BigGANdeep_Generator(G_ch=128, dim_z=args.gan_dim_g, resolution=args.img_size, G_attn='64', n_classes=args.num_classes, G_shared=True, shared_dim=128, hier=True)
    netG.load_state_dict(ckpt_g)
    netG = nn.DataParallel(netG)
else:
    raise Exception("Not supported GAN!!")
def fn_sampleGAN_given_label(nfake, given_label, batch_size, pretrained_netG=netG, to_numpy=True, verbose=False, denorm=True):
    """Draw `nfake` samples of class `given_label` from the conditional GAN.

    Returns (images, labels) truncated to exactly `nfake` items.  Images are
    uint8 in [0, 255] when `denorm` is True, otherwise the raw generator
    output; outputs are numpy arrays when `to_numpy`, CPU torch tensors
    otherwise.
    """
    pretrained_netG = pretrained_netG.cuda()
    pretrained_netG.eval()
    image_chunks = []
    label_chunks = []
    progress = SimpleProgressBar() if verbose else None
    with torch.no_grad():
        n_done = 0
        while n_done < nfake:
            z = torch.randn(batch_size, args.gan_dim_g, dtype=torch.float).cuda()
            labels = (given_label*torch.ones(batch_size)).type(torch.long).cuda()
            fakes = pretrained_netG(z, pretrained_netG.module.shared(labels))
            if denorm:
                # map generator range [-1, 1] to uint8 [0, 255]
                fakes = ((fakes*0.5+0.5)*255.0).type(torch.uint8)
            image_chunks.append(fakes.cpu())
            label_chunks.append(labels.cpu().view(-1))
            n_done += batch_size
            if progress is not None:
                progress.update(np.min([float(n_done)*100/nfake, 100]))
    images = torch.cat(image_chunks, dim=0)
    labels_out = torch.cat(label_chunks)
    if to_numpy:
        images = images.numpy()
        labels_out = labels_out.numpy()
    return images[0:nfake], labels_out[0:nfake]
#######################################################################################
''' DRE Training '''
#######################################################################################
# Train (or load) the two models that cDR-RS subsampling needs: a CNN feature
# extractor and the conditional density-ratio MLP that operates on its features.
if args.subsampling:
    ##############################################
    ''' Pre-trained CNN for feature extraction '''
    print("\n -----------------------------------------------------------------------------------------")
    print("\n Pre-trained CNN for feature extraction")
    # data loader
    trainset_dre_precnn = IMGs_dataset(images_train, labels_train, transform=transform_precnn_train)
    trainloader_dre_precnn = torch.utils.data.DataLoader(trainset_dre_precnn, batch_size=args.dre_precnn_batch_size_train, shuffle=True, num_workers=args.num_workers)
    # Filename
    filename_precnn_ckpt = precnn_models_directory + '/ckpt_PreCNNForDRE_{}_lambda_{}_epoch_{}_transform_{}_ntrain_{}_seed_{}.pth'.format(args.dre_precnn_net, args.dre_precnn_lambda, args.dre_precnn_epochs, args.dre_precnn_transform, args.ntrain, args.seed)
    print('\n' + filename_precnn_ckpt)
    path_to_ckpt_in_train = precnn_models_directory + '/ckpts_in_train_PreCNNForDRE_{}_lambda_{}_ntrain_{}_seed_{}'.format(args.dre_precnn_net, args.dre_precnn_lambda, args.ntrain, args.seed)
    os.makedirs(path_to_ckpt_in_train, exist_ok=True)
    # initialize cnn
    dre_precnn_net = cnn_extract_initialization(args.dre_precnn_net, num_classes=args.num_classes)
    num_parameters = count_parameters(dre_precnn_net)
    # training
    # Train only if no checkpoint exists yet; otherwise reuse stored weights.
    if not os.path.isfile(filename_precnn_ckpt):
        print("\n Start training CNN for feature extraction in the DRE >>>")
        dre_precnn_net = train_cnn(dre_precnn_net, 'PreCNNForDRE_{}'.format(args.dre_precnn_net), trainloader_dre_precnn, testloader_precnn, epochs=args.dre_precnn_epochs, resume_epoch=args.dre_precnn_resume_epoch, lr_base=args.dre_precnn_lr_base, lr_decay_factor=args.dre_precnn_lr_decay_factor, lr_decay_epochs=dre_precnn_lr_decay_epochs, weight_decay=args.dre_precnn_weight_decay, extract_feature=True, net_decoder=None, lambda_reconst=args.dre_precnn_lambda, train_means=train_means, train_stds=train_stds, path_to_ckpt = path_to_ckpt_in_train)
        # store model
        torch.save({
            'net_state_dict': dre_precnn_net.state_dict(),
        }, filename_precnn_ckpt)
        print("\n End training CNN.")
    else:
        print("\n Loading pre-trained CNN for feature extraction in DRE.")
        checkpoint = torch.load(filename_precnn_ckpt)
        dre_precnn_net.load_state_dict(checkpoint['net_state_dict'])
    #end if
    # testing
    _ = test_cnn(dre_precnn_net, testloader_precnn, extract_feature=True, verbose=True)
    ##############################################
    ''' cDRE Training '''
    print("\n -----------------------------------------------------------------------------------------")
    print("\n cDRE training")
    ### dataloader
    trainset_dre = IMGs_dataset(images_train, labels_train, transform=transform_dre)
    trainloader_dre = torch.utils.data.DataLoader(trainset_dre, batch_size=args.dre_batch_size, shuffle=True, num_workers=args.num_workers)
    ### dr model filename
    drefile_fullpath = save_models_folder + "/ckpt_cDRE-F-cSP_precnn_{}_lambda_{:.3f}_DR_{}_lambda_{:.3f}_epochs_{}_ntrain_{}_seed_{}.pth".format(args.dre_precnn_net, args.dre_precnn_lambda, args.dre_net, args.dre_lambda, args.dre_epochs, args.ntrain, args.seed)
    print('\n' + drefile_fullpath)
    path_to_ckpt_in_train = save_models_folder + '/ckpt_cDRE-F-cSP_precnn_{}_lambda_{:.3f}_DR_{}_lambda_{:.3f}_ntrain_{}_seed_{}'.format(args.dre_precnn_net, args.dre_precnn_lambda, args.dre_net, args.dre_lambda, args.ntrain, args.seed)
    os.makedirs(path_to_ckpt_in_train, exist_ok=True)
    dre_loss_file_fullpath = save_traincurves_folder + '/train_loss_cDRE-F-cSP_precnn_{}_lambda_{:.3f}_DR_{}_epochs_{}_lambda_{}_ntrain_{}_seed_{}.png'.format(args.dre_precnn_net, args.dre_precnn_lambda, args.dre_net, args.dre_epochs, args.dre_lambda, args.ntrain, args.seed)
    ### dre training
    # Conditional density-ratio MLP operating on the CNN's feature space.
    dre_net = cDR_MLP(args.dre_net, p_dropout=0.5, init_in_dim = args.num_channels*args.img_size*args.img_size, num_classes = args.num_classes).cuda()
    num_parameters_DR = count_parameters(dre_net)
    dre_net = nn.DataParallel(dre_net)
    #if DR model exists, then load the pretrained model; otherwise, start training the model.
    if not os.path.isfile(drefile_fullpath):
        print("\n Begin Training conditional DR in Feature Space: >>>")
        dre_net, avg_train_loss = train_cdre(trainloader_dre, dre_net, dre_precnn_net, netG, path_to_ckpt=path_to_ckpt_in_train)
        # save model
        torch.save({
            'net_state_dict': dre_net.state_dict(),
        }, drefile_fullpath)
        PlotLoss(avg_train_loss, dre_loss_file_fullpath)
    else:
        # if already trained, load pre-trained DR model
        checkpoint_dre_net = torch.load(drefile_fullpath)
        dre_net.load_state_dict(checkpoint_dre_net['net_state_dict'])
    ##end if not
# Compute density ratio: function for computing a bunch of images in a numpy array
def comp_cond_density_ratio(imgs, labels, batch_size=args.samp_batch_size):
    """Evaluate the conditional density ratio for each image under `dre_net`.

    imgs: torch tensor of images with values in [0, 255] (NOT normalized; asserted).
    labels: torch tensor of class labels, same length as imgs.
    Returns a numpy array with one ratio per input image.
    """
    #imgs: a torch tensor
    n_imgs = len(imgs)
    if batch_size>n_imgs:
        batch_size = n_imgs
    ##make sure the last iteration has enough samples
    # Wrap around by appending the first `batch_size` samples and truncating at
    # the end, so every mini-batch processed below is full.
    imgs = torch.cat((imgs, imgs[0:batch_size]), dim=0)
    labels = torch.cat((labels, labels[0:batch_size]), dim=0)
    assert imgs.max().item()>1.0 ##make sure all images are not normalized
    density_ratios = []
    dre_net.eval()
    dre_precnn_net.eval()
    # print("\n Begin computing density ratio for images >>")
    with torch.no_grad():
        n_imgs_got = 0
        while n_imgs_got < n_imgs:
            batch_images = imgs[n_imgs_got:(n_imgs_got+batch_size)]
            batch_images = (batch_images/255.0-0.5)/0.5 ## normalize
            batch_labels = labels[n_imgs_got:(n_imgs_got+batch_size)]
            batch_images = batch_images.type(torch.float).cuda()
            batch_labels = batch_labels.type(torch.long).cuda()
            # pre-CNN returns (logits, features); ratios are computed on features
            _, batch_features = dre_precnn_net(batch_images)
            batch_ratios = dre_net(batch_features, batch_labels)
            density_ratios.append(batch_ratios.cpu().detach())
            n_imgs_got += batch_size
        ### while n_imgs_got
    density_ratios = torch.cat(density_ratios)
    density_ratios = density_ratios[0:n_imgs].numpy()
    return density_ratios
# Enhanced sampler based on the trained DR model
# Rejection Sampling:"Discriminator Rejection Sampling"; based on https://github.com/shinseung428/DRS_Tensorflow/blob/master/config.py
def fn_enhancedSampler_given_label(nfake, given_label, batch_size=args.samp_batch_size, verbose=True):
    """Rejection-sample `nfake` class-`given_label` images using the cDR model.

    Discriminator-rejection-sampling style: estimate the ratio upper bound
    M_bar on a burn-in batch, then accept each candidate with probability
    ratio/M_bar; M_bar is raised whenever a larger ratio is observed.
    Returns (images, labels) with exactly `nfake` entries.
    """
    ## Burn-in Stage
    n_burnin = args.samp_burnin_size
    burnin_imgs, burnin_labels = fn_sampleGAN_given_label(n_burnin, given_label, batch_size, to_numpy=False)
    burnin_densityratios = comp_cond_density_ratio(burnin_imgs, burnin_labels)
    # print((burnin_densityratios.min(),np.median(burnin_densityratios),burnin_densityratios.max()))
    M_bar = np.max(burnin_densityratios)
    del burnin_imgs, burnin_densityratios; gc.collect()
    ## Rejection sampling
    enhanced_imgs = []
    if verbose:
        pb = SimpleProgressBar()
        # pbar = tqdm(total=nfake)
    num_imgs = 0
    while num_imgs < nfake:
        batch_imgs, batch_labels = fn_sampleGAN_given_label(batch_size, given_label, batch_size, to_numpy=False)
        batch_ratios = comp_cond_density_ratio(batch_imgs, batch_labels)
        # M_bar must be updated BEFORE thresholding so batch_p stays <= 1
        M_bar = np.max([M_bar, np.max(batch_ratios)])
        #threshold
        batch_p = batch_ratios/M_bar
        # NOTE(review): assumes batch_ratios has shape (batch_size, 1) so the
        # comparison below is elementwise — confirm against cDR_MLP's output.
        batch_psi = np.random.uniform(size=batch_size).reshape(-1,1)
        indx_accept = np.where(batch_psi<=batch_p)[0]
        if len(indx_accept)>0:
            enhanced_imgs.append(batch_imgs[indx_accept])
            num_imgs+=len(indx_accept)
        del batch_imgs, batch_ratios; gc.collect()
        if verbose:
            pb.update(np.min([float(num_imgs)*100/nfake,100]))
            # pbar.update(len(indx_accept))
    # pbar.close()
    enhanced_imgs = np.concatenate(enhanced_imgs, axis=0)
    enhanced_imgs = enhanced_imgs[0:nfake]
    return enhanced_imgs, given_label*np.ones(nfake)
#######################################################################################
''' Filtering by teacher '''
#######################################################################################
# Load the pre-trained teacher CNN used to filter low-quality fakes and/or
# re-label them with its predictions.
if args.filter or args.adjust:
    ## initialize pre-trained CNN for filtering
    filter_precnn_net = model_dict[args.samp_filter_precnn_net](num_classes=args.num_classes)
    ## load ckpt
    checkpoint = torch.load(args.samp_filter_precnn_net_ckpt_path)
    filter_precnn_net.load_state_dict(checkpoint['model'])
    filter_precnn_net = nn.DataParallel(filter_precnn_net)
def fn_filter_or_adjust(fake_images, fake_labels, filter_precnn_net=filter_precnn_net, filter=args.filter, adjust=args.adjust, CE_cutoff_point=1e30, burnin_mode=False, verbose=False, visualize_filtered_images=False, filtered_images_path=None):
    """Score fakes with the teacher CNN, then filter and/or re-label them.

    In `burnin_mode` (with `filter`), only the CE-loss quantile cut-off is
    computed and returned as a scalar.  Otherwise returns (images, labels),
    where images with CE loss >= CE_cutoff_point are dropped when `filter`,
    and labels are replaced by teacher predictions when `adjust`.
    fake_images/fake_labels: numpy arrays; pixel values must be in [0, 255].
    """
    #fake_images: numpy array
    #fake_labels: numpy array
    filter_precnn_net = filter_precnn_net.cuda()
    filter_precnn_net.eval()
    assert fake_images.max()>1.0 and fake_images.min()>=0
    fake_dataset = IMGs_dataset(fake_images, fake_labels, transform=transform_precnn_train)
    fake_dataloader = torch.utils.data.DataLoader(fake_dataset, batch_size=args.samp_filter_batch_size, shuffle=False)
    ## evaluate on fake data
    fake_CE_loss = []
    fake_labels_pred = []
    # per-sample (unreduced) CE so each image gets its own filtering score
    criterion = nn.CrossEntropyLoss(reduction='none')
    filter_precnn_net.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
    if verbose:
        pbar = tqdm(total=len(fake_images))
    with torch.no_grad():
        correct = 0
        total = 0
        for _, (images, labels) in enumerate(fake_dataloader):
            images = images.type(torch.float).cuda()
            labels = labels.type(torch.long).cuda()
            outputs = filter_precnn_net(images)
            loss = criterion(outputs, labels)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            fake_labels_pred.append(predicted.cpu().numpy())
            fake_CE_loss.append(loss.cpu().numpy())
            if verbose:
                pbar.update(len(images))
    if verbose:
        print('\n Test Accuracy of {} on the {} fake images: {:.3f} %'.format(args.samp_filter_precnn_net, len(fake_images), 100.0 * correct / total))
    fake_CE_loss = np.concatenate(fake_CE_loss)
    fake_labels_pred = np.concatenate(fake_labels_pred)
    if not burnin_mode:
        indx_label_diff = np.where(np.abs(fake_labels_pred-fake_labels)>1e-10)[0]
        print('\r Class {}: adjust the labels of {}/{} images before the filtering.'.format(int(fake_labels[0]+1), len(indx_label_diff), len(fake_labels)))
    # free GPU memory between calls
    filter_precnn_net = filter_precnn_net.cpu()
    if filter:
        if burnin_mode:
            # burn-in call: only derive and return the CE-loss cut-off quantile
            CE_cutoff_point = np.quantile(fake_CE_loss, q=args.samp_filter_ce_percentile_threshold)
            print("\r Cut-off point for the filtering is {:.4f}. Test Accuracy of {} on the {} fake images: {:.3f} %.".format(CE_cutoff_point, args.samp_filter_precnn_net, len(fake_images), 100.0 * correct / total))
            return CE_cutoff_point
        if visualize_filtered_images: ## show some exampled filtered images
            indx_drop = np.where(fake_CE_loss>=CE_cutoff_point)[0]
            example_filtered_images = fake_images[indx_drop]
            n_row = int(np.sqrt(min(100, len(example_filtered_images))))
            example_filtered_images = example_filtered_images[0:n_row**2]
            example_filtered_images = torch.from_numpy(example_filtered_images)
            if example_filtered_images.max()>1.0:
                example_filtered_images = example_filtered_images/255.0
            filename_filtered_images = filtered_images_path + '/class_{}_dropped.png'.format(int(fake_labels[0]+1))
            save_image(example_filtered_images.data, filename_filtered_images, nrow=n_row, normalize=True)
        ## do the filtering
        indx_sel = np.where(fake_CE_loss<CE_cutoff_point)[0]
        fake_images = fake_images[indx_sel]
        fake_labels = fake_labels[indx_sel]
        fake_labels_pred = fake_labels_pred[indx_sel] #adjust the labels of fake data by using the pre-trained big CNN
        if visualize_filtered_images: ## show kept images as reference
            example_selected_images = fake_images[0:n_row**2]
            example_selected_images = torch.from_numpy(example_selected_images)
            if example_selected_images.max()>1.0:
                example_selected_images = example_selected_images/255.0
            filename_filtered_images = filtered_images_path + '/class_{}_selected.png'.format(fake_labels[0]+1)
            save_image(example_selected_images.data, filename_filtered_images, nrow=n_row, normalize=True)
        if not burnin_mode:
            indx_label_diff = np.where(np.abs(fake_labels_pred-fake_labels)>1e-10)[0]
            print('\r Class {}: adjust the labels of {}/{} images after the filtering.'.format(int(fake_labels[0]+1), len(indx_label_diff), len(fake_labels)))
    if adjust:
        return fake_images, fake_labels_pred
    else:
        return fake_images, fake_labels
#######################################################################################
''' Final Sampler '''
#######################################################################################
def fn_final_sampler(nfake, label_i, batch_size=args.samp_batch_size, split_div=2, filter_nburnin=args.samp_filter_burnin_size, verbose=False, visualize_filtered_images=False, filtered_images_path=None):
    """Generate `nfake` images of class `label_i` with optional subsampling,
    teacher filtering, and label adjustment.

    Candidates come either from cDR-RS rejection sampling (args.subsampling)
    or directly from the GAN, in chunks of nfake//split_div, until enough
    images survive the optional filter.  Returns (images, labels) as numpy
    arrays truncated to exactly `nfake`.
    """
    if verbose:
        pbar = tqdm(total=nfake)
    # Default cut-off keeps everything (matches fn_filter_or_adjust's default).
    # Bug fix: without this initialization, passing CE_cutoff_point below
    # raised NameError when args.adjust was set but args.filter was not.
    CE_cutoff_point = 1e30
    ### burn-in stage: compute the cut-off point for the filtering
    if args.filter:
        if args.subsampling:
            burnin_images, burnin_labels = fn_enhancedSampler_given_label(nfake=filter_nburnin, given_label=label_i, batch_size=batch_size, verbose=False)
        else:
            burnin_images, burnin_labels = fn_sampleGAN_given_label(nfake=filter_nburnin, given_label=label_i, batch_size=batch_size, verbose=False)
        assert burnin_images.max()>1 and burnin_images.max()<=255.0
        ## compute the cut-off point (CE-loss quantile under the teacher CNN)
        CE_cutoff_point = fn_filter_or_adjust(burnin_images, burnin_labels, burnin_mode=True, verbose=False)
    fake_images_i = []
    fake_labels_i = []
    num_got = 0
    while num_got<nfake:
        if args.subsampling:
            batch_images, batch_labels = fn_enhancedSampler_given_label(nfake=nfake//split_div, given_label=label_i, batch_size=batch_size, verbose=False)
        else:
            batch_images, batch_labels = fn_sampleGAN_given_label(nfake=nfake//split_div, given_label=label_i, batch_size=batch_size, verbose=False)
        assert batch_images.max()>1 and batch_images.max()<=255.0
        ## filtering and adjustment
        if args.filter or args.adjust:
            batch_images, batch_labels = fn_filter_or_adjust(fake_images=batch_images, fake_labels=batch_labels, CE_cutoff_point=CE_cutoff_point, verbose=False, visualize_filtered_images=visualize_filtered_images, filtered_images_path=filtered_images_path)
        num_got += len(batch_images)
        fake_images_i.append(batch_images)
        fake_labels_i.append(batch_labels)
        if verbose:
            pbar.update(len(batch_images))
    ##end while
    fake_images_i = np.concatenate(fake_images_i, axis=0)
    fake_labels_i = np.concatenate(fake_labels_i, axis=0)
    fake_images_i = fake_images_i[0:nfake]
    fake_labels_i = fake_labels_i[0:nfake]
    return fake_images_i, fake_labels_i
###############################################################################
''' Generate fake data '''
###############################################################################
print("\n Geneating fake data: {}+{}...".format(args.gan_net, subsampling_method))
### generate fake images
dump_fake_images_filename = os.path.join(dump_fake_images_folder, 'imagenet100_fake_images_{}_{}_NfakePerClass_{}_seed_{}.h5'.format(args.gan_net, subsampling_method, args.samp_nfake_per_class, args.seed))
print(dump_fake_images_filename)
if args.visualize_filtered_images:
dump_filtered_fake_images_folder = os.path.join(dump_fake_images_folder, 'imagenet100_fake_images_{}_{}_NfakePerClass_{}_seed_{}_example_filtered_images'.format(args.gan_net, subsampling_method, args.samp_nfake_per_class, args.seed))
os.makedirs(dump_filtered_fake_images_folder, exist_ok=True)
else:
dump_filtered_fake_images_folder=None
if not os.path.isfile(dump_fake_images_filename):
print('\n Start generating fake data...')
fake_images = []
fake_labels = []
start_time = timeit.default_timer()
for i in range(args.num_classes):
print("\n Generate {} fake images for class {}/{}.".format(args.samp_nfake_per_class, i+1, args.num_classes))
fake_images_i, fake_labels_i = fn_final_sampler(nfake=args.samp_nfake_per_class, label_i=i, visualize_filtered_images=args.visualize_filtered_images, filtered_images_path=dump_filtered_fake_images_folder)
fake_images.append(fake_images_i)
fake_labels.append(fake_labels_i.reshape(-1))
print("\n End generating {} fake images for class {}/{}. Time elapse: {:.3f}".format(args.samp_nfake_per_class, i+1, args.num_classes, timeit.default_timer()-start_time))
fake_images = np.concatenate(fake_images, axis=0)
fake_labels = np.concatenate(fake_labels, axis=0)
del fake_images_i, fake_labels_i; gc.collect()
print('\n End generating fake data!')
with h5py.File(dump_fake_images_filename, "w") as f:
f.create_dataset('fake_images', data = fake_images, dtype='uint8', compression="gzip", compression_opts=6)
f.create_dataset('fake_labels', data = fake_labels, dtype='int')
else:
print('\n Start loading generated fake data...')
with h5py.File(dump_fake_images_filename, "r") as f:
fake_images = f['fake_images'][:]
fake_labels = f['fake_labels'][:]
assert len(fake_images) == len(fake_labels)
### visualize data distribution
frequencies = []
for i in range(args.num_classes):
indx_i = np.where(fake_labels==i)[0]
frequencies.append(len(indx_i))
frequencies = np.array(frequencies)
width = 0.8
x = np.arange(1,args.num_classes+1)
# plot data in grouped manner of bar type
fig, ax = plt.subplots(1,1, figsize=(6,4))
ax.grid(color='lightgrey', linestyle='--', zorder=0)
ax.bar(x, frequencies, width, align='center', color='tab:green', zorder=3)
ax.set_xlabel("Class")
ax.set_ylabel("Frequency")
plt.tight_layout()
plt.savefig(os.path.join(dump_fake_images_folder, "imagenet100_fake_images_{}_{}_NfakePerClass_{}_class_dist.pdf".format(args.gan_net, subsampling_method, args.samp_nfake_per_class)))
plt.close()
print('\n Frequence of each class: MIN={}, MEAN={}, MAX={}.'.format(np.min(frequencies),np.mean(frequencies),np.max(frequencies)))
###############################################################################
''' Compute FID and IS '''
###############################################################################
# Evaluate the generated set: Intra-FID (per class), overall FID, and IS,
# all computed with a pre-trained Inception3 feature extractor.
if args.eval:
    #load pre-trained InceptionV3 (pretrained on CIFAR-100)
    # NOTE(review): the comment above says CIFAR-100 but this script targets
    # ImageNet-100; the checkpoint at args.eval_ckpt_path decides what is used.
    PreNetFID = Inception3(num_classes=args.num_classes, aux_logits=True, transform_input=False)
    checkpoint_PreNet = torch.load(args.eval_ckpt_path)
    PreNetFID = nn.DataParallel(PreNetFID).cuda()
    PreNetFID.load_state_dict(checkpoint_PreNet['net_state_dict'])
    ##############################################
    ''' Compute FID between real and fake images '''
    start = timeit.default_timer()
    ## normalize images
    # Map both real and fake sets from uint8 [0,255] to float [-1,1] in place.
    assert fake_images.max()>1
    fake_images = (fake_images/255.0-0.5)/0.5
    assert images_train.max()>1
    images_train = (images_train/255.0-0.5)/0.5
    assert -1.0<=images_train.max()<=1.0 and -1.0<=images_train.min()<=1.0
    #####################
    ## Compute Intra-FID: real vs fake
    print("\n Start compute Intra-FID between real and fake images...")
    start_time = timeit.default_timer()
    intra_fid_scores = np.zeros(args.num_classes)
    for i in range(args.num_classes):
        indx_train_i = np.where(labels_train==i)[0]
        images_train_i = images_train[indx_train_i]
        indx_fake_i = np.where(fake_labels==i)[0]
        fake_images_i = fake_images[indx_fake_i]
        ##compute FID within each class
        intra_fid_scores[i] = compute_FID(PreNetFID, images_train_i, fake_images_i, batch_size = args.eval_FID_batch_size, resize = (299, 299))
        print("\r Evaluating: Class:{}; Real:{}; Fake:{}; FID:{}; Time elapses:{}s.".format(i+1, len(images_train_i), len(fake_images_i), intra_fid_scores[i], timeit.default_timer()-start_time))
    ##end for i
    # average over all classes
    print("\n Evaluating: Intra-FID: {}({}); min/max: {}/{}.".format(np.mean(intra_fid_scores), np.std(intra_fid_scores), np.min(intra_fid_scores), np.max(intra_fid_scores)))
    # dump FID versus class to npy
    dump_fids_filename = save_evalresults_folder + "/{}_subsampling_{}_fids".format(args.gan_net, subsampling_method)
    np.savez(dump_fids_filename, fids=intra_fid_scores)
    #####################
    ## Compute FID: real vs fake
    # Shuffle both sets so batches mix classes before feature extraction.
    print("\n Start compute FID between real and fake images...")
    indx_shuffle_real = np.arange(len(images_train)); np.random.shuffle(indx_shuffle_real)
    indx_shuffle_fake = np.arange(len(fake_images)); np.random.shuffle(indx_shuffle_fake)
    fid_score = compute_FID(PreNetFID, images_train[indx_shuffle_real], fake_images[indx_shuffle_fake], batch_size = args.eval_FID_batch_size, resize = (299, 299))
    print("\n Evaluating: FID between {} real and {} fake images: {}.".format(len(images_train), len(fake_images), fid_score))
    #####################
    ## Compute IS
    print("\n Start compute IS of fake images...")
    indx_shuffle_fake = np.arange(len(fake_images)); np.random.shuffle(indx_shuffle_fake)
    is_score, is_score_std = compute_IS(PreNetFID, fake_images[indx_shuffle_fake], batch_size = args.eval_FID_batch_size, splits=10, resize=(299,299))
    print("\n Evaluating: IS of {} fake images: {}({}).".format(len(fake_images), is_score, is_score_std))
    #####################
    # Dump evaluation results (appended to a per-setting text log)
    eval_results_fullpath = os.path.join(save_evalresults_folder, '{}_subsampling_{}.txt'.format(args.gan_net, subsampling_method))
    if not os.path.isfile(eval_results_fullpath):
        eval_results_logging_file = open(eval_results_fullpath, "w")
        eval_results_logging_file.close()
    with open(eval_results_fullpath, 'a') as eval_results_logging_file:
        eval_results_logging_file.write("\n===================================================================================================")
        eval_results_logging_file.write("\n Separate results for Subsampling {} \n".format(subsampling_method))
        print(args, file=eval_results_logging_file)
        eval_results_logging_file.write("\n Intra-FID: {}({}); min/max: {}/{}.".format(np.mean(intra_fid_scores), np.std(intra_fid_scores), np.min(intra_fid_scores), np.max(intra_fid_scores)))
        eval_results_logging_file.write("\n FID: {}.".format(fid_score))
        eval_results_logging_file.write("\n IS: {}({}).".format(is_score, is_score_std))
#######################################################################################
''' Visualize fake images of the trained GAN '''
#######################################################################################
# Save a class-by-sample image grid (one row per class, n_col samples each).
if args.visualize_fake_images:
    # First, visualize conditional generation # vertical grid
    ## 10 rows; 10 columns (10 samples for each class)
    # NOTE(review): the comment above says 10 rows, but n_row is actually
    # args.num_classes (one grid row per class).
    n_row = args.num_classes
    n_col = 10
    fake_images_view = []
    fake_labels_view = []
    for i in range(args.num_classes):
        fake_labels_i = i*np.ones(n_col)
        if args.subsampling:
            fake_images_i, _ = fn_enhancedSampler_given_label(nfake=n_col, given_label=i, batch_size=100, verbose=False)
        else:
            fake_images_i, _ = fn_sampleGAN_given_label(nfake=n_col, given_label=i, batch_size=100, pretrained_netG=netG, to_numpy=True)
        fake_images_view.append(fake_images_i)
        fake_labels_view.append(fake_labels_i)
    ##end for i
    fake_images_view = np.concatenate(fake_images_view, axis=0)
    fake_labels_view = np.concatenate(fake_labels_view, axis=0)
    ### output fake images from a trained GAN
    filename_fake_images = save_evalresults_folder + '/{}_subsampling_{}_fake_image_grid_{}x{}.png'.format(args.gan_net, subsampling_method, n_row, n_col)
    images_show = np.zeros((n_row*n_col, args.num_channels, args.img_size, args.img_size))
    for i_row in range(n_row):
        indx_i = np.where(fake_labels_view==i_row)[0]
        for j_col in range(n_col):
            curr_image = fake_images_view[indx_i[j_col]]
            images_show[i_row*n_col+j_col,:,:,:] = curr_image
    images_show = torch.from_numpy(images_show)
    save_image(images_show.data, filename_fake_images, nrow=n_col, normalize=True)
### end if args.visualize_fake_images
print("\n ===================================================================================================")
"""
Compute
Inception Score (IS),
Frechet Inception Discrepency (FID), ref "https://github.com/mseitzer/pytorch-fid/blob/master/fid_score.py"
Maximum Mean Discrepancy (MMD)
for a set of fake images
use numpy array
Xr: high-level features for real images; nr by d array
Yr: labels for real images
Xg: high-level features for fake images; ng by d array
Yg: labels for fake images
IMGSr: real images
IMGSg: fake images
"""
import gc
import numpy as np
from numpy import linalg as LA
from scipy import linalg
import torch
import torch.nn as nn
from scipy.stats import entropy
from torch.nn import functional as F
import sys
# ################################################################################
# Progress Bar
class SimpleProgressBar():
    """Minimal text progress bar drawn on one stdout line via carriage returns."""
    def __init__(self, width=50):
        # last integer percentage that was drawn; -1 forces the first draw
        self.last_x = -1
        self.width = width
    def update(self, x):
        """Redraw the bar for progress `x` (percent, 0..100); no-op if unchanged."""
        assert 0 <= x <= 100
        cur = int(x)
        if cur == self.last_x:
            return
        self.last_x = cur
        filled = int(self.width * (x / 100.0))
        bar = '#' * filled + '.' * (self.width - filled)
        sys.stdout.write('\r%d%% [%s]' % (cur, bar))
        sys.stdout.flush()
        if x == 100:
            print('')
##############################################################################
# FID scores
##############################################################################
# compute FID based on extracted features
def FID_(Xr, Xg, eps=1e-10):
    '''
    Compute the Frechet Inception Distance between two feature sets.

    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Xr  : (nr, d) array of features extracted from real images.
    Xg  : (ng, d) array of features extracted from fake images.
    eps : jitter added to the covariance diagonals if sqrtm is singular.
    Returns the scalar FID.
    '''
    # sample means
    MUr = np.mean(Xr, axis = 0)
    MUg = np.mean(Xg, axis = 0)
    # NOTE: the formula uses the *squared* Euclidean distance between the
    # means; the previous unsquared norm under-reported the mean term.
    mean_diff = LA.norm( MUr - MUg )**2
    # sample covariances
    SIGMAr = np.cov(Xr.transpose())
    SIGMAg = np.cov(Xg.transpose())
    # Product might be almost singular
    covmean, _ = linalg.sqrtm(SIGMAr.dot(SIGMAg), disp=False) # square root of a matrix
    covmean = covmean.real
    if not np.isfinite(covmean).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        offset = np.eye(SIGMAr.shape[0]) * eps
        covmean = linalg.sqrtm((SIGMAr + offset).dot(SIGMAg + offset))
    # fid score
    fid_score = mean_diff + np.trace(SIGMAr + SIGMAg - 2*covmean)
    return fid_score
##test
#Xr = np.random.rand(10000,1000)
#Xg = np.random.rand(10000,1000)
#print(FID_(Xr, Xg))
# compute FID from raw images
def compute_FID(PreNetFID, IMGSr, IMGSg, batch_size = 500, resize = None, verbose = False):
    """
    Compute the FID between two raw-image arrays.

    PreNetFID : feature extractor whose forward returns (logits, features).
    IMGSr/IMGSg : (n, C, H, W) numpy arrays of real / fake images.
    resize : None, or (H, W) to bilinearly resize images before the net.
    Returns the scalar FID computed on the extracted features.
    """
    PreNetFID = PreNetFID.cuda()
    PreNetFID.eval()

    nr = IMGSr.shape[0]
    ng = IMGSg.shape[0]
    if batch_size>min(nr,ng):
        batch_size = min(nr,ng)

    # compute the length of extracted features with a tiny probe batch
    with torch.no_grad():
        test_img = torch.from_numpy(IMGSr[0:2].reshape((2,IMGSr.shape[1],IMGSr.shape[2],IMGSr.shape[3]))).type(torch.float).cuda()
        if resize is not None:
            test_img = nn.functional.interpolate(test_img, size = resize, scale_factor=None, mode='bilinear', align_corners=False)
        _, test_features = PreNetFID(test_img)
        d = test_features.shape[1] #length of extracted features

    Xr = np.zeros((nr, d))
    Xg = np.zeros((ng, d))

    def _extract(IMGS, X, n, pb):
        # Fill X row-by-row in steps of batch_size. Unlike the previous
        # version (which iterated range(n//batch_size)), the final partial
        # batch is also processed, so no rows are left as zeros — dropped
        # rows silently biased the FID.
        tmp = 0
        while tmp < n:
            img_tensor = torch.from_numpy(IMGS[tmp:(tmp+batch_size)]).type(torch.float).cuda()
            if resize is not None:
                img_tensor = nn.functional.interpolate(img_tensor, size = resize, scale_factor=None, mode='bilinear', align_corners=False)
            _, X_tmp = PreNetFID(img_tensor)
            X[tmp:(tmp+len(img_tensor))] = X_tmp.detach().cpu().numpy()
            tmp += len(img_tensor)
            if pb is not None:
                pb.update(min(100, float(tmp)*100/n))
            del X_tmp, img_tensor; gc.collect()
            torch.cuda.empty_cache()

    with torch.no_grad():
        _extract(IMGSr, Xr, nr, SimpleProgressBar() if verbose else None)
        _extract(IMGSg, Xg, ng, SimpleProgressBar() if verbose else None)

    PreNetFID = PreNetFID.cpu()
    fid_score = FID_(Xr, Xg, eps=1e-6)
    return fid_score
##############################################################################
# Inception Scores
##############################################################################
def compute_IS(PreNetIS, IMGSg, batch_size = 500, splits=1, resize = None, verbose=False):
    """
    Compute the Inception Score of a set of images.

    PreNetIS : classifier whose forward returns (logits, features).
    IMGSg    : (N, C, H, W) numpy array of images.
    splits   : number of splits; the mean/std of the per-split scores is returned.
    resize   : None, or (H, W) to bilinearly resize images before the net.
    Returns (mean_IS, std_IS).
    """
    PreNetIS = PreNetIS.cuda()
    PreNetIS.eval()

    N = IMGSg.shape[0]
    if batch_size > N:
        batch_size = N  # avoid an empty loop when fewer images than batch_size

    # compute the number of classes with a tiny probe batch
    with torch.no_grad():
        test_img = torch.from_numpy(IMGSg[0:2].reshape((2,IMGSg.shape[1],IMGSg.shape[2],IMGSg.shape[3]))).type(torch.float).cuda()
        if resize is not None:
            test_img = nn.functional.interpolate(test_img, size = resize, scale_factor=None, mode='bilinear', align_corners=False)
        test_output, _ = PreNetIS(test_img)
        nc = test_output.shape[1] #number of classes

    # Get predictions
    def get_pred(x):
        x, _ = PreNetIS(x)
        return F.softmax(x,dim=1).data.cpu().numpy()

    preds = np.zeros((N, nc))
    with torch.no_grad():
        tmp = 0
        if verbose:
            pb = SimpleProgressBar()
        # Step through all N images including the final partial batch; the
        # previous range(N//batch_size) loop dropped the remainder, leaving
        # all-zero rows in `preds` that corrupted the KL terms below.
        while tmp < N:
            imgg_tensor = torch.from_numpy(IMGSg[tmp:(tmp+batch_size)]).type(torch.float).cuda()
            if resize is not None:
                imgg_tensor = nn.functional.interpolate(imgg_tensor, size = resize, scale_factor=None, mode='bilinear', align_corners=False)
            preds[tmp:(tmp+len(imgg_tensor))] = get_pred(imgg_tensor)
            tmp += len(imgg_tensor)
            if verbose:
                pb.update(min(100, float(tmp)*100/N))

    # Now compute the mean kl-div
    split_scores = []
    for k in range(splits):
        part = preds[k * (N // splits): (k+1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    PreNetIS = PreNetIS.cpu()
    return np.mean(split_scores), np.std(split_scores)
| 7,055 | 33.758621 | 140 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/utils.py | import numpy as np
import torch
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
import matplotlib as mpl
from torch.nn import functional as F
import sys
import PIL
from PIL import Image
### import my stuffs ###
from models import *
# ################################################################################
# Progress Bar
class SimpleProgressBar():
    """Console progress indicator drawn in place on one line with '#'/'.' fill."""

    def __init__(self, width=50):
        self.last_x = -1  # last integer percent drawn; -1 = nothing yet
        self.width = width

    def update(self, x):
        assert 0 <= x <= 100  # `x`: progress in percent ( between 0 and 100)
        if self.last_x == int(x):
            return
        self.last_x = int(x)
        done = int(self.width * (x / 100.0))
        remaining = self.width - done
        sys.stdout.write('\r%d%% [%s]' % (int(x), '#' * done + '.' * remaining))
        sys.stdout.flush()
        if x == 100:
            print('')
################################################################################
# torch dataset from numpy array
class IMGs_dataset(torch.utils.data.Dataset):
    """Dataset over an in-memory (N, C, H, W) image array (RGB only).

    If `transform` is given, each image is converted to a PIL RGB image
    (H, W, C layout) before the transform is applied.
    """

    def __init__(self, images, labels=None, transform=None):
        super(IMGs_dataset, self).__init__()
        self.images = images
        self.n_images = len(self.images)
        self.labels = labels
        if labels is not None and len(self.images) != len(self.labels):
            raise Exception('images (' + str(len(self.images)) +') and labels ('+str(len(self.labels))+') do not have the same length!!!')
        self.transform = transform

    def __getitem__(self, index):
        ## for RGB only
        sample = self.images[index]
        if self.transform is not None:
            # C * H * W ----> H * W * C, then to a PIL image for the transform
            sample = np.transpose(sample, (1, 2, 0))
            sample = Image.fromarray(np.uint8(sample), mode = 'RGB')
            sample = self.transform(sample)
        if self.labels is None:
            return sample
        return sample, self.labels[index]

    def __len__(self):
        return self.n_images
class IMGs_dataset2(torch.utils.data.Dataset):
    """Dataset over in-memory images with optional [0,255] -> [-1,1] scaling."""

    def __init__(self, images, labels=None, normalize=False):
        super(IMGs_dataset2, self).__init__()
        self.images = images
        self.n_images = len(self.images)
        self.labels = labels
        if labels is not None and len(self.images) != len(self.labels):
            raise Exception('images (' + str(len(self.images)) +') and labels ('+str(len(self.labels))+') do not have the same length!!!')
        self.normalize = normalize

    def __getitem__(self, index):
        sample = self.images[index]
        if self.normalize:
            # map raw [0, 255] pixel values to [-1, 1]
            sample = sample / 255.0
            sample = (sample - 0.5) / 0.5
        if self.labels is None:
            return sample
        return (sample, self.labels[index])

    def __len__(self):
        return self.n_images
################################################################################
def PlotLoss(loss, filename):
    """Plot a training-loss curve (one value per epoch) and save it to `filename`.

    loss     : sequence of per-epoch loss values.
    filename : output image path for the saved figure.
    """
    x_axis = np.arange(start = 1, stop = len(loss)+1)
    plt.switch_backend('agg')
    mpl.style.use('seaborn')
    fig = plt.figure()
    ax = plt.subplot(111)
    # label the curve so plt.legend() has a handle to show
    # (an unlabeled plot made legend() a warning no-op before)
    ax.plot(x_axis, np.array(loss), label='training loss')
    plt.xlabel("epoch")
    plt.ylabel("training loss")
    plt.legend()
    #ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), shadow=True, ncol=3)
    #plt.title('Training Loss')
    plt.savefig(filename)
    plt.close(fig)  # release the figure so repeated calls do not leak memory
################################################################################
# Convenience function to count the number of parameters in a module
def count_parameters(module, verbose=True):
    """Return the total number of scalar parameters in `module`
    (optionally printing it)."""
    num_parameters = sum(p.data.nelement() for p in module.parameters())
    if verbose:
        print('Number of parameters: {}'.format(num_parameters))
    return num_parameters
################################################################################
# Init. CNNs
def cnn_extract_initialization(cnn_name, num_classes=100):
    """Build the named feature-extraction CNN, wrap it in DataParallel and move it to GPU."""
    if cnn_name != "ResNet34":
        raise Exception("not supported!!")
    net = ResNet34_extract(num_classes=num_classes)
    net = nn.DataParallel(net)
    net = net.cuda()
    return net
################################################################################
# predict class labels
def predict_labels(net, images, batch_size=500, normalize_imgs=False, verbose=False, num_workers=0):
    """Predict class labels for an in-memory image array with `net`.

    net            : classifier whose forward returns (logits, features).
    images         : array of images, iterated in (N, C, H, W) order.
    batch_size     : mini-batch size (clamped to len(images)).
    normalize_imgs : if True, IMGs_dataset2 rescales pixels from [0,255] to [-1,1].
    verbose        : show a console progress bar.
    num_workers    : DataLoader worker processes.

    Returns a length-n numpy array of predicted class indices (stored as
    floats, since the buffer is created with np.zeros).
    """
    net = net.cuda()
    net.eval()
    n = len(images)
    if batch_size>n:
        batch_size=n
    dataset_pred = IMGs_dataset2(images, normalize=normalize_imgs)
    dataloader_pred = torch.utils.data.DataLoader(dataset_pred, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    # over-allocate by one batch so the last (possibly partial) batch always
    # fits, then trim back to n at the end
    class_labels_pred = np.zeros(n+batch_size)
    with torch.no_grad():
        nimgs_got = 0
        if verbose:
            pb = SimpleProgressBar()
        for batch_idx, batch_images in enumerate(dataloader_pred):
            batch_images = batch_images.type(torch.float).cuda()
            batch_size_curr = len(batch_images)
            outputs,_ = net(batch_images)
            # argmax over the class logits
            _, batch_class_labels_pred = torch.max(outputs.data, 1)
            class_labels_pred[nimgs_got:(nimgs_got+batch_size_curr)] = batch_class_labels_pred.detach().cpu().numpy().reshape(-1)
            nimgs_got += batch_size_curr
            if verbose:
                pb.update((float(nimgs_got)/n)*100)
        #end for batch_idx
    class_labels_pred = class_labels_pred[0:n]
    return class_labels_pred
| 5,464 | 30.959064 | 143 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/train_cdre.py | '''
Functions for Training Class-conditional Density-ratio model
'''
import torch
import torch.nn as nn
import numpy as np
import os
import timeit
from utils import SimpleProgressBar
from opts import gen_synth_data_opts
''' Settings '''
args = gen_synth_data_opts()
# training function
def train_cdre(trainloader, dre_net, dre_precnn_net, netG, path_to_ckpt=None):
    """Train the class-conditional density-ratio estimation (cDRE) network.

    trainloader    : loader yielding (real_images, labels) batches.
    dre_net        : density-ratio network, called as dre_net(features, labels).
    dre_precnn_net : frozen feature extractor; forward returns (logits, features).
    netG           : frozen conditional generator producing fake images for the
                     same labels as each real batch.
    path_to_ckpt   : directory for checkpoints / loss logs; with
                     args.dre_resume_epoch > 0 training resumes from there.

    Hyper-parameters come from the module-level `args` (gen_synth_data_opts).
    Returns (dre_net, avg_train_loss) where avg_train_loss holds the
    per-epoch mean training loss.
    """
    # some parameters in the opts
    dim_gan = args.gan_dim_g
    dre_net_name = args.dre_net
    dre_epochs = args.dre_epochs
    dre_lr_base = args.dre_lr_base
    dre_lr_decay_factor = args.dre_lr_decay_factor
    dre_lr_decay_epochs = (args.dre_lr_decay_epochs).split("_")
    dre_lr_decay_epochs = [int(epoch) for epoch in dre_lr_decay_epochs]
    dre_lambda = args.dre_lambda
    dre_resume_epoch = args.dre_resume_epoch

    ''' learning rate decay '''
    def adjust_learning_rate(optimizer, epoch):
        # multiply the base LR by the decay factor once for every decay
        # milestone that `epoch` has already passed
        lr = dre_lr_base
        num_decays = len(dre_lr_decay_epochs)
        for decay_i in range(num_decays):
            if epoch >= dre_lr_decay_epochs[decay_i]:
                lr = lr * dre_lr_decay_factor
            #end if epoch
        #end for decay_i
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    #end def adjust lr

    # nets (feature extractor and generator stay frozen in eval mode)
    dre_precnn_net = dre_precnn_net.cuda()
    netG = netG.cuda()
    dre_precnn_net.eval()
    netG.eval()
    dre_net = dre_net.cuda()

    # define optimizer
    optimizer = torch.optim.Adam(dre_net.parameters(), lr = dre_lr_base, betas=(0.5, 0.999), weight_decay=1e-4)

    if path_to_ckpt is not None and dre_resume_epoch>0:
        print("Loading ckpt to resume training dre_net >>>")
        ckpt_fullpath = path_to_ckpt + "/cDRE_{}_checkpoint_epoch_{}.pth".format(dre_net_name, dre_resume_epoch)
        checkpoint = torch.load(ckpt_fullpath)
        dre_net.load_state_dict(checkpoint['net_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        #load d_loss and g_loss
        logfile_fullpath = path_to_ckpt + "/cDRE_{}_train_loss_epoch_{}.npz".format(dre_net_name, dre_resume_epoch)
        if os.path.isfile(logfile_fullpath):
            avg_train_loss = list(np.load(logfile_fullpath))
        else:
            avg_train_loss = []
    else:
        avg_train_loss = []

    start_time = timeit.default_timer()
    for epoch in range(dre_resume_epoch, dre_epochs):
        adjust_learning_rate(optimizer, epoch)
        train_loss = 0
        for batch_idx, (batch_real_images, batch_real_labels) in enumerate(trainloader):
            dre_net.train()
            batch_size_curr = batch_real_images.shape[0]
            batch_real_images = batch_real_images.type(torch.float).cuda()
            num_unique_classes = len(list(set(batch_real_labels.numpy())))
            batch_real_labels = batch_real_labels.type(torch.long).cuda()

            with torch.no_grad():
                # generate fake images for the same labels as the real batch
                z = torch.randn(batch_size_curr, dim_gan, dtype=torch.float).cuda()
                batch_fake_images = netG(z, batch_real_labels)
                batch_fake_images = batch_fake_images.detach()
            # extract high-level features for both (detached: the extractor is frozen)
            _, batch_features_real = dre_precnn_net(batch_real_images)
            batch_features_real = batch_features_real.detach()
            _, batch_features_fake = dre_precnn_net(batch_fake_images)
            batch_features_fake = batch_features_fake.detach()

            # density ratios for real and fake images
            DR_real = dre_net(batch_features_real, batch_real_labels)
            DR_fake = dre_net(batch_features_fake, batch_real_labels)

            #Softplus loss
            softplus_fn = torch.nn.Softplus(beta=1,threshold=20)
            sigmoid_fn = torch.nn.Sigmoid()
            SP_div = torch.mean(sigmoid_fn(DR_fake) * DR_fake) - torch.mean(softplus_fn(DR_fake)) - torch.mean(sigmoid_fn(DR_real))

            #penalty term: prevent assigning zero to all fake image
            # penalty = dre_lambda * (torch.mean(DR_fake)/num_unique_classes - 1)**2
            # loss = SP_div/num_unique_classes + penalty
            penalty = dre_lambda * (torch.mean(DR_fake) - 1)**2
            loss = SP_div + penalty

            #backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_loss += loss.cpu().item()
        # end for batch_idx

        print("cDRE+{}+lambda{}: [epoch {}/{}] [train loss {}] [Time {}]".format(dre_net_name, dre_lambda, epoch+1, dre_epochs, train_loss/(batch_idx+1), timeit.default_timer()-start_time))

        avg_train_loss.append(train_loss/(batch_idx+1))

        # save checkpoint
        if path_to_ckpt is not None and ((epoch+1) % 50 == 0 or (epoch+1)==dre_epochs):
            ckpt_fullpath = path_to_ckpt + "/cDRE_{}_checkpoint_epoch_{}.pth".format(dre_net_name, epoch+1)
            torch.save({
                    'epoch': epoch,
                    'net_state_dict': dre_net.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
            }, ckpt_fullpath)

            # save loss
            logfile_fullpath = path_to_ckpt + "/cDRE_{}_train_loss_epoch_{}.npz".format(dre_net_name, epoch+1)
            np.savez(logfile_fullpath, np.array(avg_train_loss))
    #end for epoch

    netG = netG.cpu() #back to memory
    return dre_net, avg_train_loss
#end for def
| 5,355 | 35.435374 | 189 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/train_cnn.py | ''' For CNN training and testing. '''
import os
import timeit
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
def denorm(x, means, stds):
    '''
    Re-map an image tensor from per-channel (means, stds) normalization to
    the (0.5, 0.5) normalization, channel by channel.

    x: (N, 3, H, W) torch tensor normalized with `means`/`stds`
    means: means for normalization (length 3)
    stds: stds for normalization (length 3)
    '''
    channels = [
        torch.unsqueeze(x[:, c], 1) * (stds[c] / 0.5) + (means[c] - 0.5) / 0.5
        for c in range(3)
    ]
    return torch.cat(channels, 1)
''' function for cnn training '''
def train_cnn(net, net_name, trainloader, testloader, epochs, resume_epoch=0, save_freq=[100, 150], lr_base=0.1, lr_decay_factor=0.1, lr_decay_epochs=[150, 250], weight_decay=1e-4, extract_feature=False, net_decoder=None, lambda_reconst=0, train_means=None, train_stds=None, path_to_ckpt = None):
    """Train a classifier with SGD, optionally jointly with a reconstruction decoder.

    net             : classifier; if `extract_feature`, its forward returns
                      (logits, features), otherwise just logits.
    net_name        : tag used in checkpoint filenames and log lines.
    save_freq       : epochs (1-based) at which to write a checkpoint.
                      NOTE: mutable default list — only read here, so sharing
                      across calls is harmless, but do not mutate it.
    lr_decay_epochs : epochs after which the LR is multiplied by lr_decay_factor
                      (cumulative; same mutable-default caveat as save_freq).
    net_decoder / lambda_reconst : if both set (and extract_feature), an MSE
                      image-reconstruction loss from the features is added with
                      weight lambda_reconst, and the decoder is trained jointly.
    train_means/train_stds : if given, target images are re-normalized via
                      denorm() before the reconstruction loss (the decoder
                      uses 0.5/0.5 normalization constants).
    path_to_ckpt    : checkpoint directory; with resume_epoch>0 training
                      resumes from the matching checkpoint.

    Returns the trained `net`.
    """

    ''' learning rate decay '''
    def adjust_learning_rate(optimizer, epoch):
        """decrease the learning rate """
        lr = lr_base
        num_decays = len(lr_decay_epochs)
        for decay_i in range(num_decays):
            if epoch >= lr_decay_epochs[decay_i]:
                lr = lr * lr_decay_factor
            #end if epoch
        #end for decay_i
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    net = net.cuda()
    criterion = nn.CrossEntropyLoss()
    params = list(net.parameters())
    if lambda_reconst>0 and net_decoder is not None:
        # jointly optimize the decoder together with the classifier
        net_decoder = net_decoder.cuda()
        criterion_reconst = nn.MSELoss()
        params += list(net_decoder.parameters())
    optimizer = torch.optim.SGD(params, lr = lr_base, momentum= 0.9, weight_decay=weight_decay)
    # optimizer = torch.optim.Adam(params, lr = lr_base, betas=(0, 0.999), weight_decay=weight_decay)

    if path_to_ckpt is not None and resume_epoch>0:
        save_file = path_to_ckpt + "/{}_checkpoint_epoch_{}.pth".format(net_name, resume_epoch)
        if lambda_reconst>0 and net_decoder is not None:
            checkpoint = torch.load(save_file)
            net.load_state_dict(checkpoint['net_state_dict'])
            net_decoder.load_state_dict(checkpoint['net_decoder_state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            torch.set_rng_state(checkpoint['rng_state'])
        else:
            checkpoint = torch.load(save_file)
            net.load_state_dict(checkpoint['net_state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            torch.set_rng_state(checkpoint['rng_state'])
    #end if

    start_time = timeit.default_timer()
    for epoch in range(resume_epoch, epochs):
        net.train()
        train_loss = 0
        train_loss_const = 0
        adjust_learning_rate(optimizer, epoch)
        for batch_idx, (batch_train_images, batch_train_labels) in enumerate(trainloader):
            batch_train_images = batch_train_images.type(torch.float).cuda()
            batch_train_labels = batch_train_labels.type(torch.long).cuda()

            #Forward pass
            if lambda_reconst>0 and net_decoder is not None and extract_feature:
                outputs, features = net(batch_train_images)
                batch_reconst_images = net_decoder(features)
                class_loss = criterion(outputs, batch_train_labels)
                if train_means and train_stds:
                    batch_train_images = denorm(batch_train_images, train_means, train_stds) #decoder use different normalization constants
                reconst_loss = criterion_reconst(batch_reconst_images, batch_train_images)
                loss = class_loss + lambda_reconst * reconst_loss
            else:
                if extract_feature:
                    outputs, _ = net(batch_train_images)
                else:
                    outputs = net(batch_train_images)
                ## standard CE loss
                loss = criterion(outputs, batch_train_labels)

            #backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_loss += loss.cpu().item()
            if lambda_reconst>0 and net_decoder is not None and extract_feature:
                train_loss_const += reconst_loss.cpu().item()
        #end for batch_idx

        test_acc = test_cnn(net, testloader, extract_feature, verbose=False)

        if lambda_reconst>0 and net_decoder is not None and extract_feature:
            print("{}, lambda:{:.3f}: [epoch {}/{}] train_loss:{:.3f}, reconst_loss:{:.3f}, test_acc:{:.3f} Time:{:.4f}".format(net_name, lambda_reconst, epoch+1, epochs, train_loss/(batch_idx+1), train_loss_const/(batch_idx+1), test_acc, timeit.default_timer()-start_time))
        else:
            print('%s: [epoch %d/%d] train_loss:%.3f, test_acc:%.3f Time: %.4f' % (net_name, epoch+1, epochs, train_loss/(batch_idx+1), test_acc, timeit.default_timer()-start_time))

        # save checkpoint
        if path_to_ckpt is not None and ((epoch+1) in save_freq or (epoch+1) == epochs) :
            save_file = path_to_ckpt + "/{}_checkpoint_epoch_{}.pth".format(net_name, epoch+1)
            if lambda_reconst>0 and net_decoder is not None:
                torch.save({
                        'net_state_dict': net.state_dict(),
                        'net_decoder_state_dict': net_decoder.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'rng_state': torch.get_rng_state()
                }, save_file)
            else:
                torch.save({
                        'net_state_dict': net.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'rng_state': torch.get_rng_state()
                }, save_file)
    #end for epoch

    return net
def test_cnn(net, testloader, extract_feature=False, verbose=False):
    """Evaluate classification accuracy of `net` on `testloader`.

    extract_feature : if True, the network's forward returns (logits, features).
    verbose         : print the accuracy and the number of images evaluated.
    Returns accuracy in percent (float).
    """
    net = net.cuda()
    net.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
    with torch.no_grad():
        correct = 0
        total = 0
        for batch_idx, (images, labels) in enumerate(testloader):
            images = images.type(torch.float).cuda()
            labels = labels.type(torch.long).cuda()
            if extract_feature:
                outputs, _ = net(images)
            else:
                outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        if verbose:
            # report the actual number of evaluated images instead of the
            # previously hard-coded "10000"
            print('Test Accuracy of the model on the {} test images: {} %'.format(total, 100.0 * correct / total))
    return 100.0 * correct / total
| 6,774 | 42.152866 | 296 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/BigGANdeep.py | import numpy as np
import math
import functools
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
from models import layers
# import layers
# from sync_batchnorm import SynchronizedBatchNorm2d as SyncBatchNorm2d
# from DiffAugment_pytorch import DiffAugment
# BigGAN-deep: uses a different resblock and pattern
# Architectures for G
# Attention is passed in in the format '32_64' to mean applying an attention
# block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.
# Channel ratio is the ratio of
class GBlock(nn.Module):
    """BigGAN-deep generator residual block (bottleneck design).

    1x1 conv down to in_channels//channel_ratio hidden channels, two 3x3
    convs, then 1x1 conv up to out_channels, each preceded by
    class-conditional BN + activation. Optional 2x upsampling is applied to
    both the residual path and the shortcut; when out_channels <
    in_channels the shortcut simply drops the surplus channels instead of
    using a learned projection.
    """
    def __init__(self, in_channels, out_channels,
                 which_conv=nn.Conv2d, which_bn=layers.bn, activation=None,
                 upsample=None, channel_ratio=4):
        super(GBlock, self).__init__()

        self.in_channels, self.out_channels = in_channels, out_channels
        # bottleneck width
        self.hidden_channels = self.in_channels // channel_ratio
        self.which_conv, self.which_bn = which_conv, which_bn
        self.activation = activation
        # Conv layers
        self.conv1 = self.which_conv(self.in_channels, self.hidden_channels,
                                     kernel_size=1, padding=0)
        self.conv2 = self.which_conv(self.hidden_channels, self.hidden_channels)
        self.conv3 = self.which_conv(self.hidden_channels, self.hidden_channels)
        self.conv4 = self.which_conv(self.hidden_channels, self.out_channels,
                                     kernel_size=1, padding=0)
        # Batchnorm layers (class-conditional: called as bn(h, y))
        self.bn1 = self.which_bn(self.in_channels)
        self.bn2 = self.which_bn(self.hidden_channels)
        self.bn3 = self.which_bn(self.hidden_channels)
        self.bn4 = self.which_bn(self.hidden_channels)
        # upsample layers
        self.upsample = upsample

    def forward(self, x, y):
        # Project down to channel ratio
        h = self.conv1(self.activation(self.bn1(x, y)))
        # Apply next BN-ReLU
        h = self.activation(self.bn2(h, y))
        # Drop channels in x if necessary
        if self.in_channels != self.out_channels:
            x = x[:, :self.out_channels]
        # Upsample both h and x at this point
        if self.upsample:
            h = self.upsample(h)
            x = self.upsample(x)
        # 3x3 convs
        h = self.conv2(h)
        h = self.conv3(self.activation(self.bn3(h, y)))
        # Final 1x1 conv
        h = self.conv4(self.activation(self.bn4(h, y)))
        return h + x
def G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
    """Return the per-resolution channel/upsample/attention plan for G.

    ch        : base channel-width multiplier.
    attention : underscore-separated resolutions at which to insert
                self-attention, e.g. '32_64'.
    ksize, dilation : kept for interface compatibility (unused).
    """
    attn_at = [int(tok) for tok in attention.split('_')]
    # per output-resolution: (in multipliers, out multipliers,
    #                         stage resolutions, attention key exponent range)
    plans = {
        256: ([16, 16, 8, 8, 4, 2], [16, 8, 8, 4, 2, 1],
              [8, 16, 32, 64, 128, 256], range(3, 9)),
        128: ([16, 16, 8, 4, 2], [16, 8, 4, 2, 1],
              [8, 16, 32, 64, 128], range(3, 8)),
        64:  ([16, 16, 8, 4], [16, 8, 4, 2],
              [8, 16, 32, 64], range(3, 7)),
        32:  ([4, 4, 4], [4, 4, 4],
              [8, 16, 32], range(3, 6)),
    }
    arch = {}
    for res, (ins, outs, resolutions, attn_range) in plans.items():
        arch[res] = {'in_channels': [ch * m for m in ins],
                     'out_channels': [ch * m for m in outs],
                     'upsample': [True] * len(ins),
                     'resolution': resolutions,
                     'attention': {2**i: (2**i in attn_at) for i in attn_range}}
    return arch
class BigGANdeep_Generator(nn.Module):
    """BigGAN-deep generator.

    Maps a latent `z` (plus a class embedding `y`) through a linear layer,
    a stack of GBlock stages (G_depth resblocks per resolution, with
    optional self-attention at configured resolutions) and a final
    BN-ReLU-conv, producing a tanh RGB image in [-1, 1].
    """
    def __init__(self, G_ch=64, G_depth=2, dim_z=128, bottom_width=4, resolution=128,
                 G_kernel_size=3, G_attn='64', n_classes=1000,
                 num_G_SVs=1, num_G_SV_itrs=1,
                 G_shared=True, shared_dim=0, hier=False,
                 cross_replica=False, mybn=False,
                 G_activation=nn.ReLU(inplace=False),
                 G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,
                 BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,
                 G_init='ortho', skip_init=False, no_optim=False,
                 G_param='SN', norm_style='bn',
                 **kwargs):
        super(BigGANdeep_Generator, self).__init__()
        # Channel width multiplier
        self.ch = G_ch
        # Number of resblocks per stage
        self.G_depth = G_depth
        # Dimensionality of the latent space
        self.dim_z = dim_z
        # The initial spatial dimensions
        self.bottom_width = bottom_width
        # Resolution of the output
        self.resolution = resolution
        # Kernel size?
        self.kernel_size = G_kernel_size
        # Attention?
        self.attention = G_attn
        # number of classes, for use in categorical conditional generation
        self.n_classes = n_classes
        # Use shared embeddings?
        self.G_shared = G_shared
        # Dimensionality of the shared embedding? Unused if not using G_shared
        self.shared_dim = shared_dim if shared_dim > 0 else dim_z
        # Hierarchical latent space?
        self.hier = hier
        # Cross replica batchnorm?
        self.cross_replica = cross_replica
        # Use my batchnorm?
        self.mybn = mybn
        # nonlinearity for residual blocks
        self.activation = G_activation
        # Initialization style
        self.init = G_init
        # Parameterization style
        self.G_param = G_param
        # Normalization style
        self.norm_style = norm_style
        # Epsilon for BatchNorm?
        self.BN_eps = BN_eps
        # Epsilon for Spectral Norm?
        self.SN_eps = SN_eps
        # fp16?
        self.fp16 = G_fp16
        # Architecture dict
        self.arch = G_arch(self.ch, self.attention)[resolution]

        # Which convs, batchnorms, and linear layers to use
        if self.G_param == 'SN':
            self.which_conv = functools.partial(layers.SNConv2d,
                                kernel_size=3, padding=1,
                                num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
                                eps=self.SN_eps)
            self.which_linear = functools.partial(layers.SNLinear,
                                num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
                                eps=self.SN_eps)
        else:
            self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
            self.which_linear = nn.Linear

        # We use a non-spectral-normed embedding here regardless;
        # For some reason applying SN to G's embedding seems to randomly cripple G
        self.which_embedding = nn.Embedding
        bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared
                     else self.which_embedding)
        self.which_bn = functools.partial(layers.ccbn,
                              which_linear=bn_linear,
                              cross_replica=self.cross_replica,
                              mybn=self.mybn,
                              input_size=(self.shared_dim + self.dim_z if self.G_shared
                                          else self.n_classes),
                              norm_style=self.norm_style,
                              eps=self.BN_eps)

        # Prepare model
        # If not using shared embeddings, self.shared is just a passthrough
        self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared
                        else layers.identity())
        # First linear layer
        self.linear = self.which_linear(self.dim_z + self.shared_dim, self.arch['in_channels'][0] * (self.bottom_width **2))

        # self.blocks is a doubly-nested list of modules, the outer loop intended
        # to be over blocks at a given resolution (resblocks and/or self-attention)
        # while the inner loop is over a given block
        self.blocks = []
        for index in range(len(self.arch['out_channels'])):
            self.blocks += [[GBlock(in_channels=self.arch['in_channels'][index],
                             out_channels=self.arch['in_channels'][index] if g_index==0 else self.arch['out_channels'][index],
                             which_conv=self.which_conv,
                             which_bn=self.which_bn,
                             activation=self.activation,
                             upsample=(functools.partial(F.interpolate, scale_factor=2)
                                       if self.arch['upsample'][index] and g_index == (self.G_depth-1) else None))]
                             for g_index in range(self.G_depth)]

            # If attention on this block, attach it to the end
            if self.arch['attention'][self.arch['resolution'][index]]:
                print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])
                self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]

        # Turn self.blocks into a ModuleList so that it's all properly registered.
        self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])

        # output layer: batchnorm-relu-conv.
        # Consider using a non-spectral conv here
        self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],
                                                    cross_replica=self.cross_replica,
                                                    mybn=self.mybn),
                                        self.activation,
                                        self.which_conv(self.arch['out_channels'][-1], 3))

        # Initialize weights. Optionally skip init for testing.
        if not skip_init:
            self.init_weights()

        # Set up optimizer
        # If this is an EMA copy, no need for an optim, so just return now
        if no_optim:
            return
        self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps
        if G_mixed_precision:
            print('Using fp16 adam in G...')
            import utils
            self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
                                 betas=(self.B1, self.B2), weight_decay=0,
                                 eps=self.adam_eps)
        else:
            self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
                                betas=(self.B1, self.B2), weight_decay=0,
                                eps=self.adam_eps)

        # LR scheduling, left here for forward compatibility
        # self.lr_sched = {'itr' : 0}# if self.progressive else {}
        # self.j = 0

    # Initialize
    def init_weights(self):
        """Initialize conv/linear/embedding weights per `self.init` and count them."""
        self.param_count = 0
        for module in self.modules():
            if (isinstance(module, nn.Conv2d)
                or isinstance(module, nn.Linear)
                or isinstance(module, nn.Embedding)):
                if self.init == 'ortho':
                    init.orthogonal_(module.weight)
                elif self.init == 'N02':
                    init.normal_(module.weight, 0, 0.02)
                elif self.init in ['glorot', 'xavier']:
                    init.xavier_uniform_(module.weight)
                else:
                    print('Init style not recognized...')
                self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print('Param count for G''s initialized parameters: %d' % self.param_count)

    # Note on this forward function: we pass in a y vector which has
    # already been passed through G.shared to enable easy class-wise
    # interpolation later. If we passed in the one-hot and then ran it through
    # G.shared in this forward function, it would be harder to handle.
    # NOTE: The z vs y dichotomy here is for compatibility with not-y
    def forward(self, z, y):
        """Generate images from latent `z` and (already-embedded) class vector `y`."""
        # If hierarchical, concatenate zs and ys
        if self.hier:
            z = torch.cat([y, z], 1)
            y = z
        # First linear layer
        h = self.linear(z)
        # Reshape
        h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)
        # Loop over blocks
        for index, blocklist in enumerate(self.blocks):
            # Second inner loop in case block has multiple layers
            for block in blocklist:
                h = block(h, y)
        # Apply batchnorm-relu-conv-tanh at output
        return torch.tanh(self.output_layer(h))
class DBlock(nn.Module):
    """BigGAN-deep discriminator residual block (pre-activation bottleneck).

    1x1 conv down to out_channels//channel_ratio hidden channels, two 3x3
    convs, optional downsampling, then 1x1 conv up to out_channels. When
    out_channels > in_channels, the shortcut keeps the input and appends
    the extra channels via a learned 1x1 projection (conv_sc).
    """
    def __init__(self, in_channels, out_channels, which_conv=layers.SNConv2d, wide=True,
                 preactivation=True, activation=None, downsample=None,
                 channel_ratio=4):
        super(DBlock, self).__init__()
        self.in_channels, self.out_channels = in_channels, out_channels
        # If using wide D (as in SA-GAN and BigGAN), change the channel pattern
        self.hidden_channels = self.out_channels // channel_ratio
        self.which_conv = which_conv
        self.preactivation = preactivation
        self.activation = activation
        self.downsample = downsample

        # Conv layers
        self.conv1 = self.which_conv(self.in_channels, self.hidden_channels,
                                     kernel_size=1, padding=0)
        self.conv2 = self.which_conv(self.hidden_channels, self.hidden_channels)
        self.conv3 = self.which_conv(self.hidden_channels, self.hidden_channels)
        self.conv4 = self.which_conv(self.hidden_channels, self.out_channels,
                                     kernel_size=1, padding=0)

        self.learnable_sc = True if (in_channels != out_channels) else False
        if self.learnable_sc:
            # projection producing only the *extra* channels, concatenated below
            self.conv_sc = self.which_conv(in_channels, out_channels - in_channels,
                                           kernel_size=1, padding=0)

    def shortcut(self, x):
        # identity (optionally downsampled), with extra channels appended
        if self.downsample:
            x = self.downsample(x)
        if self.learnable_sc:
            x = torch.cat([x, self.conv_sc(x)], 1)
        return x

    def forward(self, x):
        # 1x1 bottleneck conv
        h = self.conv1(F.relu(x))
        # 3x3 convs
        h = self.conv2(self.activation(h))
        h = self.conv3(self.activation(h))
        # relu before downsample
        h = self.activation(h)
        # downsample
        if self.downsample:
            h = self.downsample(h)
        # final 1x1 conv
        h = self.conv4(h)
        return h + self.shortcut(x)
# Discriminator architecture, same paradigm as G's above
def D_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
    """Return the per-resolution channel/downsample/attention plan for D.

    ch        : base channel-width multiplier.
    attention : underscore-separated resolutions at which to insert
                self-attention, e.g. '32_64'.
    ksize, dilation : kept for interface compatibility (unused).
    """
    attn_at = [int(tok) for tok in attention.split('_')]
    # per input-resolution: (in multipliers, out multipliers, downsample flags,
    #                        stage resolutions, attention key exponent range)
    plans = {
        256: ([1, 2, 4, 8, 8, 16], [2, 4, 8, 8, 16, 16], [True] * 6 + [False],
              [128, 64, 32, 16, 8, 4, 4], range(2, 8)),
        128: ([1, 2, 4, 8, 16], [2, 4, 8, 16, 16], [True] * 5 + [False],
              [64, 32, 16, 8, 4, 4], range(2, 8)),
        64:  ([1, 2, 4, 8], [2, 4, 8, 16], [True] * 4 + [False],
              [32, 16, 8, 4, 4], range(2, 7)),
        32:  ([4, 4, 4], [4, 4, 4], [True, True, False, False],
              [16, 16, 16, 16], range(2, 6)),
    }
    arch = {}
    for res, (ins, outs, down, resolutions, attn_range) in plans.items():
        arch[res] = {'in_channels': [m * ch for m in ins],
                     'out_channels': [m * ch for m in outs],
                     'downsample': down,
                     'resolution': resolutions,
                     'attention': {2**i: 2**i in attn_at for i in attn_range}}
    return arch
class BigGANdeep_Discriminator(nn.Module):
  """BigGAN-deep projection discriminator.

  A stem conv feeds a stack of DBlock stages (D_depth resblocks per stage,
  laid out by D_arch), optionally interleaved with self-attention. Features
  are globally sum-pooled, scored by a linear head, and combined with a
  class-conditional projection term embed(y) . h.
  """
  def __init__(self, D_ch=64, D_wide=True, D_depth=2, resolution=128,
               D_kernel_size=3, D_attn='64', n_classes=1000,
               num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),
               D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,
               SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,
               D_init='ortho', skip_init=False, D_param='SN', **kwargs):
    super(BigGANdeep_Discriminator, self).__init__()
    # Width multiplier
    self.ch = D_ch
    # Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?
    self.D_wide = D_wide
    # How many resblocks per stage?
    self.D_depth = D_depth
    # Resolution
    self.resolution = resolution
    # Kernel size
    self.kernel_size = D_kernel_size
    # Attention?
    self.attention = D_attn
    # Number of classes
    self.n_classes = n_classes
    # Activation
    self.activation = D_activation
    # Initialization style
    self.init = D_init
    # Parameterization style
    self.D_param = D_param
    # Epsilon for Spectral Norm?
    self.SN_eps = SN_eps
    # Fp16?
    self.fp16 = D_fp16
    # Architecture
    self.arch = D_arch(self.ch, self.attention)[resolution]
    # Which convs, batchnorms, and linear layers to use
    # No option to turn off SN in D right now
    # NOTE(review): if D_param != 'SN', the which_* factories are never set and
    # layer construction below raises AttributeError — confirm this is intended.
    if self.D_param == 'SN':
      self.which_conv = functools.partial(layers.SNConv2d,
                          kernel_size=3, padding=1,
                          num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                          eps=self.SN_eps)
      self.which_linear = functools.partial(layers.SNLinear,
                          num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                          eps=self.SN_eps)
      self.which_embedding = functools.partial(layers.SNEmbedding,
                              num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                              eps=self.SN_eps)
    # Prepare model
    # Stem convolution
    self.input_conv = self.which_conv(3, self.arch['in_channels'][0])
    # self.blocks is a doubly-nested list of modules, the outer loop intended
    # to be over blocks at a given resolution (resblocks and/or self-attention)
    self.blocks = []
    for index in range(len(self.arch['out_channels'])):
      # Only the first resblock of a stage (d_index == 0) changes channels
      # and (optionally) downsamples; the rest keep the stage shape.
      self.blocks += [[DBlock(in_channels=self.arch['in_channels'][index] if d_index==0 else self.arch['out_channels'][index],
                       out_channels=self.arch['out_channels'][index],
                       which_conv=self.which_conv,
                       wide=self.D_wide,
                       activation=self.activation,
                       preactivation=True,
                       downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] and d_index==0 else None))
                       for d_index in range(self.D_depth)]]
      # If attention on this block, attach it to the end
      if self.arch['attention'][self.arch['resolution'][index]]:
        print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])
        self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],
                                             self.which_conv)]
    # Turn self.blocks into a ModuleList so that it's all properly registered.
    self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
    # Linear output layer. The output dimension is typically 1, but may be
    # larger if we're e.g. turning this into a VAE with an inference output
    self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)
    # Embedding for projection discrimination
    self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])
    # Initialize weights
    if not skip_init:
      self.init_weights()
    # Set up optimizer
    self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps
    if D_mixed_precision:
      print('Using fp16 adam in D...')
      import utils
      self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
                             betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
    else:
      self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
                             betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
    # LR scheduling, left here for forward compatibility
    # self.lr_sched = {'itr' : 0}# if self.progressive else {}
    # self.j = 0
  # Initialize
  def init_weights(self):
    """Apply the configured init (ortho/N02/xavier) to conv/linear/embedding weights."""
    self.param_count = 0
    for module in self.modules():
      if (isinstance(module, nn.Conv2d)
          or isinstance(module, nn.Linear)
          or isinstance(module, nn.Embedding)):
        if self.init == 'ortho':
          init.orthogonal_(module.weight)
        elif self.init == 'N02':
          init.normal_(module.weight, 0, 0.02)
        elif self.init in ['glorot', 'xavier']:
          init.xavier_uniform_(module.weight)
        else:
          print('Init style not recognized...')
        self.param_count += sum([p.data.nelement() for p in module.parameters()])
    print('Param count for D''s initialized parameters: %d' % self.param_count)
  def forward(self, x, y=None):
    """Score images x under class labels y.

    Despite the None default, y is effectively required: the projection term
    calls self.embed(y) unconditionally.
    """
    # Run input conv
    h = self.input_conv(x)
    # Loop over blocks
    for index, blocklist in enumerate(self.blocks):
      for block in blocklist:
        h = block(h)
    # Apply global sum pooling as in SN-GAN
    h = torch.sum(self.activation(h), [2, 3])
    # Get initial class-unconditional output
    out = self.linear(h)
    # Get projection of final featureset onto class vectors and add to evidence
    out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)
    return out
# # Parallelized G_D to minimize cross-gpu communication
# # Without this, Generator outputs would get all-gathered and then rebroadcast.
# class G_D(nn.Module):
# def __init__(self, G, D, DiffAugment_policy='color,translation,cutout'):
# super(G_D, self).__init__()
# self.G = G
# self.D = D
# self.DiffAugment_policy = DiffAugment_policy
# if self.DiffAugment_policy != 'None':
# print('\n Use DiffAugment in the BigGAN training...')
# def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,
# split_D=False):
# # If training G, enable grad tape
# with torch.set_grad_enabled(train_G):
# # Get Generator output given noise
# G_z = self.G(z, self.G.shared(gy))
# # Cast as necessary
# if self.G.fp16 and not self.D.fp16:
# G_z = G_z.float()
# if self.D.fp16 and not self.G.fp16:
# G_z = G_z.half()
# # Split_D means to run D once with real data and once with fake,
# # rather than concatenating along the batch dimension.
# if split_D:
# if self.DiffAugment_policy != "None":
# D_fake = self.D(DiffAugment(G_z, policy=self.DiffAugment_policy), gy)
# else:
# D_fake = self.D(G_z, gy)
# # D_fake = self.D(G_z, gy)
# if x is not None:
# if self.DiffAugment_policy != "None":
# D_real = self.D(DiffAugment(x, policy=self.DiffAugment_policy), dy)
# else:
# D_real = self.D(x, dy)
# # D_real = self.D(x, dy)
# return D_fake, D_real
# else:
# if return_G_z:
# return D_fake, G_z
# else:
# return D_fake
# # If real data is provided, concatenate it with the Generator's output
# # along the batch dimension for improved efficiency.
# else:
# D_input = torch.cat([G_z, x], 0) if x is not None else G_z
# D_class = torch.cat([gy, dy], 0) if dy is not None else gy
# # Get Discriminator output
# if self.DiffAugment_policy != "None":
# D_input = DiffAugment(D_input, policy=self.DiffAugment_policy)
# D_out = self.D(D_input, D_class)
# if x is not None:
# return torch.split(D_out, [G_z.shape[0], x.shape[0]]) # D_fake, D_real
# else:
# if return_G_z:
# return D_out, G_z
# else:
# return D_out
| 23,873 | 42.25 | 126 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/efficientnet.py | '''EfficientNet in PyTorch.
Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
    """Swish / SiLU activation: x * sigmoid(x)."""
    return x.sigmoid().mul(x)
def drop_connect(x, drop_ratio):
    """Stochastic depth: zero out whole samples with probability `drop_ratio`.

    NOTE: mutates `x` in place (div_/mul_) and also returns it. Surviving
    samples are rescaled by 1/keep so the expected value is unchanged
    (inverted-dropout convention).
    """
    keep_prob = 1.0 - drop_ratio
    # One Bernoulli draw per sample, broadcast over (C, H, W).
    keep_mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
    keep_mask.bernoulli_(keep_prob)
    x.div_(keep_prob).mul_(keep_mask)
    return x
class SE(nn.Module):
    '''Squeeze-and-Excitation block with Swish.'''

    def __init__(self, in_channels, se_channels):
        super(SE, self).__init__()
        # Two 1x1 convs implement the squeeze (reduce) / excite (restore) FCs.
        self.se1 = nn.Conv2d(in_channels, se_channels, kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_channels, in_channels, kernel_size=1, bias=True)

    def forward(self, x):
        # Squeeze: global average pool to a 1x1 per-channel descriptor.
        scale = F.adaptive_avg_pool2d(x, (1, 1))
        # Excite: bottleneck with swish, then sigmoid gate in [0, 1].
        scale = swish(self.se1(scale))
        scale = self.se2(scale).sigmoid()
        # Reweight the input channels.
        return x * scale
class Block(nn.Module):
    '''expansion + depthwise + pointwise + squeeze-excitation'''

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio=1,
                 se_ratio=0.,
                 drop_rate=0.):
        super(Block, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio
        hidden = expand_ratio * in_channels
        # 1x1 expansion conv (skipped in forward() when expand_ratio == 1).
        self.conv1 = nn.Conv2d(in_channels, hidden, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(hidden)
        # Depthwise conv: groups == channels, padding keeps "same" size.
        self.conv2 = nn.Conv2d(hidden, hidden, kernel_size=kernel_size,
                               stride=stride,
                               padding=(1 if kernel_size == 3 else 2),
                               groups=hidden, bias=False)
        self.bn2 = nn.BatchNorm2d(hidden)
        # Squeeze-and-excitation, bottleneck sized from the *input* channels.
        self.se = SE(hidden, int(in_channels * se_ratio))
        # 1x1 projection conv back down to out_channels.
        self.conv3 = nn.Conv2d(hidden, out_channels, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        # Residual connection only when shapes match (MobileNetV2 style).
        self.has_skip = (stride == 1) and (in_channels == out_channels)

    def forward(self, x):
        if self.expand_ratio == 1:
            out = x
        else:
            out = swish(self.bn1(self.conv1(x)))
        out = swish(self.bn2(self.conv2(out)))
        out = self.se(out)
        out = self.bn3(self.conv3(out))
        if self.has_skip:
            # Stochastic depth on the residual branch during training only.
            if self.training and self.drop_rate > 0:
                out = drop_connect(out, self.drop_rate)
            out = out + x
        return out
class EfficientNet(nn.Module):
    """EfficientNet backbone driven by a `cfg` dict.

    cfg keys (per stage): 'num_blocks', 'expansion', 'out_channels',
    'kernel_size', 'stride'; plus scalars 'dropout_rate' (classifier dropout)
    and 'drop_connect_rate' (max stochastic-depth rate).
    """

    def __init__(self, cfg, num_classes=10):
        super(EfficientNet, self).__init__()
        self.cfg = cfg
        # Stride-1 3x3 stem (CIFAR-style, keeps spatial size).
        self.conv1 = nn.Conv2d(3,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_channels=32)
        self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)

    def _make_layers(self, in_channels):
        """Build the MBConv stages described by self.cfg."""
        layers = []
        cfg = [self.cfg[k] for k in ['expansion', 'out_channels', 'num_blocks', 'kernel_size',
                                     'stride']]
        b = 0
        blocks = sum(self.cfg['num_blocks'])
        for expansion, out_channels, num_blocks, kernel_size, stride in zip(*cfg):
            strides = [stride] + [1] * (num_blocks - 1)
            for stride in strides:
                # Stochastic-depth rate grows linearly with block index, as in
                # the reference implementation. (Bug fix: `b` was never
                # incremented, so drop_rate was always 0 and drop_connect
                # never fired.)
                drop_rate = self.cfg['drop_connect_rate'] * b / blocks
                layers.append(
                    Block(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          expansion,
                          se_ratio=0.25,
                          drop_rate=drop_rate))
                in_channels = out_channels
                b += 1
        return nn.Sequential(*layers)

    def forward(self, x):
        out = swish(self.bn1(self.conv1(x)))
        out = self.layers(out)
        # Global average pool -> flatten -> (train-time dropout) -> classifier.
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        dropout_rate = self.cfg['dropout_rate']
        if self.training and dropout_rate > 0:
            out = F.dropout(out, p=dropout_rate)
        out = self.linear(out)
        return out
def EfficientNetB0(num_classes=100):
    """EfficientNet-B0 configuration (stride-1 stem variant)."""
    cfg = dict(
        num_blocks=[1, 2, 2, 3, 3, 4, 1],
        expansion=[1, 6, 6, 6, 6, 6, 6],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        kernel_size=[3, 3, 5, 3, 5, 5, 3],
        stride=[1, 2, 2, 2, 1, 2, 1],
        dropout_rate=0.2,
        drop_connect_rate=0.2,
    )
    return EfficientNet(cfg, num_classes=num_classes)
def EfficientNetB0_2(num_classes=100):
    """EfficientNet-B0 variant with stage 5 stride changed from 1 to 2
    (extra downsampling compared to EfficientNetB0)."""
    cfg = dict(
        num_blocks=[1, 2, 2, 3, 3, 4, 1],
        expansion=[1, 6, 6, 6, 6, 6, 6],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        kernel_size=[3, 3, 5, 3, 5, 5, 3],
        # original B0 stride: [1, 2, 2, 2, 1, 2, 1]
        stride=[1, 2, 2, 2, 2, 2, 1],
        dropout_rate=0.2,
        drop_connect_rate=0.2,
    )
    return EfficientNet(cfg, num_classes=num_classes)
def test():
    """Smoke test: forward a random batch through B0 and print the logits shape."""
    model = EfficientNetB0()
    batch = torch.randn(2, 3, 128, 128)
    logits = model(batch)
    print(logits.shape)
if __name__ == '__main__':
test() | 6,221 | 32.272727 | 106 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/BigGAN.py | import numpy as np
import math
import functools
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
from models import layers
# import layers
# from sync_batchnorm import SynchronizedBatchNorm2d as SyncBatchNorm2d
# Architectures for G
# Attention is passed in in the format '32_64' to mean applying an attention
# block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.
def G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
  """Per-resolution channel/upsample/attention specs for the generator.

  `ksize` and `dilation` are accepted for interface compatibility but unused.
  Attention is requested as e.g. '32_64' (apply at 32x32 and 64x64).
  """
  attn_res = {int(tok) for tok in attention.split('_')}
  # resolution -> (in multipliers, out multipliers, per-block resolutions,
  #               attention key range); every block upsamples.
  specs = {
      512: ([16, 16, 8, 8, 4, 2, 1], [16, 8, 8, 4, 2, 1, 1],
            [8, 16, 32, 64, 128, 256, 512], range(3, 10)),
      256: ([16, 16, 8, 8, 4, 2], [16, 8, 8, 4, 2, 1],
            [8, 16, 32, 64, 128, 256], range(3, 9)),
      128: ([16, 16, 8, 4, 2], [16, 8, 4, 2, 1],
            [8, 16, 32, 64, 128], range(3, 8)),
      64: ([16, 16, 8, 4], [16, 8, 4, 2],
           [8, 16, 32, 64], range(3, 7)),
      32: ([4, 4, 4], [4, 4, 4],
           [8, 16, 32], range(3, 6)),
  }
  arch = {}
  for res, (cin, cout, block_res, attn_range) in specs.items():
    arch[res] = {'in_channels': [ch * m for m in cin],
                 'out_channels': [ch * m for m in cout],
                 'upsample': [True] * len(cin),
                 'resolution': block_res,
                 'attention': {2 ** i: (2 ** i in attn_res) for i in attn_range}}
  return arch
class BigGAN_Generator(nn.Module):
  """BigGAN generator.

  Maps a latent z (plus a pre-embedded class vector y) through a linear
  layer and a stack of class-conditional GBlocks (optionally with
  self-attention, per G_arch) to a tanh image at `resolution`. With
  hier=True, z is split into chunks and one chunk is concatenated with y
  at every stage.
  """
  def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,
               G_kernel_size=3, G_attn='64', n_classes=1000,
               num_G_SVs=1, num_G_SV_itrs=1,
               G_shared=True, shared_dim=0, hier=False,
               cross_replica=False, mybn=False,
               G_activation=nn.ReLU(inplace=False),
               G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,
               BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,
               G_init='ortho', skip_init=False, no_optim=False,
               G_param='SN', norm_style='bn',
               **kwargs):
    super(BigGAN_Generator, self).__init__()
    # Channel width multiplier
    self.ch = G_ch
    # Dimensionality of the latent space
    self.dim_z = dim_z
    # The initial spatial dimensions
    self.bottom_width = bottom_width
    # Resolution of the output
    self.resolution = resolution
    # Kernel size?
    self.kernel_size = G_kernel_size
    # Attention?
    self.attention = G_attn
    # number of classes, for use in categorical conditional generation
    self.n_classes = n_classes
    # Use shared embeddings?
    self.G_shared = G_shared
    # Dimensionality of the shared embedding? Unused if not using G_shared
    self.shared_dim = shared_dim if shared_dim > 0 else dim_z
    # Hierarchical latent space?
    self.hier = hier
    # Cross replica batchnorm?
    self.cross_replica = cross_replica
    # Use my batchnorm?
    self.mybn = mybn
    # nonlinearity for residual blocks
    self.activation = G_activation
    # Initialization style
    self.init = G_init
    # Parameterization style
    self.G_param = G_param
    # Normalization style
    self.norm_style = norm_style
    # Epsilon for BatchNorm?
    self.BN_eps = BN_eps
    # Epsilon for Spectral Norm?
    self.SN_eps = SN_eps
    # fp16?
    self.fp16 = G_fp16
    # Architecture dict
    self.arch = G_arch(self.ch, self.attention)[resolution]
    # If using hierarchical latents, adjust z
    if self.hier:
      # Number of places z slots into
      self.num_slots = len(self.arch['in_channels']) + 1
      self.z_chunk_size = (self.dim_z // self.num_slots)
      # Recalculate latent dimensionality for even splitting into chunks
      self.dim_z = self.z_chunk_size * self.num_slots
    else:
      self.num_slots = 1
      self.z_chunk_size = 0
    # Which convs, batchnorms, and linear layers to use
    if self.G_param == 'SN':
      self.which_conv = functools.partial(layers.SNConv2d,
                          kernel_size=3, padding=1,
                          num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
                          eps=self.SN_eps)
      self.which_linear = functools.partial(layers.SNLinear,
                          num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
                          eps=self.SN_eps)
    else:
      self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
      self.which_linear = nn.Linear
    # We use a non-spectral-normed embedding here regardless;
    # For some reason applying SN to G's embedding seems to randomly cripple G
    self.which_embedding = nn.Embedding
    bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared
                 else self.which_embedding)
    # Class-conditional BN: conditioning size is shared-embedding + z-chunk
    # when embeddings are shared, otherwise the raw class count.
    self.which_bn = functools.partial(layers.ccbn,
                          which_linear=bn_linear,
                          cross_replica=self.cross_replica,
                          mybn=self.mybn,
                          input_size=(self.shared_dim + self.z_chunk_size if self.G_shared
                                      else self.n_classes),
                          norm_style=self.norm_style,
                          eps=self.BN_eps)
    # Prepare model
    # If not using shared embeddings, self.shared is just a passthrough
    self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared
                    else layers.identity())
    # First linear layer
    self.linear = self.which_linear(self.dim_z // self.num_slots,
                                    self.arch['in_channels'][0] * (self.bottom_width **2))
    # self.blocks is a doubly-nested list of modules, the outer loop intended
    # to be over blocks at a given resolution (resblocks and/or self-attention)
    # while the inner loop is over a given block
    self.blocks = []
    for index in range(len(self.arch['out_channels'])):
      self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],
                             out_channels=self.arch['out_channels'][index],
                             which_conv=self.which_conv,
                             which_bn=self.which_bn,
                             activation=self.activation,
                             upsample=(functools.partial(F.interpolate, scale_factor=2)
                                       if self.arch['upsample'][index] else None))]]
      # If attention on this block, attach it to the end
      if self.arch['attention'][self.arch['resolution'][index]]:
        print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])
        self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]
    # Turn self.blocks into a ModuleList so that it's all properly registered.
    self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
    # output layer: batchnorm-relu-conv.
    # Consider using a non-spectral conv here
    self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],
                                                cross_replica=self.cross_replica,
                                                mybn=self.mybn),
                                    self.activation,
                                    self.which_conv(self.arch['out_channels'][-1], 3))
    # Initialize weights. Optionally skip init for testing.
    if not skip_init:
      self.init_weights()
    # Set up optimizer
    # If this is an EMA copy, no need for an optim, so just return now
    if no_optim:
      return
    self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps
    if G_mixed_precision:
      print('Using fp16 adam in G...')
      import utils
      self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
                           betas=(self.B1, self.B2), weight_decay=0,
                           eps=self.adam_eps)
    else:
      self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
                           betas=(self.B1, self.B2), weight_decay=0,
                           eps=self.adam_eps)
    # LR scheduling, left here for forward compatibility
    # self.lr_sched = {'itr' : 0}# if self.progressive else {}
    # self.j = 0
  # Initialize
  def init_weights(self):
    """Apply the configured init (ortho/N02/xavier) to conv/linear/embedding weights."""
    self.param_count = 0
    for module in self.modules():
      if (isinstance(module, nn.Conv2d)
          or isinstance(module, nn.Linear)
          or isinstance(module, nn.Embedding)):
        if self.init == 'ortho':
          init.orthogonal_(module.weight)
        elif self.init == 'N02':
          init.normal_(module.weight, 0, 0.02)
        elif self.init in ['glorot', 'xavier']:
          init.xavier_uniform_(module.weight)
        else:
          print('Init style not recognized...')
        self.param_count += sum([p.data.nelement() for p in module.parameters()])
    print('Param count for G''s initialized parameters: %d' % self.param_count)
  # Note on this forward function: we pass in a y vector which has
  # already been passed through G.shared to enable easy class-wise
  # interpolation later. If we passed in the one-hot and then ran it through
  # G.shared in this forward function, it would be harder to handle.
  def forward(self, z, y):
    """Generate images from latent z and pre-embedded class vector y."""
    # If hierarchical, concatenate zs and ys
    if self.hier:
      zs = torch.split(z, self.z_chunk_size, 1)
      z = zs[0]
      ys = [torch.cat([y, item], 1) for item in zs[1:]]
    else:
      ys = [y] * len(self.blocks)
    # First linear layer
    h = self.linear(z)
    # Reshape
    h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)
    # Loop over blocks
    for index, blocklist in enumerate(self.blocks):
      # Second inner loop in case block has multiple layers
      for block in blocklist:
        h = block(h, ys[index])
    # Apply batchnorm-relu-conv-tanh at output
    return torch.tanh(self.output_layer(h))
# Discriminator architecture, same paradigm as G's above
def D_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
  """Per-resolution channel/downsample/attention specs for the discriminator.

  The first in_channels entry is 3 (RGB input). `ksize` and `dilation` are
  accepted for interface compatibility but unused.
  """
  attn_res = {int(tok) for tok in attention.split('_')}
  # resolution -> (in multipliers after the RGB entry, out multipliers,
  #               downsample flags, per-block resolutions, attention range)
  specs = {
      256: ([1, 2, 4, 8, 8, 16], [1, 2, 4, 8, 8, 16, 16],
            [True] * 6 + [False], [128, 64, 32, 16, 8, 4, 4], range(2, 8)),
      128: ([1, 2, 4, 8, 16], [1, 2, 4, 8, 16, 16],
            [True] * 5 + [False], [64, 32, 16, 8, 4, 4], range(2, 8)),
      64: ([1, 2, 4, 8], [1, 2, 4, 8, 16],
           [True] * 4 + [False], [32, 16, 8, 4, 4], range(2, 7)),
      32: ([4, 4, 4], [4, 4, 4, 4],
           [True, True, False, False], [16, 16, 16, 16], range(2, 6)),
  }
  arch = {}
  for res, (cin, cout, down, block_res, attn_range) in specs.items():
    arch[res] = {'in_channels': [3] + [ch * m for m in cin],
                 'out_channels': [ch * m for m in cout],
                 'downsample': down,
                 'resolution': block_res,
                 'attention': {2 ** i: (2 ** i in attn_res) for i in attn_range}}
  return arch
class BigGAN_Discriminator(nn.Module):
  """BigGAN projection discriminator.

  A stack of DBlock stages (laid out by D_arch, starting from RGB input),
  optionally interleaved with self-attention. Features are globally
  sum-pooled, scored by a linear head, and combined with a
  class-conditional projection term embed(y) . h.
  """
  def __init__(self, D_ch=64, D_wide=True, resolution=128,
               D_kernel_size=3, D_attn='64', n_classes=1000,
               num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),
               D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,
               SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,
               D_init='ortho', skip_init=False, D_param='SN', **kwargs):
    super(BigGAN_Discriminator, self).__init__()
    # Width multiplier
    self.ch = D_ch
    # Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?
    self.D_wide = D_wide
    # Resolution
    self.resolution = resolution
    # Kernel size
    self.kernel_size = D_kernel_size
    # Attention?
    self.attention = D_attn
    # Number of classes
    self.n_classes = n_classes
    # Activation
    self.activation = D_activation
    # Initialization style
    self.init = D_init
    # Parameterization style
    self.D_param = D_param
    # Epsilon for Spectral Norm?
    self.SN_eps = SN_eps
    # Fp16?
    self.fp16 = D_fp16
    # Architecture
    self.arch = D_arch(self.ch, self.attention)[resolution]
    # Which convs, batchnorms, and linear layers to use
    # No option to turn off SN in D right now
    # NOTE(review): if D_param != 'SN', the which_* factories are never set and
    # layer construction below raises AttributeError — confirm this is intended.
    if self.D_param == 'SN':
      self.which_conv = functools.partial(layers.SNConv2d,
                          kernel_size=3, padding=1,
                          num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                          eps=self.SN_eps)
      self.which_linear = functools.partial(layers.SNLinear,
                          num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                          eps=self.SN_eps)
      self.which_embedding = functools.partial(layers.SNEmbedding,
                              num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                              eps=self.SN_eps)
    # Prepare model
    # self.blocks is a doubly-nested list of modules, the outer loop intended
    # to be over blocks at a given resolution (resblocks and/or self-attention)
    self.blocks = []
    for index in range(len(self.arch['out_channels'])):
      # First block takes raw RGB, so no pre-activation there.
      self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index],
                       out_channels=self.arch['out_channels'][index],
                       which_conv=self.which_conv,
                       wide=self.D_wide,
                       activation=self.activation,
                       preactivation=(index > 0),
                       downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]
      # If attention on this block, attach it to the end
      if self.arch['attention'][self.arch['resolution'][index]]:
        print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])
        self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],
                                             self.which_conv)]
    # Turn self.blocks into a ModuleList so that it's all properly registered.
    self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
    # Linear output layer. The output dimension is typically 1, but may be
    # larger if we're e.g. turning this into a VAE with an inference output
    self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)
    # Embedding for projection discrimination
    self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])
    # Initialize weights
    if not skip_init:
      self.init_weights()
    # Set up optimizer
    self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps
    if D_mixed_precision:
      print('Using fp16 adam in D...')
      import utils
      self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
                             betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
    else:
      self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
                             betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
    # LR scheduling, left here for forward compatibility
    # self.lr_sched = {'itr' : 0}# if self.progressive else {}
    # self.j = 0
  # Initialize
  def init_weights(self):
    """Apply the configured init (ortho/N02/xavier) to conv/linear/embedding weights."""
    self.param_count = 0
    for module in self.modules():
      if (isinstance(module, nn.Conv2d)
          or isinstance(module, nn.Linear)
          or isinstance(module, nn.Embedding)):
        if self.init == 'ortho':
          init.orthogonal_(module.weight)
        elif self.init == 'N02':
          init.normal_(module.weight, 0, 0.02)
        elif self.init in ['glorot', 'xavier']:
          init.xavier_uniform_(module.weight)
        else:
          print('Init style not recognized...')
        self.param_count += sum([p.data.nelement() for p in module.parameters()])
    print('Param count for D''s initialized parameters: %d' % self.param_count)
  def forward(self, x, y=None):
    """Score images x under class labels y.

    Despite the None default, y is effectively required: the projection term
    calls self.embed(y) unconditionally.
    """
    # Stick x into h for cleaner for loops without flow control
    h = x
    # Loop over blocks
    for index, blocklist in enumerate(self.blocks):
      for block in blocklist:
        h = block(h)
    # Apply global sum pooling as in SN-GAN
    h = torch.sum(self.activation(h), [2, 3])
    # Get initial class-unconditional output
    out = self.linear(h)
    # Get projection of final featureset onto class vectors and add to evidence
    out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)
    return out
# Parallelized G_D to minimize cross-gpu communication
# Without this, Generator outputs would get all-gathered and then rebroadcast.
class G_D(nn.Module):
  """Wrap G and D in a single module to minimize cross-GPU communication:
  under DataParallel, G's outputs stay on their device instead of being
  all-gathered and rebroadcast before D runs."""
  def __init__(self, G, D):
    super(G_D, self).__init__()
    self.G = G
    self.D = D
  def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,
              split_D=False):
    """Generate from (z, gy) and score with D; optionally also score real (x, dy).

    Returns D_fake, or (D_fake, D_real) when x is given; with return_G_z and
    no real data, returns (D_fake, G_z) instead.
    """
    # If training G, enable grad tape
    with torch.set_grad_enabled(train_G):
      # Get Generator output given noise
      G_z = self.G(z, self.G.shared(gy))
      # Cast as necessary
      if self.G.fp16 and not self.D.fp16:
        G_z = G_z.float()
      if self.D.fp16 and not self.G.fp16:
        G_z = G_z.half()
      # Split_D means to run D once with real data and once with fake,
      # rather than concatenating along the batch dimension.
      if split_D:
        D_fake = self.D(G_z, gy)
        if x is not None:
          D_real = self.D(x, dy)
          return D_fake, D_real
        else:
          if return_G_z:
            return D_fake, G_z
          else:
            return D_fake
      # If real data is provided, concatenate it with the Generator's output
      # along the batch dimension for improved efficiency.
      else:
        D_input = torch.cat([G_z, x], 0) if x is not None else G_z
        D_class = torch.cat([gy, dy], 0) if dy is not None else gy
        # Get Discriminator output
        D_out = self.D(D_input, D_class)
        if x is not None:
          return torch.split(D_out, [G_z.shape[0], x.shape[0]]) # D_fake, D_real
        else:
          if return_G_z:
            return D_out, G_z
          else:
            return D_out
| 19,745 | 42.493392 | 98 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/resnet.py | from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 ("same" spatial size at stride 1), no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 convs with a residual connection (CIFAR-style ResNet block).

    When is_last is True, forward() also returns the pre-activation sum,
    which downstream distillation code consumes.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path (projection when shape changes, identity otherwise).
        identity = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out += identity
        preact = out
        # Non-inplace relu so `preact` keeps the pre-activation values.
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (4x channel expansion).

    When is_last is True, forward() also returns the pre-activation sum,
    which downstream distillation code consumes.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path (projection when shape changes, identity otherwise).
        identity = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out += identity
        preact = out
        # Non-inplace relu so `preact` keeps the pre-activation values.
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class ResNet(nn.Module):
def __init__(self, depth, num_filters, block_name='BasicBlock', num_classes=10):
super(ResNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 model
if block_name.lower() == 'basicblock':
assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
n = (depth - 2) // 6
block = BasicBlock
elif block_name.lower() == 'bottleneck':
assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
n = (depth - 2) // 9
block = Bottleneck
else:
raise ValueError('block_name shoule be Basicblock or Bottleneck')
self.inplanes = num_filters[0]
# self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=3, padding=1,
# bias=False) ##for cifar
self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=4, stride=2, padding=1, bias=False) #for imagenet100, h=h//2
self.bn1 = nn.BatchNorm2d(num_filters[0])
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, num_filters[1], n)
self.layer2 = self._make_layer(block, num_filters[2], n, stride=2)
self.layer3 = self._make_layer(block, num_filters[3], n, stride=2)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(num_filters[3] * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = list([])
layers.append(block(self.inplanes, planes, stride, downsample, is_last=(blocks == 1)))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, is_last=(i == blocks-1)))
return nn.Sequential(*layers)
def get_feat_modules(self):
feat_m = nn.ModuleList([])
feat_m.append(self.conv1)
feat_m.append(self.bn1)
feat_m.append(self.relu)
feat_m.append(self.layer1)
feat_m.append(self.layer2)
feat_m.append(self.layer3)
return feat_m
def get_bn_before_relu(self):
if isinstance(self.layer1[0], Bottleneck):
bn1 = self.layer1[-1].bn3
bn2 = self.layer2[-1].bn3
bn3 = self.layer3[-1].bn3
elif isinstance(self.layer1[0], BasicBlock):
bn1 = self.layer1[-1].bn2
bn2 = self.layer2[-1].bn2
bn3 = self.layer3[-1].bn2
else:
raise NotImplementedError('ResNet unknown block error !!!')
return [bn1, bn2, bn3]
def forward(self, x, is_feat=False, preact=False):
    """Run the network on a batch of 128x128 images.

    Args:
        x: input tensor (the stem halves resolution twice: conv stride 2
           plus an extra 3x3 max-pool).
        is_feat: if True, also return intermediate features [f0..f4].
        preact: if True (with is_feat), return each stage's pre-ReLU
            activation instead of the post-ReLU one.
    """
    x = self.conv1(x) #64x64
    x = self.bn1(x)
    x = self.relu(x)
    x = F.max_pool2d(x, 3, stride=2, padding=1) ##extra maxpool2d; 32x32
    f0 = x
    # Each stage's last block returns (out, pre-ReLU activation).
    x, f1_pre = self.layer1(x) # 32x32
    f1 = x
    x, f2_pre = self.layer2(x) # 16x16
    f2 = x
    x, f3_pre = self.layer3(x) # 8x8
    f3 = x
    x = self.avgpool(x)
    x = x.view(x.size(0), -1)
    f4 = x
    x = self.fc(x)
    if is_feat:
        if preact:
            return [f0, f1_pre, f2_pre, f3_pre, f4], x
        else:
            return [f0, f1, f2, f3, f4], x
    else:
        return x
# Factory helpers: each variant fixes the depth and the per-stage widths.
def resnet8(**kwargs):
    """ResNet-8 with stage widths [16, 16, 32, 64]."""
    return ResNet(8, [16, 16, 32, 64], 'basicblock', **kwargs)

def resnet14(**kwargs):
    """ResNet-14 with stage widths [16, 16, 32, 64]."""
    return ResNet(14, [16, 16, 32, 64], 'basicblock', **kwargs)

def resnet20(**kwargs):
    """ResNet-20 with stage widths [16, 16, 32, 64]."""
    return ResNet(20, [16, 16, 32, 64], 'basicblock', **kwargs)

def resnet32(**kwargs):
    """ResNet-32 with stage widths [16, 16, 32, 64]."""
    return ResNet(32, [16, 16, 32, 64], 'basicblock', **kwargs)

def resnet44(**kwargs):
    """ResNet-44 with stage widths [16, 16, 32, 64]."""
    return ResNet(44, [16, 16, 32, 64], 'basicblock', **kwargs)

def resnet56(**kwargs):
    """ResNet-56 with stage widths [16, 16, 32, 64]."""
    return ResNet(56, [16, 16, 32, 64], 'basicblock', **kwargs)

def resnet110(**kwargs):
    """ResNet-110 with stage widths [16, 16, 32, 64]."""
    return ResNet(110, [16, 16, 32, 64], 'basicblock', **kwargs)

def resnet8x4(**kwargs):
    """ResNet-8 with 4x wider stages [32, 64, 128, 256]."""
    return ResNet(8, [32, 64, 128, 256], 'basicblock', **kwargs)

def resnet32x4(**kwargs):
    """ResNet-32 with 4x wider stages [32, 64, 128, 256]."""
    return ResNet(32, [32, 64, 128, 256], 'basicblock', **kwargs)
if __name__ == '__main__':
    # Smoke test: forward a dummy batch, print feature shapes, and check
    # that the pre-ReLU hooks really are BatchNorm layers.
    import torch
    x = torch.randn(2, 3, 128, 128)
    net = resnet8x4(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 7,967 | 29.764479 | 122 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/mobilenetv2.py | """
MobileNetV2 implementation used in
<Knowledge Distillation via Route Constrained Optimization>
"""
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
__all__ = ['mobilenetv2_T_w', 'mobile_half']
BN = None  # module-level placeholder; appears unused in this file
def conv_bn(inp, oup, stride):
    """3x3 conv (given stride, padding 1) -> BatchNorm -> ReLU."""
    stem = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*stem)
def conv_1x1_bn(inp, oup):
    """Pointwise (1x1) conv -> BatchNorm -> ReLU; spatial size unchanged."""
    stem = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*stem)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project.

    The residual shortcut is used only when stride == 1 and the input and
    output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.blockname = None
        self.stride = stride
        assert stride in [1, 2]
        self.use_res_connect = self.stride == 1 and inp == oup

        hidden = inp * expand_ratio
        self.conv = nn.Sequential(
            # pointwise expansion
            nn.Conv2d(inp, hidden, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # depthwise 3x3
            nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # pointwise linear projection (no activation)
            nn.Conv2d(hidden, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )
        self.names = ['0', '1', '2', '3', '4', '5', '6', '7']

    def forward(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            out = x + out
        return out
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone (ImageNet-100 variant, 128x128 inputs).

    ``forward(x, is_feat=True)`` additionally returns intermediate feature
    maps [f0..f5] for feature-based knowledge distillation.
    """
    def __init__(self, T,
                 feature_dim,
                 input_size=128,
                 width_mult=1.,
                 remove_avg=False):
        super(MobileNetV2, self).__init__()
        self.remove_avg = remove_avg

        # setting of inverted residual blocks:
        # t = expansion factor, c = output channels, n = repeats, s = stride of
        # the first repeat (remaining repeats use stride 1).
        self.interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [T, 24, 2, 2], #s=1 for cifar
            [T, 32, 3, 2],
            [T, 64, 4, 2],
            [T, 96, 3, 1],
            [T, 160, 3, 2],
            [T, 320, 1, 1],
        ]

        # building first layer
        assert input_size % 32 == 0
        input_channel = int(32 * width_mult)
        self.conv1 = conv_bn(3, input_channel, 2)

        # building inverted residual blocks (one nn.Sequential per setting row)
        self.blocks = nn.ModuleList([])
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * width_mult)
            layers = []
            strides = [s] + [1] * (n - 1)
            for stride in strides:
                layers.append(
                    InvertedResidual(input_channel, output_channel, stride, t)
                )
                input_channel = output_channel
            self.blocks.append(nn.Sequential(*layers))

        # the final 1x1 conv width is never scaled below 1280 channels
        self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280
        self.conv2 = conv_1x1_bn(input_channel, self.last_channel)

        # building classifier
        self.classifier = nn.Sequential(
            # nn.Dropout(0.5),
            nn.Linear(self.last_channel, feature_dim),
        )

        # H = input_size // (32//2)
        # self.avgpool = nn.AvgPool2d(H, ceil_mode=True)

        self._initialize_weights()
        print(T, width_mult)

    def get_bn_before_relu(self):
        """Return the last (pre-ReLU) BN of selected stages, for distillation hooks."""
        bn1 = self.blocks[1][-1].conv[-1]
        bn2 = self.blocks[2][-1].conv[-1]
        bn3 = self.blocks[4][-1].conv[-1]
        bn4 = self.blocks[6][-1].conv[-1]
        return [bn1, bn2, bn3, bn4]

    def get_feat_modules(self):
        """Return the feature-extraction modules (stem + residual stages)."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.blocks)
        return feat_m

    def forward(self, x, is_feat=False, preact=False):
        """Run the network; with is_feat, also return feature taps [f0..f5].

        Note: ``preact`` is accepted for interface compatibility but has no
        effect in this architecture.
        """
        out = self.conv1(x)
        f0 = out

        out = self.blocks[0](out)
        out = self.blocks[1](out)
        f1 = out
        out = self.blocks[2](out)
        f2 = out
        out = self.blocks[3](out)
        out = self.blocks[4](out)
        f3 = out
        out = self.blocks[5](out)
        out = self.blocks[6](out)
        f4 = out

        out = self.conv2(out)

        if not self.remove_avg:
            # out = self.avgpool(out)
            out = F.adaptive_avg_pool2d(out, (1, 1))
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.classifier(out)

        if is_feat:
            return [f0, f1, f2, f3, f4, f5], out
        else:
            return out

    def _initialize_weights(self):
        """Kaiming-style init for convs; BN to identity; small normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def mobilenetv2_T_w(T, W, feature_dim=100):
    """Build MobileNetV2 with expansion factor ``T`` and width multiplier ``W``."""
    model = MobileNetV2(T=T, feature_dim=feature_dim, width_mult=W)
    return model

def mobile_half(num_classes):
    """MobileNetV2 (T=6) at half width -- the common student configuration."""
    return mobilenetv2_T_w(6, 0.5, num_classes)
if __name__ == '__main__':
    # Smoke test: forward a dummy 128x128 batch and print feature shapes.
    x = torch.randn(2, 3, 128, 128)
    net = mobile_half(100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/vgg.py | '''VGG '''
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
    """VGG backbone (ImageNet-100 variant, 128x128 inputs).

    Five conv blocks separated by max-pools, followed by a 3-layer MLP
    classifier.  ``forward(..., is_feat=True)`` also returns intermediate
    feature maps; with ``preact=True`` the pre-ReLU activations are
    returned instead.
    """

    def __init__(self, cfg, batch_norm=False, num_classes=1000):
        super(VGG, self).__init__()
        self.block0 = self._make_layers(cfg[0], batch_norm, 3)
        self.block1 = self._make_layers(cfg[1], batch_norm, cfg[0][-1])
        self.block2 = self._make_layers(cfg[2], batch_norm, cfg[1][-1])
        self.block3 = self._make_layers(cfg[3], batch_norm, cfg[2][-1])
        self.block4 = self._make_layers(cfg[4], batch_norm, cfg[3][-1])

        self.pool0 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        # self.pool4 = nn.AdaptiveAvgPool2d((1, 1)) #for cifar
        self.pool4 = nn.MaxPool2d(kernel_size=4, stride=4) #for imagenet100

        # self.classifier = nn.Linear(512, num_classes) #for cifar
        self.classifier = nn.Sequential(
            nn.Linear(512 * 4 * 4, 1024),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(1024, 1024),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(1024, num_classes),
        ) #for imagenet100
        self._initialize_weights()

    def get_feat_modules(self):
        """Return the conv blocks and pools, in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.block0)
        feat_m.append(self.pool0)
        feat_m.append(self.block1)
        feat_m.append(self.pool1)
        feat_m.append(self.block2)
        feat_m.append(self.pool2)
        feat_m.append(self.block3)
        feat_m.append(self.pool3)
        feat_m.append(self.block4)
        feat_m.append(self.pool4)
        return feat_m

    def get_bn_before_relu(self):
        """Return the last layer of blocks 1-4 (the BN before each block's ReLU).

        NOTE(review): with batch_norm=False the last layer is a Conv2d, not a
        BN -- only meaningful for the *_bn variants.
        """
        bn1 = self.block1[-1]
        bn2 = self.block2[-1]
        bn3 = self.block3[-1]
        bn4 = self.block4[-1]
        return [bn1, bn2, bn3, bn4]

    def forward(self, x, is_feat=False, preact=False):
        """Run the network; with is_feat, also return feature taps [f0..f5]."""
        h = x.shape[2]
        x = F.relu(self.block0(x))
        f0 = x
        x = self.pool0(x)
        x = self.block1(x)
        f1_pre = x
        x = F.relu(x)
        f1 = x
        x = self.pool1(x)
        x = self.block2(x)
        f2_pre = x
        x = F.relu(x)
        f2 = x
        x = self.pool2(x)
        x = self.block3(x)
        f3_pre = x
        x = F.relu(x)
        f3 = x
        # NOTE(review): pool3 runs only for 64x64 inputs; with 128x128 inputs
        # the spatial size entering block4 stays 16x16, which is what the
        # 512*4*4 classifier input expects -- confirm the 64x64 path is used.
        if h == 64:
            x = self.pool3(x)
        x = self.block4(x)
        f4_pre = x
        x = F.relu(x)
        f4 = x
        x = self.pool4(x)
        x = x.view(x.size(0), -1)
        f5 = x
        x = self.classifier(x)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], x
            else:
                return [f0, f1, f2, f3, f4, f5], x
        else:
            return x

    @staticmethod
    def _make_layers(cfg, batch_norm=False, in_channels=3):
        """Build one conv block from a channel list; 'M' entries insert max-pools.

        The trailing ReLU is dropped so forward() can tap the pre-activation.
        """
        layers = []
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        layers = layers[:-1]
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        """Kaiming-style init for convs; BN to identity; small normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
# Per-block channel configurations, one inner list per conv block.
# 'A'=vgg11, 'B'=vgg13, 'D'=vgg16, 'E'=vgg19, 'S'=vgg8.
cfg = {
    'A': [[64], [128], [256, 256], [512, 512], [512, 512]],
    'B': [[64, 64], [128, 128], [256, 256], [512, 512], [512, 512]],
    'D': [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]],
    'E': [[64, 64], [128, 128], [256, 256, 256, 256], [512, 512, 512, 512], [512, 512, 512, 512]],
    'S': [[64], [128], [256], [512], [512]],
}
def vgg8(**kwargs):
    """VGG 8-layer model (configuration "S")."""
    model = VGG(cfg['S'], **kwargs)
    return model

def vgg8_bn(**kwargs):
    """VGG 8-layer model (configuration "S") with batch normalization."""
    model = VGG(cfg['S'], batch_norm=True, **kwargs)
    return model

def vgg11(**kwargs):
    """VGG 11-layer model (configuration "A")."""
    model = VGG(cfg['A'], **kwargs)
    return model

def vgg11_bn(**kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization."""
    model = VGG(cfg['A'], batch_norm=True, **kwargs)
    return model

def vgg13(**kwargs):
    """VGG 13-layer model (configuration "B")."""
    model = VGG(cfg['B'], **kwargs)
    return model

def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization."""
    model = VGG(cfg['B'], batch_norm=True, **kwargs)
    return model

def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D")."""
    model = VGG(cfg['D'], **kwargs)
    return model

def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization."""
    model = VGG(cfg['D'], batch_norm=True, **kwargs)
    return model

def vgg19(**kwargs):
    """VGG 19-layer model (configuration "E")."""
    model = VGG(cfg['E'], **kwargs)
    return model

def vgg19_bn(**kwargs):
    """VGG 19-layer model (configuration 'E') with batch normalization."""
    model = VGG(cfg['E'], batch_norm=True, **kwargs)
    return model
if __name__ == '__main__':
    # Smoke test: forward a dummy 128x128 batch and print feature shapes.
    import torch
    x = torch.randn(2, 3, 128, 128)
    net = vgg13_bn(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/densenet.py | '''DenseNet in PyTorch.
To fit 128x128 images, I modified the first conv layer and add an extra max_pool2d after it.
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
NC=3  # number of input channels
IMG_SIZE = 128  # expected input resolution
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1conv -> BN-ReLU-3x3conv.

    The ``growth_rate`` new feature maps are concatenated in front of the
    untouched input channels.
    """

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        inter_planes = 4 * growth_rate
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(inter_planes)
        self.conv2 = nn.Conv2d(inter_planes, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        new_features = self.conv2(F.relu(self.bn2(new_features)))
        return torch.cat([new_features, x], 1)
class Transition(nn.Module):
    """DenseNet transition: BN-ReLU-1x1conv to compress channels, then 2x avg-pool."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        y = F.relu(self.bn(x))
        y = self.conv(y)
        return F.avg_pool2d(y, 2)
class DenseNet(nn.Module):
    """DenseNet adapted to 128x128 inputs.

    The stem is a stride-2 4x4 conv plus a stride-2 max-pool (instead of the
    CIFAR 3x3 conv), followed by four dense blocks with transitions.
    """

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2*growth_rate
        # self.conv1 = nn.Conv2d(NC, num_planes, kernel_size=3, padding=1, bias=False)
        # Stem downsamples twice: stride-2 conv then stride-2 max-pool.
        self.conv1 = nn.Sequential(
            nn.Conv2d(NC, num_planes, kernel_size=4, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(num_planes),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )

        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        # Each transition compresses channels by `reduction` and halves H/W.
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        """Stack ``nblock`` dense layers; channels grow by growth_rate per layer."""
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return class logits for a batch of NC x IMG_SIZE x IMG_SIZE images."""
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
# Standard DenseNet configurations: per-stage layer counts and growth rate.
def DenseNet121(num_classes=10):
    """DenseNet-121 (growth rate 32)."""
    return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32, num_classes=num_classes)

def DenseNet169(num_classes=10):
    """DenseNet-169 (growth rate 32)."""
    return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32, num_classes=num_classes)

def DenseNet201(num_classes=10):
    """DenseNet-201 (growth rate 32)."""
    return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32, num_classes=num_classes)

def DenseNet161(num_classes=10):
    """DenseNet-161 (growth rate 48)."""
    return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48, num_classes=num_classes)
def test_densenet():
    """Smoke test: forward a dummy batch through DenseNet-121 and print the shape."""
    net = DenseNet121(num_classes=100)
    x = torch.randn(2,NC,IMG_SIZE,IMG_SIZE)
    # NOTE: torch.autograd.Variable is a no-op wrapper on modern PyTorch.
    y = net(Variable(x))
    print(y.shape)

if __name__ == "__main__":
    test_densenet()
| 4,125 | 32.544715 | 96 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/ResNet_extract.py | '''
ResNet-based model to map an image from pixel space to a features space.
Need to be pretrained on the dataset.
codes are based on
@article{
zhang2018mixup,
title={mixup: Beyond Empirical Risk Minimization},
author={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},
journal={International Conference on Learning Representations},
year={2018},
url={https://openreview.net/forum?id=r1Ddp1-Rb},
}
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
IMG_SIZE=128  # expected input resolution
NC=3  # number of input channels
resize=(128,128)  # target size for the (commented-out) interpolation in forward
class BasicBlock(nn.Module):
    """Standard two-conv residual block (post-activation ResNet)."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        if stride != 1 or in_planes != self.expansion*planes:
            # Project the identity path when the output shape changes.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        return F.relu(branch + self.shortcut(x))
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 residual bottleneck; output channels = planes * 4."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        if stride != 1 or in_planes != self.expansion*planes:
            # Project the identity path when the output shape changes.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = F.relu(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        return F.relu(branch + self.shortcut(x))
class ResNet_extract(nn.Module):
    """ResNet-based feature extractor: maps images to a pixel-sized feature
    vector (classifier_1) and class logits (classifier_2).

    forward() returns ``(logits, features)`` where ``features`` has dimension
    img_height * img_width * nc.
    """

    def __init__(self, block, num_blocks, num_classes=100, nc=NC, img_height=IMG_SIZE, img_width=IMG_SIZE):
        super(ResNet_extract, self).__init__()
        self.in_planes = 64

        self.main = nn.Sequential(
            nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False),  # h=h
            nn.BatchNorm2d(64),
            nn.ReLU(),
            self._make_layer(block, 64, num_blocks[0], stride=1),  # h=h
            nn.MaxPool2d(kernel_size=2, stride=2), #64
            self._make_layer(block, 128, num_blocks[1], stride=2), #32
            nn.MaxPool2d(kernel_size=2, stride=2), #16
            self._make_layer(block, 256, num_blocks[2], stride=2), #8
            self._make_layer(block, 512, num_blocks[3], stride=2), #4
            nn.AvgPool2d(kernel_size=4)
        )

        # Projects pooled features to the (flattened) image dimensionality.
        self.classifier_1 = nn.Sequential(
            nn.Linear(512*block.expansion, img_height*img_width*nc),
        )
        # Classification head on top of the extracted features.
        self.classifier_2 = nn.Sequential(
            nn.BatchNorm1d(img_height*img_width*nc),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(img_height*img_width*nc, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(1024, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(256, num_classes),
        )

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack residual blocks; only the first uses the given stride."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (class logits, extracted feature vector)."""
        # x = nn.functional.interpolate(x,size=resize,mode='bilinear',align_corners=True)
        features = self.main(x)
        features = features.view(features.size(0), -1)
        features = self.classifier_1(features)
        out = self.classifier_2(features)
        return out, features
# Standard depths; BasicBlock for 18/34, Bottleneck for 50/101/152.
def ResNet18_extract(num_classes=100):
    """ResNet-18 feature extractor."""
    return ResNet_extract(BasicBlock, [2,2,2,2], num_classes=num_classes)

def ResNet34_extract(num_classes=100):
    """ResNet-34 feature extractor."""
    return ResNet_extract(BasicBlock, [3,4,6,3], num_classes=num_classes)

def ResNet50_extract(num_classes=100):
    """ResNet-50 feature extractor."""
    return ResNet_extract(Bottleneck, [3,4,6,3], num_classes=num_classes)

def ResNet101_extract(num_classes=100):
    """ResNet-101 feature extractor."""
    return ResNet_extract(Bottleneck, [3,4,23,3], num_classes=num_classes)

def ResNet152_extract(num_classes=100):
    """ResNet-152 feature extractor."""
    return ResNet_extract(Bottleneck, [3,8,36,3], num_classes=num_classes)
if __name__ == "__main__":
    # Smoke test: forward a dummy batch and report output shapes and
    # the parameter count.
    net = ResNet34_extract(num_classes=100).cuda()
    x = torch.randn(16,3,128,128).cuda()
    out, features = net(x)
    print(out.size())
    print(features.size())

    def get_parameter_number(net):
        """Return total and trainable parameter counts of ``net``."""
        total_num = sum(p.numel() for p in net.parameters())
        trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
        return {'Total': total_num, 'Trainable': trainable_num}
    print(get_parameter_number(net))
| 5,781 | 34.042424 | 107 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/resnetv2.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-conv residual block that can also expose its pre-ReLU activation.

    When ``is_last`` is True, forward() returns ``(out, preact)`` so
    distillation methods can hook the pre-activation features.
    """

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        preact = branch + self.shortcut(x)
        out = F.relu(preact)
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 residual bottleneck (output channels = planes * 4).

    When ``is_last`` is True, forward() returns ``(out, preact)`` so
    distillation methods can hook the pre-activation features.
    """

    expansion = 4

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)

        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = F.relu(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        preact = branch + self.shortcut(x)
        out = F.relu(preact)
        return (out, preact) if self.is_last else out
class ResNet(nn.Module):
    """ResNet backbone for 128x128 inputs (ImageNet-100 variant).

    The stem downsamples twice (stride-2 conv plus a 3x3 max-pool) before
    the four residual stages.  ``forward(x, is_feat=True)`` also returns
    the intermediate feature maps [f0..f5] used for knowledge distillation;
    with ``preact=True`` the pre-ReLU activations of each stage are
    returned instead of the post-ReLU ones.
    """

    def __init__(self, block, num_blocks, num_classes=10, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.in_planes = 64

        # self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) #for cifar
        self.conv1 = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False) #for imagenet100, h=h//2
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def get_feat_modules(self):
        """Return the feature-extraction modules, in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        feat_m.append(self.layer4)
        return feat_m

    def get_bn_before_relu(self):
        """Return the last BN layer of each stage (feeds its final ReLU).

        Raises:
            NotImplementedError: if the stages use an unknown block type.
        """
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
            bn4 = self.layer4[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
            bn4 = self.layer4[-1].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3, bn4]

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack residual blocks; only the first uses the given stride and
        the last is flagged is_last so it also returns its pre-activation."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for i in range(num_blocks):
            stride = strides[i]
            layers.append(block(self.in_planes, planes, stride, i == num_blocks - 1))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, is_feat=False, preact=False):
        """Run the network; with is_feat, also return feature taps [f0..f5]."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1) ##extra maxpool2d
        f0 = out
        # Each stage's last block returns (out, pre-ReLU activation).
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out, f4_pre = self.layer4(out)
        f4 = out
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                # Return a (features, logits) pair, consistent with the
                # non-preact branch below (previously wrapped in an extra
                # list, which unpacked identically but was inconsistent).
                return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], out
            else:
                return [f0, f1, f2, f3, f4, f5], out
        else:
            return out
# Standard depths; BasicBlock for 18/34, Bottleneck for 50/101/152.
def ResNet18(**kwargs):
    """ResNet-18."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)

def ResNet34(**kwargs):
    """ResNet-34."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)

def ResNet50(**kwargs):
    """ResNet-50."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)

def ResNet101(**kwargs):
    """ResNet-101."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)

def ResNet152(**kwargs):
    """ResNet-152."""
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if __name__ == '__main__':
    # Smoke test: forward a dummy batch, print feature shapes, and check
    # that the pre-ReLU hooks really are BatchNorm layers.
    net = ResNet18(num_classes=100)
    x = torch.randn(2, 3, 128, 128)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 7,123 | 34.442786 | 110 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/cDR_MLP.py | '''
Conditional Density Ration Estimation via Multilayer Perceptron
Multilayer Perceptron : trained to model density ratio in a feature space
Its input is the output of a pretrained Deep CNN, say ResNet-34
'''
import torch
import torch.nn as nn
IMG_SIZE=128  # input image resolution
NC=3  # number of image channels
N_CLASS = 100  # number of class labels

# Hidden-layer widths for each MLP variant.
cfg = {"MLP3": [512,256,128],
       "MLP5": [2048,1024,512,256,128],
       "MLP7": [2048,2048,1024,1024,512,256,128],
       "MLP9": [2048,2048,1024,1024,512,512,256,256,128],}
class cDR_MLP(nn.Module):
    """MLP conditional density-ratio estimator over extracted features.

    The class label is embedded and concatenated to the input feature
    vector; the head is Linear(..., 1) followed by ReLU so the estimated
    ratio is non-negative.
    """

    def __init__(self, MLP_name, p_dropout=0.5, init_in_dim = IMG_SIZE**2*NC, num_classes = N_CLASS):
        super(cDR_MLP, self).__init__()
        self.init_in_dim = init_in_dim
        self.p_dropout = p_dropout
        self.num_classes = num_classes
        self.label_emb = nn.Embedding(num_classes, num_classes)

        layers = self._make_layers(cfg[MLP_name])
        layers.append(nn.Linear(cfg[MLP_name][-1], 1))
        layers.append(nn.ReLU())
        self.main = nn.Sequential(*layers)

    def _make_layers(self, cfg):
        layers = []
        in_dim = self.init_in_dim  # initial input dimension
        for width in cfg:
            # The layer fed by the raw input also receives the label embedding.
            extra = self.num_classes if in_dim == self.init_in_dim else 0
            layers.extend([
                nn.Linear(in_dim + extra, width),
                nn.GroupNorm(8, width),
                nn.ReLU(inplace=True),
                nn.Dropout(self.p_dropout),  # do we really need dropout?
            ])
            in_dim = width
        return layers

    def forward(self, x, labels):
        joint = torch.cat((self.label_emb(labels), x), -1)
        return self.main(joint)
if __name__ == "__main__":
    # Smoke test: forward random features + labels and report output shape
    # and the parameter count.
    net = cDR_MLP('MLP5').cuda()
    x = torch.randn((5,IMG_SIZE**2*NC)).cuda()
    labels = torch.LongTensor(5).random_(0, N_CLASS).cuda()
    out = net(x, labels)
    print(out.size())

    def get_parameter_number(net):
        """Return total and trainable parameter counts of ``net``."""
        total_num = sum(p.numel() for p in net.parameters())
        trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
        return {'Total': total_num, 'Trainable': trainable_num}
    print(get_parameter_number(net))
| 2,393 | 30.92 | 101 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/InceptionV3.py | '''
Inception v3
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
__all__ = ['Inception3', 'inception_v3']
model_urls = {
# Inception v3 ported from TensorFlow
'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
}
def inception_v3(pretrained=False, **kwargs):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.

    .. note::
        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        transform_input (bool): If True, preprocesses the input according to the method with which it
            was trained on ImageNet. Default: *False*
    """
    if pretrained:
        # Pretrained checkpoints were trained with input normalization, so
        # force transform_input unless the caller overrides it explicitly.
        if 'transform_input' not in kwargs:
            kwargs['transform_input'] = True
        model = Inception3(**kwargs)
        model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))
        return model

    return Inception3(**kwargs)
class Inception3(nn.Module):
    """Inception v3 network.

    forward() returns ``(logits, features)`` where ``features`` is the
    2048-d globally pooled activation before dropout.  The auxiliary
    classifier is still constructed when ``aux_logits=True`` but its
    forward use is commented out, so it never affects the return value.
    """

    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
        super(Inception3, self).__init__()
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.fc = nn.Linear(2048, num_classes)

        # Truncated-normal init for conv/linear weights (lazy scipy import).
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.numel()))
                values = values.view(m.weight.size())
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Return (logits, 2048-d pooled features) for an N x 3 x 299 x 299 batch."""
        if self.transform_input:
            # Re-normalize from the (0.5, 0.5) convention to ImageNet stats.
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        # N x 3 x 299 x 299
        x = self.Conv2d_1a_3x3(x)
        # N x 32 x 149 x 149
        x = self.Conv2d_2a_3x3(x)
        # N x 32 x 147 x 147
        x = self.Conv2d_2b_3x3(x)
        # N x 64 x 147 x 147
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # N x 64 x 73 x 73
        x = self.Conv2d_3b_1x1(x)
        # N x 80 x 73 x 73
        x = self.Conv2d_4a_3x3(x)
        # N x 192 x 71 x 71
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # N x 192 x 35 x 35
        x = self.Mixed_5b(x)
        # N x 256 x 35 x 35
        x = self.Mixed_5c(x)
        # N x 288 x 35 x 35
        x = self.Mixed_5d(x)
        # N x 288 x 35 x 35
        x = self.Mixed_6a(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6b(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6c(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6d(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6e(x)
        # N x 768 x 17 x 17
        # if self.training and self.aux_logits:
        #     aux = self.AuxLogits(x)
        # N x 768 x 17 x 17
        x = self.Mixed_7a(x)
        # N x 1280 x 8 x 8
        x = self.Mixed_7b(x)
        # N x 2048 x 8 x 8
        x = self.Mixed_7c(x)
        # N x 2048 x 8 x 8
        # Adaptive average pooling
        x = F.adaptive_avg_pool2d(x, (1, 1))
        features = x.view(x.size(0), -1)
        # N x 2048 x 1 x 1
        x = F.dropout(x, training=self.training)
        # N x 2048 x 1 x 1
        x = x.view(x.size(0), -1)
        # N x 2048
        x = self.fc(x)
        # N x 1000 (num_classes)
        # if self.training and self.aux_logits:
        #     return x, features, aux
        return x, features
class InceptionA(nn.Module):
    """Inception block for the 35x35 grid: 1x1, 5x5, double-3x3 and pooled branches."""

    def __init__(self, in_channels, pool_features):
        super(InceptionA, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)

    def forward(self, x):
        """Run the four parallel branches and concatenate along channels."""
        b1 = self.branch1x1(x)
        b5 = self.branch5x5_2(self.branch5x5_1(x))
        bdbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        pooled = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return torch.cat([b1, b5, bdbl, pooled], 1)
class InceptionB(nn.Module):
    """Grid-reduction block (35x35 -> 17x17) with stride-2 conv branches and max-pool."""

    def __init__(self, in_channels):
        super(InceptionB, self).__init__()
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)

    def forward(self, x):
        b3 = self.branch3x3(x)
        bdbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        pooled = F.max_pool2d(x, kernel_size=3, stride=2)
        return torch.cat([b3, bdbl, pooled], 1)
class InceptionC(nn.Module):
    """Inception block for the 17x17 grid using factorized 7x7 convolutions."""

    def __init__(self, in_channels, channels_7x7):
        super(InceptionC, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
        c7 = channels_7x7
        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        b1 = self.branch1x1(x)
        b7 = self.branch7x7_3(self.branch7x7_2(self.branch7x7_1(x)))
        bdbl = self.branch7x7dbl_1(x)
        # Two stacked 7x1/1x7 factorized convolutions.
        for stage in (self.branch7x7dbl_2, self.branch7x7dbl_3,
                      self.branch7x7dbl_4, self.branch7x7dbl_5):
            bdbl = stage(bdbl)
        pooled = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return torch.cat([b1, b7, bdbl, pooled], 1)
class InceptionD(nn.Module):
    """Grid-reduction block (17x17 -> 8x8): stride-2 conv branches plus max-pool."""

    def __init__(self, in_channels):
        super(InceptionD, self).__init__()
        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)

    def forward(self, x):
        b3 = self.branch3x3_2(self.branch3x3_1(x))
        b7 = self.branch7x7x3_1(x)
        for stage in (self.branch7x7x3_2, self.branch7x7x3_3, self.branch7x7x3_4):
            b7 = stage(b7)
        pooled = F.max_pool2d(x, kernel_size=3, stride=2)
        return torch.cat([b3, b7, pooled], 1)
class InceptionE(nn.Module):
    """Inception block for the 8x8 grid with split 1x3/3x1 sub-branches."""

    def __init__(self, in_channels):
        super(InceptionE, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        b1 = self.branch1x1(x)
        # 3x3 branch forks into parallel 1x3 and 3x1 convolutions.
        stem3 = self.branch3x3_1(x)
        b3 = torch.cat([self.branch3x3_2a(stem3), self.branch3x3_2b(stem3)], 1)
        # Double-3x3 branch forks the same way after its 3x3 stage.
        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        bdbl = torch.cat([self.branch3x3dbl_3a(stem_dbl),
                          self.branch3x3dbl_3b(stem_dbl)], 1)
        pooled = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return torch.cat([b1, b3, bdbl, pooled], 1)
class InceptionAux(nn.Module):
    """Auxiliary classifier head attached to the 17x17 feature map."""

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
        # `stddev` is read by the truncated-normal initializer in Inception3.
        self.conv1.stddev = 0.01
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001

    def forward(self, x):
        """N x 768 x 17 x 17 -> N x num_classes logits."""
        pooled = F.avg_pool2d(x, kernel_size=5, stride=3)        # -> 5 x 5
        feats = self.conv1(self.conv0(pooled))                   # -> 768 x 1 x 1
        feats = F.adaptive_avg_pool2d(feats, (1, 1))
        flat = feats.view(feats.size(0), -1)                     # -> N x 768
        return self.fc(flat)
class BasicConv2d(nn.Module):
    """Conv2d (no bias) followed by BatchNorm(eps=1e-3) and in-place ReLU."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)), inplace=True)
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/ShuffleNetv1.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
To fit 128x128 images, I modified the first conv layer and add an extra max_pool2d after it (Following Table 5 of "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design")
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle: interleave channels across `groups`."""

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]
        n, c, h, w = x.size()
        per_group = c // self.groups
        grouped = x.view(n, self.groups, per_group, h, w)
        return grouped.transpose(1, 2).reshape(n, c, h, w)
class Bottleneck(nn.Module):
    """ShuffleNet v1 unit: grouped 1x1 -> shuffle -> depthwise 3x3 -> grouped 1x1.

    At stride 2 the avg-pooled shortcut is concatenated; otherwise it is added.
    When `is_last`, forward also returns the pre-activation tensor.
    """

    def __init__(self, in_planes, out_planes, stride, groups, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.stride = stride
        mid_planes = int(out_planes/4)
        # The very first unit (24 input channels) uses an ungrouped 1x1 conv.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.shuffle1(branch)
        branch = F.relu(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        residual = self.shortcut(x)
        if self.stride == 2:
            preact = torch.cat([branch, residual], 1)
        else:
            preact = branch + residual
        activated = F.relu(preact)
        return (activated, preact) if self.is_last else activated
class ShuffleNet(nn.Module):
    """ShuffleNet v1 backbone adapted for 128x128 inputs.

    Compared with a CIFAR stem, the first conv downsamples (stride 2) and an
    extra max-pool follows it in forward.
    """

    def __init__(self, cfg, num_classes=10):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False) #original
        self.conv1 = nn.Conv2d(3, 24, kernel_size=4, padding=1, stride=2, bias=False) #h=h//2
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], num_classes)

    def _make_layer(self, out_planes, num_blocks, groups):
        """Build one stage; only its first block downsamples."""
        layers = []
        for i in range(num_blocks):
            stride = 2 if i == 0 else 1
            # The stride-2 block concatenates its shortcut, so its conv branch
            # only has to produce (out_planes - in_planes) channels.
            cat_planes = self.in_planes if i == 0 else 0
            layers.append(Bottleneck(self.in_planes, out_planes-cat_planes,
                                     stride=stride,
                                     groups=groups,
                                     is_last=(i == num_blocks - 1)))
        self.in_planes = out_planes
        return nn.Sequential(*layers)

    def get_feat_modules(self):
        """Feature-producing modules in forward order (for distillation hooks)."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m

    def get_bn_before_relu(self):
        raise NotImplementedError('ShuffleNet currently is not supported for "Overhaul" teacher')

    def forward(self, x, is_feat=False, preact=False):
        """Return logits; with is_feat=True also return intermediate features.

        Each stage's last Bottleneck returns (out, preact); `preact` selects
        which of the two is reported in the feature list.
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1) ##extra maxpool2d
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        f4 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], out
            else:
                return [f0, f1, f2, f3, f4], out
        else:
            return out
def ShuffleV1(**kwargs):
    """Build the standard ShuffleNet v1 (g=3) configuration."""
    config = {
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3,
    }
    return ShuffleNet(config, **kwargs)
if __name__ == '__main__':
    # Smoke test: time a forward pass and print the feature shapes.
    x = torch.randn(2, 3, 128, 128)
    net = ShuffleV1(num_classes=100)
    import time
    a = time.time()
    feats, logit = net(x, is_feat=True, preact=True)
    b = time.time()
    print(b - a)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
| 5,107 | 34.472222 | 190 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/ShuffleNetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
To fit 128x128 images, I modified the first conv layer and add an extra max_pool2d after it (Following Table 5 of "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design")
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle: interleave channels across `groups` (default 2)."""

    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]
        n, c, h, w = x.size()
        per_group = c // self.groups
        grouped = x.view(n, self.groups, per_group, h, w)
        return grouped.transpose(1, 2).reshape(n, c, h, w)
class SplitBlock(nn.Module):
    """Split a tensor along channels at `ratio` into (front, back) halves."""

    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        split_at = int(x.size(1) * self.ratio)
        return x[:, :split_at, :, :], x[:, split_at:, :, :]
class BasicBlock(nn.Module):
    """ShuffleNet v2 stride-1 unit: split, transform one half, concat, shuffle.

    When `is_last`, forward also returns the pre-activation concatenation.
    """

    def __init__(self, in_channels, split_ratio=0.5, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.split = SplitBlock(split_ratio)
        in_channels = int(in_channels * split_ratio)
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        # Depthwise 3x3.
        self.conv2 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        self.conv3 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        passthrough, transformed = self.split(x)
        transformed = F.relu(self.bn1(self.conv1(transformed)))
        transformed = self.bn2(self.conv2(transformed))
        branch_pre = self.bn3(self.conv3(transformed))
        branch_out = F.relu(branch_pre)
        preact = torch.cat([passthrough, branch_pre], 1)
        merged = self.shuffle(torch.cat([passthrough, branch_out], 1))
        return (merged, preact) if self.is_last else merged
class DownBlock(nn.Module):
    """ShuffleNet v2 stride-2 unit: both halves downsample, then concat + shuffle."""

    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2
        # Left branch: depthwise stride-2 3x3 then pointwise 1x1.
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # Right branch: 1x1 -> depthwise stride-2 3x3 -> 1x1.
        self.conv3 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(mid_channels)
        self.conv4 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
        self.bn4 = nn.BatchNorm2d(mid_channels)
        self.conv5 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(mid_channels)
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        left = self.bn1(self.conv1(x))
        left = F.relu(self.bn2(self.conv2(left)))
        right = F.relu(self.bn3(self.conv3(x)))
        right = self.bn4(self.conv4(right))
        right = F.relu(self.bn5(self.conv5(right)))
        return self.shuffle(torch.cat([left, right], 1))
class ShuffleNetV2(nn.Module):
    """ShuffleNet v2 backbone adapted for 128x128 inputs.

    `net_size` is a width multiplier key into the module-level `configs` dict.
    The stem conv downsamples (stride 2) and an extra max-pool follows in forward.
    """

    def __init__(self, net_size, num_classes=10):
        super(ShuffleNetV2, self).__init__()
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.conv1 = nn.Conv2d(3, 24, kernel_size=4, padding=1, stride=2, bias=False) #h=h//2
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        self.linear = nn.Linear(out_channels[3], num_classes)

    def _make_layer(self, out_channels, num_blocks):
        """One stage: a stride-2 DownBlock followed by `num_blocks` BasicBlocks."""
        layers = [DownBlock(self.in_channels, out_channels)]
        for i in range(num_blocks):
            layers.append(BasicBlock(out_channels, is_last=(i == num_blocks - 1)))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def get_feat_modules(self):
        """Feature-producing modules in forward order (for distillation hooks)."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m

    def get_bn_before_relu(self):
        raise NotImplementedError('ShuffleNetV2 currently is not supported for "Overhaul" teacher')

    def forward(self, x, is_feat=False, preact=False):
        """Return logits; with is_feat=True also return intermediate features.

        Each stage's last BasicBlock returns (out, preact); `preact` selects
        which of the two is reported in the feature list.
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1)
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        f4 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], out
            else:
                return [f0, f1, f2, f3, f4], out
        else:
            return out
# Width multiplier -> per-stage output channels (stages 2-4 plus the final
# 1x1 conv width) and per-stage block counts, as in the ShuffleNet V2 paper.
configs = {
    0.2: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 3, 3)
    },

    0.3: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 7, 3)
    },

    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },

    1: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2: {
        'out_channels': (224, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
def ShuffleV2(**kwargs):
    """ShuffleNet v2 at the 1x width setting."""
    return ShuffleNetV2(net_size=1, **kwargs)
if __name__ == '__main__':
    # Smoke test: time a forward pass and print the feature shapes.
    net = ShuffleV2(num_classes=100)
    x = torch.randn(2, 3, 128, 128)
    import time
    a = time.time()
    feats, logit = net(x, is_feat=True, preact=True)
    b = time.time()
    print(b - a)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
| 7,241 | 33.160377 | 190 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Original Author: Wei Yang
"""
__all__ = ['wrn']
class BasicBlock(nn.Module):
    """Pre-activation WRN basic block: BN-ReLU-conv3 twice with a residual add.

    When in/out widths differ, a 1x1 conv on the *activated* input serves as
    the shortcut; otherwise the raw input is used.
    """

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        if self.equalInOut:
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                          padding=0, bias=False)

    def forward(self, x):
        if self.equalInOut:
            branch = self.relu1(self.bn1(x))
            shortcut = x
        else:
            # Pre-activation feeds both the residual branch and the 1x1 shortcut.
            x = self.relu1(self.bn1(x))
            branch = x
            shortcut = self.convShortcut(x)
        branch = self.relu2(self.bn2(self.conv1(branch)))
        if self.droprate > 0:
            branch = F.dropout(branch, p=self.droprate, training=self.training)
        branch = self.conv2(branch)
        return torch.add(shortcut, branch)
class NetworkBlock(nn.Module):
    """A WRN stage of `nb_layers` blocks; only the first may change width/stride."""

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        blocks = []
        for idx in range(nb_layers):
            src_planes = in_planes if idx == 0 else out_planes
            blk_stride = stride if idx == 0 else 1
            blocks.append(block(src_planes, out_planes, blk_stride, dropRate))
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet (depth must be 6n+4) adapted for 128x128 inputs.

    The stem conv downsamples by 2 and is followed by an extra max-pool in
    forward (the CIFAR stem is kept in comments).
    """

    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        # self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False) #for cifar
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=4, stride=2, padding=1, bias=False) #for imagenet100
        self.bn0 = nn.BatchNorm2d(nChannels[0])
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style init for convs; BN starts at identity; linear bias at zero.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def get_feat_modules(self):
        """Feature-producing modules in forward order (for distillation hooks)."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.block1)
        feat_m.append(self.block2)
        feat_m.append(self.block3)
        return feat_m

    def get_bn_before_relu(self):
        """The BN layers immediately preceding each stage's first ReLU."""
        bn1 = self.block2.layer[0].bn1
        bn2 = self.block3.layer[0].bn1
        bn3 = self.bn1
        return [bn1, bn2, bn3]

    def forward(self, x, is_feat=False, preact=False):
        """Return logits; with is_feat=True also return [f0..f4] features.

        With preact=True, f1-f3 are passed through the following stage's
        first BN (pre-activation view) before being returned.
        """
        # out = self.conv1(x) #for cifar
        out = F.relu(self.bn0(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1) ##extra maxpool2d
        f0 = out
        out = self.block1(out)
        f1 = out
        out = self.block2(out)
        f2 = out
        out = self.block3(out)
        f3 = out
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        f4 = out
        out = self.fc(out)
        if is_feat:
            if preact:
                f1 = self.block2.layer[0].bn1(f1)
                f2 = self.block3.layer[0].bn1(f2)
                f3 = self.bn1(f3)
            return [f0, f1, f2, f3, f4], out
        else:
            return out
def wrn(**kwargs):
    """Construct a Wide Residual Network from keyword arguments."""
    return WideResNet(**kwargs)
def wrn_40_2(**kwargs):
    """WRN-40-2: depth 40, width multiplier 2."""
    return WideResNet(depth=40, widen_factor=2, **kwargs)
def wrn_40_1(**kwargs):
    """WRN-40-1: depth 40, width multiplier 1."""
    return WideResNet(depth=40, widen_factor=1, **kwargs)
def wrn_16_2(**kwargs):
    """WRN-16-2: depth 16, width multiplier 2."""
    return WideResNet(depth=16, widen_factor=2, **kwargs)
def wrn_16_1(**kwargs):
    """WRN-16-1: depth 16, width multiplier 1."""
    return WideResNet(depth=16, widen_factor=1, **kwargs)
if __name__ == '__main__':
    # Smoke test: forward a random batch, print feature shapes, and check that
    # get_bn_before_relu returns BatchNorm2d layers.
    import torch
    x = torch.randn(2, 3, 128, 128)
    net = wrn_16_1(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 5,807 | 32.188571 | 116 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/sync_batchnorm/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
    """Empty attribute bag shared among replicas of one sub-module."""
    pass
def execute_replication_callbacks(modules):
    """Invoke `__data_parallel_replicate__(ctx, copy_id)` on each replicated sub-module.

    Sub-modules at the same position in different replicas share a single
    CallbackContext, which lets copies on different devices exchange
    information. Since the master copy (index 0) is iterated first, its
    callback always runs before any slave copy's.
    """
    master = modules[0]
    contexts = [CallbackContext() for _ in master.modules()]
    for copy_id, replica in enumerate(modules):
        for ctx, sub_module in zip(contexts, replica.modules()):
            callback = getattr(sub_module, '__data_parallel_replicate__', None)
            if callback is not None:
                callback(ctx, copy_id)
class DataParallelWithCallback(DataParallel):
    """`DataParallel` that fires replication callbacks on freshly made replicas.

    After the stock `replicate` creates the copies, each sub-module's
    `__data_parallel_replicate__` hook is invoked (see
    `execute_replication_callbacks`).

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        # sync_bn.__data_parallel_replicate__ will be invoked.
    """

    def replicate(self, module, device_ids):
        replicas = super(DataParallelWithCallback, self).replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas
def patch_replication_callback(data_parallel):
    """Monkey-patch an existing `DataParallel` so replication callbacks run.

    Useful when you already have a customized `DataParallel` instance and
    cannot swap it for `DataParallelWithCallback`.

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
        > patch_replication_callback(sync_bn)
        # this is equivalent to
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
    """
    assert isinstance(data_parallel, DataParallel)
    original_replicate = data_parallel.replicate

    @functools.wraps(original_replicate)
    def replicate_with_callbacks(module, device_ids):
        replicas = original_replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas

    data_parallel.replicate = replicate_with_callbacks
| 3,226 | 32.968421 | 115 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/sync_batchnorm/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import torch
class TorchTestCase(unittest.TestCase):
    """unittest.TestCase with an element-wise tensor closeness assertion."""

    def assertTensorClose(self, x, y):
        """Assert `torch.allclose(x, y)`, reporting abs/rel diffs on failure."""
        adiff = float((x - y).abs().max())
        if (y == 0).all():
            rdiff = 'NaN'  # relative diff undefined against an all-zero target
        else:
            rdiff = float((adiff / y).abs().max())
        message = (
            'Tensor close check failed\n'
            'adiff={}\n'
            'rdiff={}\n'
        ).format(adiff, rdiff)
        self.assertTrue(torch.allclose(x, y), message)
| 746 | 23.9 | 59 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/sync_batchnorm/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from .comm import SyncMaster
__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d']
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dementions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
# Messages exchanged during the cross-device reduction: each slave sends its
# per-device (sum, sum of squares, element count); the master broadcasts back
# the global (mean, inverse std).
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
# _MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'ssum', 'sum_size'])
class _SynchronizedBatchNorm(_BatchNorm):
    """BatchNorm whose statistics are reduced across all devices during training.

    Replica 0 acts as the master: it gathers per-device (sum, ssum, count)
    messages, computes the global mean / inverse std, and broadcasts them back
    to the slaves. In eval mode, or when not replicated, it falls back to the
    stock `F.batch_norm`.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None

    def forward(self, input, gain=None, bias=None):
        """Normalize `input`; optional `gain`/`bias` apply an external affine
        transform (used by conditional BN) instead of self.weight/self.bias."""
        # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
        if not (self._is_parallel and self.training):
            out = F.batch_norm(
                input, self.running_mean, self.running_var, self.weight, self.bias,
                self.training, self.momentum, self.eps)
            if gain is not None:
                out = out + gain
            if bias is not None:
                out = out + bias
            return out
        # Resize the input to (B, C, -1).
        input_shape = input.size()
        # print(input_shape)
        input = input.view(input.size(0), input.size(1), -1)
        # Compute the sum and square-sum.
        sum_size = input.size(0) * input.size(2)
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft(input ** 2)
        # Reduce-and-broadcast the statistics.
        # print('it begins')
        if self._parallel_id == 0:
            mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
        # if self._parallel_id == 0:
            # # print('here')
            # sum, ssum, num = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        # else:
            # # print('there')
            # sum, ssum, num = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
        # print('how2')
        # num = sum_size
        # print('Sum: %f, ssum: %f, sumsize: %f, insum: %f' %(float(sum.sum().cpu()), float(ssum.sum().cpu()), float(sum_size), float(input_sum.sum().cpu())))
        # Fix the graph
        # sum = (sum.detach() - input_sum.detach()) + input_sum
        # ssum = (ssum.detach() - input_ssum.detach()) + input_ssum
        # mean = sum / num
        # var = ssum / num - mean ** 2
        # # var = (ssum - mean * sum) / num
        # inv_std = torch.rsqrt(var + self.eps)
        # Compute the output.
        if gain is not None:
            # print('gaining')
            # scale = _unsqueeze_ft(inv_std) * gain.squeeze(-1)
            # shift = _unsqueeze_ft(mean) * scale - bias.squeeze(-1)
            # output = input * scale - shift
            output = (input - _unsqueeze_ft(mean)) * (_unsqueeze_ft(inv_std) * gain.squeeze(-1)) + bias.squeeze(-1)
        elif self.affine:
            # MJY:: Fuse the multiplication for speed.
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
        else:
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
        # Reshape it.
        return output.view(input_shape)

    def __data_parallel_replicate__(self, ctx, copy_id):
        """Replication callback: wire this replica to the master's SyncMaster."""
        self._is_parallel = True
        self._parallel_id = copy_id
        # parallel_id == 0 means master device.
        if self._parallel_id == 0:
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)

    def _data_parallel_master(self, intermediates):
        """Reduce the sum and square-sum, compute the statistics, and broadcast it."""
        # Always using same "device order" makes the ReduceAdd operation faster.
        # Thanks to:: Tete Xiao (http://tetexiao.com/)
        intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]  # flatten
        target_gpus = [i[1].sum.get_device() for i in intermediates]
        sum_size = sum([i[1].sum_size for i in intermediates])
        sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
        # print('a')
        # print(type(sum_), type(ssum), type(sum_size), sum_.shape, ssum.shape, sum_size)
        # broadcasted = Broadcast.apply(target_gpus, sum_, ssum, torch.tensor(sum_size).float().to(sum_.device))
        # print('b')
        outputs = []
        for i, rec in enumerate(intermediates):
            outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
            # outputs.append((rec[0], _MasterMessage(*broadcasted[i*3:i*3+3])))
        return outputs

    def _compute_mean_std(self, sum_, ssum, size):
        """Compute the mean and standard-deviation with sum and square-sum. This method
        also maintains the moving average on the master device."""
        assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = sum_ / size
        sumvar = ssum - sum_ * mean
        unbias_var = sumvar / (size - 1)
        bias_var = sumvar / size
        # Running stats use the unbiased variance; normalization uses the biased one.
        self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
        self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
        return mean, torch.rsqrt(bias_var + self.eps)
        # return mean, bias_var.clamp(self.eps) ** -0.5
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over 2d or 3d inputs (temporal BN).

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in :class:`torch.nn.BatchNorm1d`, the mean and
    standard-deviation are reduced across all devices during training, so the
    statistics cover every sample of the distributed mini-batch instead of one
    device's shard. With a single GPU or on CPU it behaves exactly like the
    built-in implementation.

    Statistics are computed per channel over ``(N, L)`` slices; ``gamma`` and
    ``beta`` are learnable vectors of size ``C``. A running estimate of mean
    and variance is kept during training (default momentum 0.1) and used for
    normalization at evaluation time.

    Args:
        num_features: ``C`` from an expected input of size
            ``batch_size x num_features [x width]``
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: momentum for the running_mean / running_var updates.
            Default: 0.1
        affine: when ``True``, the layer has learnable affine parameters.
            Default: ``True``

    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm1d(100)                 # learnable affine
        >>> m = SynchronizedBatchNorm1d(100, affine=False)   # no affine
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100)))
    """
    def _check_input_dim(self, input):
        # The 1d variant only accepts (N, C) or (N, C, L) layouts.
        if input.dim() not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over 4d inputs (spatial BN).

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in :class:`torch.nn.BatchNorm2d`, the mean and
    standard-deviation are reduced across all devices during training, so the
    statistics cover every sample of the distributed mini-batch instead of one
    device's shard. With a single GPU or on CPU it behaves exactly like the
    built-in implementation.

    Statistics are computed per channel over ``(N, H, W)`` slices; ``gamma``
    and ``beta`` are learnable vectors of size ``C``. A running estimate of
    mean and variance is kept during training (default momentum 0.1) and used
    for normalization at evaluation time.

    Args:
        num_features: ``C`` from an expected input of size
            ``batch_size x num_features x height x width``
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: momentum for the running_mean / running_var updates.
            Default: 0.1
        affine: when ``True``, the layer has learnable affine parameters.
            Default: ``True``

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm2d(100)                 # learnable affine
        >>> m = SynchronizedBatchNorm2d(100, affine=False)   # no affine
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100, 35, 45)))
    """
    def _check_input_dim(self, input):
        # Spatial batch norm requires (N, C, H, W) inputs.
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over 5d inputs (volumetric BN).

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in :class:`torch.nn.BatchNorm3d`, the mean and
    standard-deviation are reduced across all devices during training, so the
    statistics cover every sample of the distributed mini-batch instead of one
    device's shard. With a single GPU or on CPU it behaves exactly like the
    built-in implementation.

    Statistics are computed per channel over ``(N, D, H, W)`` slices; ``gamma``
    and ``beta`` are learnable vectors of size ``C``. A running estimate of
    mean and variance is kept during training (default momentum 0.1) and used
    for normalization at evaluation time.

    Args:
        num_features: ``C`` from an expected input of size
            ``batch_size x num_features x depth x height x width``
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: momentum for the running_mean / running_var updates.
            Default: 0.1
        affine: when ``True``, the layer has learnable affine parameters.
            Default: ``True``

    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm3d(100)                 # learnable affine
        >>> m = SynchronizedBatchNorm3d(100, affine=False)   # no affine
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10)))
    """
    def _check_input_dim(self, input):
        # Volumetric batch norm requires (N, C, D, H, W) inputs.
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/sync_batchnorm/batchnorm_reimpl.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : batchnorm_reimpl.py
# Author : acgtyrant
# Date : 11/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
__all__ = ['BatchNormReimpl']
class BatchNorm2dReimpl(nn.Module):
    """
    A pure-PyTorch re-implementation of 2d batch normalization, kept around for
    checking the numerical stability of the synchronized version.

    Author: acgtyrant
    See also:
    https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        # Learnable affine parameters (gamma / beta).
        self.weight = nn.Parameter(torch.empty(num_features))
        self.bias = nn.Parameter(torch.empty(num_features))
        # Running statistics, refreshed on every forward pass.
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_running_stats(self):
        """Reset the running mean/variance to their initial values."""
        self.running_mean.zero_()
        self.running_var.fill_(1)

    def reset_parameters(self):
        """Re-initialize running statistics and the affine parameters."""
        self.reset_running_stats()
        init.uniform_(self.weight)
        init.zeros_(self.bias)

    def forward(self, input_):
        n, c, h, w = input_.size()
        count = n * h * w
        # Fold batch and spatial dims together so each row holds one channel.
        flat = input_.permute(1, 0, 2, 3).contiguous().view(c, count)
        channel_sum = flat.sum(1)
        channel_sqsum = flat.pow(2).sum(1)
        mean = channel_sum / count
        # Sum of squared deviations via E[x^2]*n - n*mean^2.
        sq_dev_sum = channel_sqsum - channel_sum * mean
        self.running_mean = (
            (1 - self.momentum) * self.running_mean
            + self.momentum * mean.detach()
        )
        unbiased_var = sq_dev_sum / (count - 1)
        self.running_var = (
            (1 - self.momentum) * self.running_var
            + self.momentum * unbiased_var.detach()
        )
        # Normalization itself uses the biased variance, as regular BN does.
        biased_var = sq_dev_sum / count
        inv_std = 1 / (biased_var + self.eps).pow(0.5)
        normalized = (
            (flat - mean.unsqueeze(1)) * inv_std.unsqueeze(1) *
            self.weight.unsqueeze(1) + self.bias.unsqueeze(1))
        return normalized.view(c, n, h, w).permute(1, 0, 2, 3).contiguous()
cGAN-KD | cGAN-KD-main/ImageNet-100/make_fake_datasets/models/layers/layers.py | ''' Layers
This file contains various layers for the BigGAN models.
'''
import numpy as np
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
#from sync_batchnorm import SynchronizedBatchNorm2d as SyncBN2d
# Projection of x onto y
def proj(x, y):
  """Project the row vector x onto the direction spanned by the row vector y."""
  # Inner products realized as 1x1 matrix products: <y,x> and <y,y>.
  overlap = torch.mm(y, x.t())
  norm_sq = torch.mm(y, y.t())
  return overlap * y / norm_sq
# Orthogonalize x wrt list of vectors ys
def gram_schmidt(x, ys):
  """Classical Gram-Schmidt: subtract from x its projection onto each y in ys."""
  for y in ys:
    # proj(x, y) inlined: <y,x>/<y,y> scaled copy of y.
    x = x - torch.mm(y, x.t()) * y / torch.mm(y, y.t())
  return x
# Apply num_itrs steps of the power method to estimate top N singular values.
def power_iteration(W, u_, update=True, eps=1e-12):
  """One power-iteration sweep estimating the top ``len(u_)`` singular values of W.

  ``u_`` holds one left-singular-vector estimate per singular value; when
  ``update`` is True each entry is refreshed IN PLACE (``u_[i][:] = u``), which
  is how the persistent SN buffers get updated during training.
  Returns ``(svs, us, vs)``: singular-value estimates plus the refreshed
  left/right singular vectors.
  """
  # Lists holding singular vectors and values
  us, vs, svs = [], [], []
  for i, u in enumerate(u_):
    # Run one step of the power iteration
    with torch.no_grad():
      v = torch.matmul(u, W)
      # Run Gram-Schmidt to subtract components of all other singular vectors
      v = F.normalize(gram_schmidt(v, vs), eps=eps)
      # Add to the list
      vs += [v]
      # Update the other singular vector
      u = torch.matmul(v, W.t())
      # Run Gram-Schmidt to subtract components of all other singular vectors
      u = F.normalize(gram_schmidt(u, us), eps=eps)
      # Add to the list
      us += [u]
      if update:
        u_[i][:] = u
    # Compute this singular value and add it to the list; note this happens
    # OUTSIDE the no_grad() block, so the value participates in autograd.
    svs += [torch.squeeze(torch.matmul(torch.matmul(v, W.t()), u.t()))]
    #svs += [torch.sum(F.linear(u, W.transpose(0, 1)) * v)]
  return svs, us, vs
# Convenience passthrough function
class identity(nn.Module):
  """No-op module: returns its input unchanged (handy as a placeholder)."""
  def forward(self, input):
    return input
# Spectral normalization base class
class SN(object):
  """Spectral-normalization mixin.

  Intended to be co-inherited with an nn.Module subclass that supplies
  ``self.weight``, ``self.training`` and ``register_buffer`` (see SNConv2d /
  SNLinear / SNEmbedding below). Persistent buffers hold the singular-vector
  estimates (``u0..``) and the logged singular values (``sv0..``).
  """
  def __init__(self, num_svs, num_itrs, num_outputs, transpose=False, eps=1e-12):
    # Number of power iterations per step
    self.num_itrs = num_itrs
    # Number of singular values
    self.num_svs = num_svs
    # Transposed?
    self.transpose = transpose
    # Epsilon value for avoiding divide-by-0
    self.eps = eps
    # Register a singular vector for each sv
    for i in range(self.num_svs):
      self.register_buffer('u%d' % i, torch.randn(1, num_outputs))
      self.register_buffer('sv%d' % i, torch.ones(1))
  # Singular vectors (u side)
  @property
  def u(self):
    return [getattr(self, 'u%d' % i) for i in range(self.num_svs)]
  # Singular values;
  # note that these buffers are just for logging and are not used in training.
  @property
  def sv(self):
    return [getattr(self, 'sv%d' % i) for i in range(self.num_svs)]
  # Compute the spectrally-normalized weight
  def W_(self):
    """Return ``self.weight`` divided by its estimated largest singular value."""
    W_mat = self.weight.view(self.weight.size(0), -1)
    if self.transpose:
      W_mat = W_mat.t()
    # Apply num_itrs power iterations
    for _ in range(self.num_itrs):
      svs, us, vs = power_iteration(W_mat, self.u, update=self.training, eps=self.eps)
    # Update the svs
    if self.training:
      with torch.no_grad(): # Make sure to do this in a no_grad() context or you'll get memory leaks!
        for i, sv in enumerate(svs):
          self.sv[i][:] = sv
    return self.weight / svs[0]
# 2D Conv layer with spectral norm
class SNConv2d(nn.Conv2d, SN):
  """Conv2d whose weight is spectrally normalized via the SN mixin.

  NOTE: the two base-class __init__s are invoked explicitly (not via super())
  so SN's buffers are registered after nn.Conv2d has set the module up.
  """
  def __init__(self, in_channels, out_channels, kernel_size, stride=1,
             padding=0, dilation=1, groups=1, bias=True,
             num_svs=1, num_itrs=1, eps=1e-12):
    nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride,
                     padding, dilation, groups, bias)
    SN.__init__(self, num_svs, num_itrs, out_channels, eps=eps)
  def forward(self, x):
    # Same as nn.Conv2d.forward, but with the normalized weight W_().
    return F.conv2d(x, self.W_(), self.bias, self.stride,
                    self.padding, self.dilation, self.groups)
# Linear layer with spectral norm
class SNLinear(nn.Linear, SN):
  """Linear layer whose weight is spectrally normalized via the SN mixin."""
  def __init__(self, in_features, out_features, bias=True,
               num_svs=1, num_itrs=1, eps=1e-12):
    nn.Linear.__init__(self, in_features, out_features, bias)
    SN.__init__(self, num_svs, num_itrs, out_features, eps=eps)
  def forward(self, x):
    # Same as nn.Linear.forward, but with the normalized weight W_().
    return F.linear(x, self.W_(), self.bias)
# Embedding layer with spectral norm
# We use num_embeddings as the dim instead of embedding_dim here
# for convenience sake
class SNEmbedding(nn.Embedding, SN):
  """Embedding table with spectral normalization applied to the weight."""
  def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
               max_norm=None, norm_type=2, scale_grad_by_freq=False,
               sparse=False, _weight=None,
               num_svs=1, num_itrs=1, eps=1e-12):
    nn.Embedding.__init__(self, num_embeddings, embedding_dim, padding_idx,
                          max_norm, norm_type, scale_grad_by_freq,
                          sparse, _weight)
    SN.__init__(self, num_svs, num_itrs, num_embeddings, eps=eps)
  def forward(self, x):
    # NOTE(review): padding_idx/max_norm/etc. are NOT forwarded to F.embedding
    # here, so those constructor options are effectively ignored — confirm.
    return F.embedding(x, self.W_())
# A non-local block as used in SA-GAN
# Note that the implementation as described in the paper is largely incorrect;
# refer to the released code for the actual implementation.
class Attention(nn.Module):
  """Self-attention block (SA-GAN style) over spatial feature maps."""
  def __init__(self, ch, which_conv=SNConv2d, name='attention'):
    super(Attention, self).__init__()
    # Channel multiplier
    self.ch = ch
    self.which_conv = which_conv
    # 1x1 convs producing query (theta), key (phi), value (g) and output (o).
    self.theta = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)
    self.phi = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)
    self.g = self.which_conv(self.ch, self.ch // 2, kernel_size=1, padding=0, bias=False)
    self.o = self.which_conv(self.ch // 2, self.ch, kernel_size=1, padding=0, bias=False)
    # Learnable gain parameter
    self.gamma = P(torch.tensor(0.), requires_grad=True)
  def forward(self, x, y=None):
    # Apply convs
    theta = self.theta(x)
    # Keys and values are 2x max-pooled, quartering the attention matrix size.
    phi = F.max_pool2d(self.phi(x), [2,2])
    g = F.max_pool2d(self.g(x), [2,2])
    # Perform reshapes
    theta = theta.view(-1, self. ch // 8, x.shape[2] * x.shape[3])
    phi = phi.view(-1, self. ch // 8, x.shape[2] * x.shape[3] // 4)
    g = g.view(-1, self. ch // 2, x.shape[2] * x.shape[3] // 4)
    # Matmul and softmax to get attention maps
    beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1)
    # Attention map times g path
    o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.ch // 2, x.shape[2], x.shape[3]))
    # Residual connection scaled by the learned gamma (initialized to 0, so the
    # block starts out as an identity mapping).
    return self.gamma * o + x
# Fused batchnorm op
def fused_bn(x, mean, var, gain=None, bias=None, eps=1e-5):
  """Normalize x with the given statistics, folding the optional gain/bias
  into a single scale-and-shift so only one multiply-add touches the tensor."""
  inv_std = torch.rsqrt(var + eps)
  # Fold the gain into the scale when provided.
  scale = inv_std if gain is None else inv_std * gain
  # Fold the bias into the shift when provided.
  shift = mean * scale if bias is None else mean * scale - bias
  return x * scale - shift
# Manual BN
# Calculate means and variances using mean-of-squares minus mean-squared
def manual_bn(x, gain=None, bias=None, return_mean_var=False, eps=1e-5):
  """Batch-normalize x per channel over dims (N, H, W), computing statistics
  in float32 and optionally returning them for running-average updates."""
  # Do the statistics in float32 even for fp16 inputs.
  float_x = x.float()
  mu = torch.mean(float_x, [0, 2, 3], keepdim=True)
  mu2 = torch.mean(float_x ** 2, [0, 2, 3], keepdim=True)
  # Var[x] = E[x^2] - E[x]^2.
  var = (mu2 - mu **2)
  # Cast the statistics back to the input dtype (e.g. fp16).
  var = var.type(x.type())
  mu = mu.type(x.type())
  if return_mean_var:
    # Also hand back squeezed per-channel stats for the caller's buffers.
    return fused_bn(x, mu, var, gain, bias, eps), mu.squeeze(), var.squeeze()
  return fused_bn(x, mu, var, gain, bias, eps)
# My batchnorm, supports standing stats
class myBN(nn.Module):
  """Hand-rolled batch norm whose gain/bias are supplied per forward call
  (enabling class-conditional BN) and which can accumulate "standing"
  statistics by summing batch stats and dividing by a counter at eval time."""
  def __init__(self, num_channels, eps=1e-5, momentum=0.1):
    super(myBN, self).__init__()
    # momentum for updating running stats
    self.momentum = momentum
    # epsilon to avoid dividing by 0
    self.eps = eps
    # Momentum (NOTE: assigned a second time here; harmless duplication)
    self.momentum = momentum
    # Register buffers
    self.register_buffer('stored_mean', torch.zeros(num_channels))
    self.register_buffer('stored_var', torch.ones(num_channels))
    self.register_buffer('accumulation_counter', torch.zeros(1))
    # Accumulate running means and vars
    self.accumulate_standing = False
  # reset standing stats
  def reset_stats(self):
    self.stored_mean[:] = 0
    self.stored_var[:] = 0
    self.accumulation_counter[:] = 0
  def forward(self, x, gain, bias):
    # gain/bias are per-sample affine tensors produced by the caller.
    if self.training:
      out, mean, var = manual_bn(x, gain, bias, return_mean_var=True, eps=self.eps)
      # If accumulating standing stats, increment them
      if self.accumulate_standing:
        self.stored_mean[:] = self.stored_mean + mean.data
        self.stored_var[:] = self.stored_var + var.data
        self.accumulation_counter += 1.0
      # If not accumulating standing stats, take running averages
      else:
        self.stored_mean[:] = self.stored_mean * (1 - self.momentum) + mean * self.momentum
        self.stored_var[:] = self.stored_var * (1 - self.momentum) + var * self.momentum
      return out
    # If not in training mode, use the stored statistics
    else:
      mean = self.stored_mean.view(1, -1, 1, 1)
      var = self.stored_var.view(1, -1, 1, 1)
      # If using standing stats, divide them by the accumulation counter
      if self.accumulate_standing:
        mean = mean / self.accumulation_counter
        var = var / self.accumulation_counter
      return fused_bn(x, mean, var, gain, bias, self.eps)
# Simple function to handle groupnorm norm stylization
def groupnorm(x, norm_style):
  """Apply F.group_norm with a group count parsed from ``norm_style``.

  'ch_<k>'  -> groups of k channels each (at least one group);
  'grp_<g>' -> exactly g groups;
  otherwise -> 16 groups.
  """
  token = norm_style.split('_')[-1]
  if 'ch' in norm_style:
    # Fixed channels-per-group: round down, but never below one group.
    groups = max(int(x.shape[1]) // int(token), 1)
  elif 'grp' in norm_style:
    groups = int(token)
  else:
    groups = 16
  return F.group_norm(x, groups)
# Class-conditional bn
# output size is the number of channels, input size is for the linear layers
# Andy's Note: this class feels messy but I'm not really sure how to clean it up
# Suggestions welcome! (By which I mean, refactor this and make a pull request
# if you want to make this more readable/usable).
class ccbn(nn.Module):
  """Class-conditional batch norm: per-sample gain and bias are produced by
  linear projections of the conditioning vector ``y``.

  Args:
    output_size: number of feature channels to normalize.
    input_size: dimensionality of the conditioning vector y.
    which_linear: linear-layer constructor (e.g. nn.Linear or SNLinear).
    eps: numerical-stability epsilon. Default: 1e-5.
    momentum: running-stat momentum. Default: 0.1.
    cross_replica: use synchronized BN across devices (requires SyncBN2d).
    mybn: use the manual myBN implementation (standing-stats support).
    norm_style: one of 'bn', 'in', 'gn', 'nonorm' (or 'ch_<k>'/'grp_<g>'
      consumed by groupnorm when 'gn' is selected).
  """
  def __init__(self, output_size, input_size, which_linear, eps=1e-5, momentum=0.1,
               cross_replica=False, mybn=False, norm_style='bn',):
    super(ccbn, self).__init__()
    self.output_size, self.input_size = output_size, input_size
    # Prepare gain and bias layers
    self.gain = which_linear(input_size, output_size)
    self.bias = which_linear(input_size, output_size)
    # epsilon to avoid dividing by 0
    self.eps = eps
    # Momentum
    self.momentum = momentum
    # Use cross-replica batchnorm?
    self.cross_replica = cross_replica
    # Use my batchnorm?
    self.mybn = mybn
    # Norm style?
    self.norm_style = norm_style
    if self.cross_replica:
      # NOTE(review): the SyncBN2d import is commented out at the top of this
      # file; enabling cross_replica will raise NameError unless it is restored.
      self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False)
    elif self.mybn:
      self.bn = myBN(output_size, self.eps, self.momentum)
    elif self.norm_style in ['bn', 'in']:
      self.register_buffer('stored_mean', torch.zeros(output_size))
      self.register_buffer('stored_var', torch.ones(output_size))
  def forward(self, x, y):
    # Calculate class-conditional gains and biases
    gain = (1 + self.gain(y)).view(y.size(0), -1, 1, 1)
    bias = self.bias(y).view(y.size(0), -1, 1, 1)
    # myBN and SyncBN2d fuse the gain/bias into the normalization itself.
    if self.mybn or self.cross_replica:
      return self.bn(x, gain=gain, bias=bias)
    else:
      if self.norm_style == 'bn':
        out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None,
                          self.training, 0.1, self.eps)
      elif self.norm_style == 'in':
        out = F.instance_norm(x, self.stored_mean, self.stored_var, None, None,
                              self.training, 0.1, self.eps)
      elif self.norm_style == 'gn':
        # BUGFIX: was `self.normstyle` (missing underscore), which raised
        # AttributeError whenever norm_style == 'gn'.
        out = groupnorm(x, self.norm_style)
      elif self.norm_style == 'nonorm':
        out = x
      return out * gain + bias
  def extra_repr(self):
    s = 'out: {output_size}, in: {input_size},'
    s += ' cross_replica={cross_replica}'
    return s.format(**self.__dict__)
# Normal, non-class-conditional BN
class bn(nn.Module):
  """Plain batch norm holding its gain/bias as raw Parameters so the same
  affine weights can be handed to myBN / SyncBN2d or used by F.batch_norm."""
  def __init__(self, output_size, eps=1e-5, momentum=0.1,
               cross_replica=False, mybn=False):
    super(bn, self).__init__()
    self.output_size = output_size
    # Learnable affine parameters (gamma / beta).
    self.gain = P(torch.ones(output_size), requires_grad=True)
    self.bias = P(torch.zeros(output_size), requires_grad=True)
    # Numerical-stability epsilon and running-stat momentum.
    self.eps = eps
    self.momentum = momentum
    self.cross_replica = cross_replica
    self.mybn = mybn
    if cross_replica:
      self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False)
    elif mybn:
      self.bn = myBN(output_size, self.eps, self.momentum)
    else:
      # Plain path: keep our own running statistics for F.batch_norm.
      self.register_buffer('stored_mean', torch.zeros(output_size))
      self.register_buffer('stored_var', torch.ones(output_size))
  def forward(self, x, y=None):
    # y is accepted but unused, matching the ccbn call signature.
    if self.cross_replica or self.mybn:
      # Hand the affine transform to the wrapped normalizer.
      return self.bn(x, gain=self.gain.view(1, -1, 1, 1),
                     bias=self.bias.view(1, -1, 1, 1))
    return F.batch_norm(x, self.stored_mean, self.stored_var, self.gain,
                        self.bias, self.training, self.momentum, self.eps)
# Generator blocks
# Note that this class assumes the kernel size and padding (and any other
# settings) have been selected in the main generator module and passed in
# through the which_conv arg. Similar rules apply with which_bn (the input
# size [which is actually the number of channels of the conditional info] must
# be preselected)
class GBlock(nn.Module):
  """Generator residual block: BN -> activation -> (upsample) -> conv, twice,
  plus an identity or 1x1-conv shortcut."""
  def __init__(self, in_channels, out_channels,
               which_conv=nn.Conv2d, which_bn=bn, activation=None,
               upsample=None):
    super(GBlock, self).__init__()
    self.in_channels, self.out_channels = in_channels, out_channels
    self.which_conv, self.which_bn = which_conv, which_bn
    self.activation = activation
    self.upsample = upsample
    # Conv layers
    self.conv1 = self.which_conv(self.in_channels, self.out_channels)
    self.conv2 = self.which_conv(self.out_channels, self.out_channels)
    # Shortcut needs a 1x1 conv when channels change or we upsample.
    self.learnable_sc = in_channels != out_channels or upsample
    if self.learnable_sc:
      self.conv_sc = self.which_conv(in_channels, out_channels,
                                     kernel_size=1, padding=0)
    # Batchnorm layers
    self.bn1 = self.which_bn(in_channels)
    self.bn2 = self.which_bn(out_channels)
    # upsample layers
    self.upsample = upsample
  def forward(self, x, y):
    # y is the conditioning vector consumed by the (class-conditional) BNs.
    h = self.activation(self.bn1(x, y))
    if self.upsample:
      # Both the residual branch and the shortcut get upsampled.
      h = self.upsample(h)
      x = self.upsample(x)
    h = self.conv1(h)
    h = self.activation(self.bn2(h, y))
    h = self.conv2(h)
    if self.learnable_sc:
      x = self.conv_sc(x)
    return h + x
# Residual block for the discriminator
class DBlock(nn.Module):
  """Discriminator residual block: (optional pre-activation) conv -> act ->
  conv, with optional downsampling and a learnable shortcut on shape change."""
  def __init__(self, in_channels, out_channels, which_conv=SNConv2d, wide=True,
               preactivation=False, activation=None, downsample=None,):
    super(DBlock, self).__init__()
    self.in_channels, self.out_channels = in_channels, out_channels
    # If using wide D (as in SA-GAN and BigGAN), change the channel pattern
    self.hidden_channels = self.out_channels if wide else self.in_channels
    self.which_conv = which_conv
    self.preactivation = preactivation
    self.activation = activation
    self.downsample = downsample
    # Conv layers
    self.conv1 = self.which_conv(self.in_channels, self.hidden_channels)
    self.conv2 = self.which_conv(self.hidden_channels, self.out_channels)
    self.learnable_sc = True if (in_channels != out_channels) or downsample else False
    if self.learnable_sc:
      self.conv_sc = self.which_conv(in_channels, out_channels,
                                     kernel_size=1, padding=0)
  def shortcut(self, x):
    # The order of the 1x1 conv vs. the downsample differs between the
    # pre-activation and plain variants.
    if self.preactivation:
      if self.learnable_sc:
        x = self.conv_sc(x)
      if self.downsample:
        x = self.downsample(x)
    else:
      if self.downsample:
        x = self.downsample(x)
      if self.learnable_sc:
        x = self.conv_sc(x)
    return x
  def forward(self, x):
    if self.preactivation:
      # h = self.activation(x) # NOT TODAY SATAN
      # Andy's note: This line *must* be an out-of-place ReLU or it
      # will negatively affect the shortcut connection.
      h = F.relu(x)
    else:
      h = x
    h = self.conv1(h)
    h = self.conv2(self.activation(h))
    if self.downsample:
      h = self.downsample(h)
    return h + self.shortcut(x)
# dogball
| 17,132 | 36.245652 | 101 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/teacher_data_loader.py | import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import PIL
from PIL import Image
import h5py
import os
class IMGs_dataset(torch.utils.data.Dataset):
    """In-memory image dataset over an (N, C, H, W) uint8 array with optional
    labels and an optional torchvision-style transform applied per item."""

    def __init__(self, images, labels=None, transform=None):
        super(IMGs_dataset, self).__init__()
        self.images = images
        self.n_images = len(self.images)
        self.labels = labels
        if labels is not None and len(self.images) != len(self.labels):
            raise Exception(f'images ({len(self.images)}) and labels ({len(self.labels)}) do not have the same length!!!')
        self.transform = transform

    def __getitem__(self, index):
        # RGB images only: convert CHW -> HWC PIL image before transforming.
        image = self.images[index]
        if self.transform is not None:
            hwc = np.transpose(image, (1, 2, 0))
            image = self.transform(Image.fromarray(np.uint8(hwc), mode='RGB'))
        if self.labels is None:
            return image
        return image, self.labels[index]

    def __len__(self):
        return self.n_images
def get_imagenet100(num_classes=100, real_data="", fake_data="None", nfake=1e30, batch_size=128, num_workers=0):
    """Build train/test DataLoaders for the 128x128 ImageNet-100 subset,
    optionally mixing up to `nfake` generated samples into the training set.

    Args:
        num_classes: number of classes (fake samples are subsampled per class).
        real_data: directory containing ImageNet_128x128_100Class.h5.
        fake_data: path to an HDF5 file of fake images/labels, or "None".
        nfake: cap on the total number of fake samples to mix in.
        batch_size, num_workers: passed through to the DataLoaders.

    Returns:
        (train_loader, test_loader)
    """
    ## load real data
    trainset_h5py_file = os.path.join(real_data, 'ImageNet_128x128_100Class.h5')
    hf = h5py.File(trainset_h5py_file, 'r')
    real_images = hf['images_train'][:]
    real_labels = hf['labels_train'][:]
    images_test = hf['images_valid'][:]
    labels_test = hf['labels_valid'][:]
    hf.close()
    ## load fake data
    if fake_data != 'None':
        ## load fake data
        with h5py.File(fake_data, "r") as f:
            images_train = f['fake_images'][:]
            labels_train = f['fake_labels'][:]
        labels_train = labels_train.astype(int)
        if nfake<len(labels_train):
            # Subsample: nfake//num_classes fakes per class, with the last
            # class absorbing the remainder.
            indx_fake = []
            for i in range(num_classes):
                indx_fake_i = np.where(labels_train==i)[0]
                if i != (num_classes-1):
                    nfake_i = nfake//num_classes
                else:
                    nfake_i = nfake-(nfake//num_classes)*i
                indx_fake_i = np.random.choice(indx_fake_i, size=min(int(nfake_i), len(indx_fake_i)), replace=False)
                indx_fake.append(indx_fake_i)
            #end for i
            indx_fake = np.concatenate(indx_fake)
            images_train = images_train[indx_fake]
            labels_train = labels_train[indx_fake]
        ## concatenate real and fake training data
        images_train = np.concatenate((real_images, images_train), axis=0)
        labels_train = np.concatenate((real_labels, labels_train), axis=0)
        labels_train = labels_train.astype(int)
    else:
        images_train = real_images
        labels_train = real_labels
    ## compute the per-channel mean and std of the (augmented) training set
    ## for input normalization
    train_means = []
    train_stds = []
    for i in range(3):
        images_i = images_train[:,i,:,:]
        images_i = images_i/255.0
        train_means.append(np.mean(images_i))
        train_stds.append(np.std(images_i))
    ## for i
    ## info of training set and test set
    print("\n Training set: {}x{}x{}x{}; Testing set: {}x{}x{}x{}.".format(images_train.shape[0], images_train.shape[1], images_train.shape[2], images_train.shape[3], images_test.shape[0], images_test.shape[1], images_test.shape[2], images_test.shape[3]))
    # Training augmentation: random resized crop + horizontal flip.
    transform_cnn_train = transforms.Compose([
        # transforms.RandomCrop(128, padding=4),
        transforms.RandomResizedCrop(128),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(train_means, train_stds),
    ])
    # Test-time: normalization only (note: uses train-set statistics).
    transform_cnn_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(train_means, train_stds),
    ])
    trainset_cnn = IMGs_dataset(images_train, labels_train, transform=transform_cnn_train)
    train_loader = torch.utils.data.DataLoader(trainset_cnn, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    testset_cnn = IMGs_dataset(images_test, labels_test, transform=transform_cnn_test)
    test_loader = torch.utils.data.DataLoader(testset_cnn, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_loader, test_loader
| 4,779 | 36.34375 | 255 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/teacher.py | print("\n ===================================================================================================")
import os
import os.path as osp
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from utils import AverageMeter, accuracy
from models import model_dict
from teacher_data_loader import get_imagenet100
# Enable cuDNN autotuning (fixed input sizes make this a net win).
torch.backends.cudnn.benchmark = True
# ----- command-line interface -----
parser = argparse.ArgumentParser(description='train teacher network.')
# Paths and data options.
parser.add_argument('--root_path', default="", type=str)
parser.add_argument('--real_data', default="", type=str)
parser.add_argument('--num_classes', type=int, default=100)
parser.add_argument('--num_workers', type=int, default=0)
parser.add_argument('--seed', type=int, default=0)
# Fake-data augmentation: path to generated samples and how many to mix in.
parser.add_argument('--fake_data', default="None", type=str)
parser.add_argument('--nfake', default=1e30, type=float)
# Optional fine-tuning from a pre-trained checkpoint.
parser.add_argument('--finetune', action='store_true', default=False)
parser.add_argument('--init_model_path', type=str, default='')
# Architecture and optimization hyperparameters.
parser.add_argument('--arch', type=str)
parser.add_argument('--epochs', type=int, default=120)
parser.add_argument('--resume_epoch', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--lr_decay_epochs', type=str, default='30_60_90', help='decay lr at which epoch; separate by _')
parser.add_argument('--lr_decay_factor', type=float, default=0.1)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--save_interval', type=int, default=20)
args = parser.parse_args()
#######################################
''' set seed '''
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
''' save folders '''
# Vanilla teachers and fake-data-augmented teachers go to separate folders.
if args.fake_data=="None" or args.nfake<=0:
    save_folder = os.path.join(args.root_path, 'output/teachers/vanilla')
else:
    fake_data_name = args.fake_data.split('/')[-1]
    save_folder = os.path.join(args.root_path, 'output/teachers/{}_useNfake_{}'.format(fake_data_name, args.nfake))
os.makedirs(save_folder, exist_ok=True)
setting_name = "{}_lr_{}_decay_{}".format(args.arch, args.lr, args.weight_decay)
# Mid-training checkpoints live in a per-setting subfolder.
save_intrain_folder = os.path.join(save_folder, 'ckpts_in_train', setting_name)
os.makedirs(save_intrain_folder, exist_ok=True)
''' dataloaders '''
train_loader, val_loader = get_imagenet100(num_classes=args.num_classes, real_data=args.real_data, fake_data=args.fake_data, nfake=args.nfake, batch_size=args.batch_size, num_workers=args.num_workers)
#######################################
''' learning rate decay '''
# '30_60_90' -> [30, 60, 90]: epochs at which the lr is multiplied by the factor.
lr_decay_epochs = (args.lr_decay_epochs).split("_")
lr_decay_epochs = [int(epoch) for epoch in lr_decay_epochs]
def adjust_learning_rate(optimizer, epoch):
    """Step-decay schedule: shrink the base lr ``args.lr`` by
    ``args.lr_decay_factor`` once for every milestone in
    ``lr_decay_epochs`` that ``epoch`` has already reached, then write
    the result into every parameter group of ``optimizer``.
    """
    lr = args.lr
    for milestone in lr_decay_epochs:
        if epoch >= milestone:
            lr = lr * args.lr_decay_factor
    for group in optimizer.param_groups:
        group['lr'] = lr
''' init. models '''
model = model_dict[args.arch](num_classes=args.num_classes).cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
''' whether finetune '''
if args.finetune:
ckpt_model_filename = os.path.join(save_folder, 'ckpt_{}_epoch_{}_finetune_last.pth'.format(args.arch, args.epochs))
## load pre-trained model
checkpoint = torch.load(args.init_model_path)
model.load_state_dict(checkpoint['model'])
else:
ckpt_model_filename = os.path.join(save_folder, 'ckpt_{}_epoch_{}_last.pth'.format(args.arch, args.epochs))
print('\n ' + ckpt_model_filename)
''' start training '''
if not os.path.isfile(ckpt_model_filename):
print("\n Start training {} >>>".format(args.arch))
if args.resume_epoch>0:
save_file = save_intrain_folder + "/ckpt_{}_epoch_{}.pth".format(args.arch, args.resume_epoch)
checkpoint = torch.load(save_file)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
#end if
total_train_time = 0
for epoch in range(args.resume_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
##########################
## Training Stage
model.train()
loss_record = AverageMeter()
acc_record = AverageMeter()
start = time.time()
for x, target in train_loader:
optimizer.zero_grad()
x = x.cuda()
target = target.type(torch.long).cuda()
output = model(x)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
batch_acc = accuracy(output, target, topk=(1,))[0]
loss_record.update(loss.item(), x.size(0))
acc_record.update(batch_acc.item(), x.size(0))
run_time = time.time() - start
total_train_time+=run_time
info = '\n train_Epoch:{:03d}/{:03d}\t run_time:{:.3f}\t cls_loss:{:.3f}\t cls_acc:{:.2f}'.format(epoch+1, args.epochs, run_time, loss_record.avg, acc_record.avg)
print(info)
##########################
## Testing Stage
model.eval()
acc_record = AverageMeter()
loss_record = AverageMeter()
start = time.time()
for x, target in val_loader:
x = x.cuda()
target = target.type(torch.long).cuda()
with torch.no_grad():
output = model(x)
loss = F.cross_entropy(output, target)
batch_acc = accuracy(output, target, topk=(1,))[0]
loss_record.update(loss.item(), x.size(0))
acc_record.update(batch_acc.item(), x.size(0))
run_time = time.time() - start
info = '\r test_Epoch:{:03d}/{:03d}\t run_time:{:.2f}\t cls_loss:{:.3f}\t cls_acc:{:.2f}'.format(epoch+1, args.epochs, run_time, loss_record.avg, acc_record.avg)
print(info)
print('\r Total training time: {:.2f} seconds.'.format(total_train_time))
##########################
## save checkpoint
if (epoch+1) % args.save_interval==0 :
save_file = save_intrain_folder + "/ckpt_{}_epoch_{}.pth".format(args.arch, epoch+1)
torch.save({
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, save_file)
## end for epoch
print("\n End training CNN")
## save model
torch.save({'model': model.state_dict()}, ckpt_model_filename)
else:
print("\n Loading pre-trained {}.".format(args.arch))
checkpoint = torch.load(ckpt_model_filename)
model.load_state_dict(checkpoint['model'])
##########################
model = model.cuda()
model.eval()
acc_record = AverageMeter()
loss_record = AverageMeter()
for x, target in val_loader:
x = x.cuda()
target = target.type(torch.long).cuda()
with torch.no_grad():
output = model(x)
loss = F.cross_entropy(output, target)
batch_acc = accuracy(output, target, topk=(1,))[0]
loss_record.update(loss.item(), x.size(0))
acc_record.update(batch_acc.item(), x.size(0))
print('\n Test accuracy of {}: {:.2f}'.format(args.arch, acc_record.avg))
print("\n ===================================================================================================")
| 7,727 | 33.5 | 200 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/student.py | print("\n ===================================================================================================")
import os
import os.path as osp
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from utils import AverageMeter, accuracy
from wrapper import wrapper
from models import model_dict
from student_dataset import IMGs_dataset
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='train SSKD student network.')
parser.add_argument('--root_path', default="", type=str)
parser.add_argument('--real_data', default="", type=str)
parser.add_argument('--num_classes', type=int, default=100)
parser.add_argument('--num_workers', default=0, type=int)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--fake_data', default="None", type=str)
parser.add_argument('--nfake', default=1e30, type=float)
parser.add_argument('--finetune', action='store_true', default=False)
parser.add_argument('--init_student_path', type=str, default='')
parser.add_argument('--t_finetune', action='store_true', default=False)
parser.add_argument('--init_teacher_path', type=str, default='')
parser.add_argument('--s_arch', type=str) # student architecture
parser.add_argument('--t_path', type=str) # teacher checkpoint path
parser.add_argument('--t_epochs', type=int, default=60, help="for training ssp_head")
parser.add_argument('--epochs', type=int, default=120)
parser.add_argument('--resume_epoch', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--lr_decay_epochs', type=str, default='30_60_90', help='decay lr at which epoch; separate by _')
parser.add_argument('--lr_decay_factor', type=float, default=0.1)
parser.add_argument('--t_lr', type=float, default=0.01)
parser.add_argument('--t_lr_decay_epochs', type=str, default='30_45', help='decay lr at which epoch; separate by _')
parser.add_argument('--t_lr_decay_factor', type=float, default=0.1)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--save_interval', type=int, default=20)
parser.add_argument('--ce_weight', type=float, default=1.0) # cross-entropy #was 0.1 for cifar but 1.0 for imagenet100
parser.add_argument('--kd_weight', type=float, default=0.9) # knowledge distillation
parser.add_argument('--tf_weight', type=float, default=2.7) # transformation
parser.add_argument('--ss_weight', type=float, default=10.0) # self-supervision
parser.add_argument('--kd_T', type=float, default=4.0) # temperature in KD
parser.add_argument('--tf_T', type=float, default=4.0) # temperature in LT
parser.add_argument('--ss_T', type=float, default=0.5) # temperature in SS
parser.add_argument('--ratio_tf', type=float, default=1.0) # keep how many wrong predictions of LT
parser.add_argument('--ratio_ss', type=float, default=0.75) # keep how many wrong predictions of SS
args = parser.parse_args()
#######################################
''' set seed '''
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
''' save folders '''
def get_teacher_name(model_path):
    """Recover the teacher architecture name from a checkpoint path.

    Checkpoint filenames look like ``ckpt_<arch>_....pth``; for WRN models
    the arch spans three underscore-separated fields (e.g. ``wrn_40_2``).
    """
    parts = model_path.split('/')[-1].split('_')
    if parts[1] == 'wrn':
        return '{}_{}_{}'.format(parts[1], parts[2], parts[3])
    return parts[1]
args.t_arch = get_teacher_name(args.t_path) ## get teacher's name
setting_name = 'S_{}_T_{}_lr_{}_decay_{}'.format(args.s_arch, args.t_arch, args.lr, args.weight_decay)
if args.fake_data=="None" or args.nfake<=0:
save_folder = os.path.join(args.root_path, 'output/students/vanilla')
else:
fake_data_name = args.fake_data.split('/')[-1]
save_folder = os.path.join(args.root_path, 'output/students/{}_useNfake_{}'.format(fake_data_name, args.nfake))
save_folder_ssp = os.path.join(args.root_path, 'output/students/vanilla', 'ssp_heads')
os.makedirs(save_folder_ssp, exist_ok=True)
save_intrain_folder = os.path.join(save_folder, 'ckpts_in_train', setting_name)
os.makedirs(save_intrain_folder, exist_ok=True)
''' dataloaders '''
trainset = IMGs_dataset(train=True, num_classes=args.num_classes, real_data=args.real_data, fake_data=args.fake_data, nfake=args.nfake)
train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=False)
valset = IMGs_dataset(train=False, num_classes=args.num_classes, real_data=args.real_data, fake_data=args.fake_data, nfake=args.nfake)
val_loader = DataLoader(valset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=False)
# trainset_ssp = IMGs_dataset(train=True, num_classes=args.num_classes, real_data=args.real_data, fake_data="None")
# train_loader_ssp = DataLoader(trainset_ssp, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=False)
train_loader_ssp = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=False)
#####################################################
''' Load teacher model and train ssp head '''
t_lr_decay_epochs = (args.t_lr_decay_epochs).split("_")
t_lr_decay_epochs = [int(epoch) for epoch in t_lr_decay_epochs]
def adjust_learning_rate1(optimizer, epoch):
    """Step-decay schedule for the SSP-head optimizer: base lr
    ``args.t_lr``, decayed by ``args.t_lr_decay_factor`` at each milestone
    in ``t_lr_decay_epochs``. The decayed value is written into *all*
    param groups, including the backbone group created with lr=0.0;
    backbone features are detached during SSP training (bb_grad=False),
    so that group receives no gradients anyway.
    """
    lr = args.t_lr
    for milestone in t_lr_decay_epochs:
        if epoch >= milestone:
            lr = lr * args.t_lr_decay_factor
    for group in optimizer.param_groups:
        group['lr'] = lr
## load teacher
t_model = model_dict[args.t_arch](num_classes=args.num_classes).cuda()
state_dict = torch.load(args.t_path)['model']
t_model.load_state_dict(state_dict)
t_model = wrapper(module=t_model).cuda()
## pretrained teacher model's test accuracy
t_model.eval()
t_acc_record = AverageMeter()
t_loss_record = AverageMeter()
start = time.time()
for x, target in val_loader:
x = x[:,0,:,:,:].cuda()
target = target.long().cuda()
with torch.no_grad():
output, _, feat = t_model(x)
loss = F.cross_entropy(output, target)
batch_acc = accuracy(output, target, topk=(1,))[0]
t_acc_record.update(batch_acc.item(), x.size(0))
t_loss_record.update(loss.item(), x.size(0))
run_time = time.time() - start
info = '\n teacher cls_acc:{:.2f}'.format(t_acc_record.avg)
print(info)
# train ssp_head
ssp_head_path = save_folder_ssp+'/ckpt_{}_ssp_head_epoch_{}.pth'.format(args.t_arch, args.t_epochs)
if not os.path.isfile(ssp_head_path):
if args.t_finetune:
print("\n Initialize teacher model by a pre-trained one")
## load pre-trained model
checkpoint = torch.load(args.init_teacher_path)
t_model.load_state_dict(checkpoint)
t_optimizer = optim.SGD([{'params':t_model.backbone.parameters(), 'lr':0.0},
{'params':t_model.proj_head.parameters(), 'lr':args.t_lr}],
momentum=args.momentum, weight_decay=args.weight_decay)
print("\n Start training SSP head >>>")
for epoch in range(args.t_epochs):
adjust_learning_rate1(t_optimizer, epoch)
t_model.eval()
loss_record = AverageMeter()
acc_record = AverageMeter()
start = time.time()
for x, _ in train_loader_ssp:
t_optimizer.zero_grad()
x = x.cuda()
c,h,w = x.size()[-3:]
x = x.view(-1, c, h, w)
_, rep, feat = t_model(x, bb_grad=False)
batch = int(x.size(0) / 4)
nor_index = (torch.arange(4*batch) % 4 == 0).cuda()
aug_index = (torch.arange(4*batch) % 4 != 0).cuda()
nor_rep = rep[nor_index]
aug_rep = rep[aug_index]
nor_rep = nor_rep.unsqueeze(2).expand(-1,-1,3*batch).transpose(0,2)
aug_rep = aug_rep.unsqueeze(2).expand(-1,-1,1*batch)
simi = F.cosine_similarity(aug_rep, nor_rep, dim=1)
target = torch.arange(batch).unsqueeze(1).expand(-1,3).contiguous().view(-1).long().cuda()
loss = F.cross_entropy(simi, target)
loss.backward()
t_optimizer.step()
batch_acc = accuracy(simi, target, topk=(1,))[0]
loss_record.update(loss.item(), 3*batch)
acc_record.update(batch_acc.item(), 3*batch)
run_time = time.time() - start
info = '\n teacher_train_Epoch:{:03d}/{:03d}\t run_time:{:.3f}\t ssp_loss:{:.3f}\t ssp_acc:{:.2f}'.format(
epoch+1, args.t_epochs, run_time, loss_record.avg, acc_record.avg)
print(info)
t_model.eval()
acc_record = AverageMeter()
loss_record = AverageMeter()
start = time.time()
for x, _ in val_loader:
x = x.cuda()
c,h,w = x.size()[-3:]
x = x.view(-1, c, h, w)
with torch.no_grad():
_, rep, feat = t_model(x)
batch = int(x.size(0) / 4)
nor_index = (torch.arange(4*batch) % 4 == 0).cuda()
aug_index = (torch.arange(4*batch) % 4 != 0).cuda()
nor_rep = rep[nor_index]
aug_rep = rep[aug_index]
nor_rep = nor_rep.unsqueeze(2).expand(-1,-1,3*batch).transpose(0,2)
aug_rep = aug_rep.unsqueeze(2).expand(-1,-1,1*batch)
simi = F.cosine_similarity(aug_rep, nor_rep, dim=1)
target = torch.arange(batch).unsqueeze(1).expand(-1,3).contiguous().view(-1).long().cuda()
loss = F.cross_entropy(simi, target)
batch_acc = accuracy(simi, target, topk=(1,))[0]
acc_record.update(batch_acc.item(),3*batch)
loss_record.update(loss.item(), 3*batch)
run_time = time.time() - start
info = '\r ssp_test_Epoch:{:03d}/{:03d}\t run_time:{:.2f}\t ssp_loss:{:.3f}\t ssp_acc:{:.2f}'.format(
epoch+1, args.t_epochs, run_time, loss_record.avg, acc_record.avg)
print(info)
## end for epoch
## save teacher ckpt
torch.save(t_model.state_dict(), ssp_head_path)
else:
print("\n Loading pre-trained SSP head>>>")
checkpoint = torch.load(ssp_head_path)
t_model.load_state_dict(checkpoint)
#####################################################
''' Train Student model via SSKD '''
lr_decay_epochs = (args.lr_decay_epochs).split("_")
lr_decay_epochs = [int(epoch) for epoch in lr_decay_epochs]
def adjust_learning_rate2(optimizer, epoch):
    """Step-decay schedule for the student optimizer: multiply
    ``args.lr`` by ``args.lr_decay_factor`` once per milestone in
    ``lr_decay_epochs`` that ``epoch`` has reached, and apply the result
    to every parameter group.
    """
    decayed = args.lr
    for milestone in lr_decay_epochs:
        if epoch >= milestone:
            decayed = decayed * args.lr_decay_factor
    for group in optimizer.param_groups:
        group['lr'] = decayed
## init. student net
s_model = model_dict[args.s_arch](num_classes=args.num_classes)
s_model = wrapper(module=s_model).cuda()
s_model_path = "ckpt_" + setting_name + "_epoch_{}".format(args.epochs)
if args.finetune:
print("\n Initialize student model by pre-trained one")
s_model_path = os.path.join(save_folder, s_model_path+'_finetune_last.pth')
## load pre-trained model
checkpoint = torch.load(args.init_student_path)
s_model.load_state_dict(checkpoint['model'])
else:
s_model_path = os.path.join(save_folder, s_model_path+'_last.pth')
# s_model = wrapper(module=s_model).cuda()
print('\r ' + s_model_path)
## optimizer
optimizer = optim.SGD(s_model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
## training
print("\n -----------------------------------------------------------------------------------------")
# training
if not os.path.isfile(s_model_path):
print("\n Start training {} as student net >>>".format(args.s_arch))
## resume training
if args.resume_epoch>0:
save_file = os.path.join(save_intrain_folder, "ckpt_{}_epoch_{}.pth".format(args.s_arch, args.resume_epoch))
checkpoint = torch.load(save_file)
s_model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
total_train_time = 0
for epoch in range(args.resume_epoch, args.epochs):
adjust_learning_rate2(optimizer, epoch)
##########################
# train
s_model.train()
loss1_record = AverageMeter()
loss2_record = AverageMeter()
loss3_record = AverageMeter()
loss4_record = AverageMeter()
cls_acc_record = AverageMeter()
ssp_acc_record = AverageMeter()
start = time.time()
for x, target in train_loader:
optimizer.zero_grad()
c,h,w = x.size()[-3:]
x = x.view(-1,c,h,w).cuda()
target = target.long().cuda()
batch = int(x.size(0) / 4)
nor_index = (torch.arange(4*batch) % 4 == 0).cuda()
aug_index = (torch.arange(4*batch) % 4 != 0).cuda()
output, s_feat, _ = s_model(x, bb_grad=True)
log_nor_output = F.log_softmax(output[nor_index] / args.kd_T, dim=1)
log_aug_output = F.log_softmax(output[aug_index] / args.tf_T, dim=1)
with torch.no_grad():
knowledge, t_feat, _ = t_model(x)
nor_knowledge = F.softmax(knowledge[nor_index] / args.kd_T, dim=1)
aug_knowledge = F.softmax(knowledge[aug_index] / args.tf_T, dim=1)
# error level ranking
aug_target = target.unsqueeze(1).expand(-1,3).contiguous().view(-1).long().cuda()
rank = torch.argsort(aug_knowledge, dim=1, descending=True)
rank = torch.argmax(torch.eq(rank, aug_target.unsqueeze(1)).long(), dim=1) # groundtruth label's rank
index = torch.argsort(rank)
tmp = torch.nonzero(rank, as_tuple=True)[0]
wrong_num = tmp.numel()
correct_num = 3*batch - wrong_num
wrong_keep = int(wrong_num * args.ratio_tf)
index = index[:correct_num+wrong_keep]
distill_index_tf = torch.sort(index)[0]
s_nor_feat = s_feat[nor_index]
s_aug_feat = s_feat[aug_index]
s_nor_feat = s_nor_feat.unsqueeze(2).expand(-1,-1,3*batch).transpose(0,2)
s_aug_feat = s_aug_feat.unsqueeze(2).expand(-1,-1,1*batch)
s_simi = F.cosine_similarity(s_aug_feat, s_nor_feat, dim=1)
t_nor_feat = t_feat[nor_index]
t_aug_feat = t_feat[aug_index]
t_nor_feat = t_nor_feat.unsqueeze(2).expand(-1,-1,3*batch).transpose(0,2)
t_aug_feat = t_aug_feat.unsqueeze(2).expand(-1,-1,1*batch)
t_simi = F.cosine_similarity(t_aug_feat, t_nor_feat, dim=1)
t_simi = t_simi.detach()
aug_target = torch.arange(batch).unsqueeze(1).expand(-1,3).contiguous().view(-1).long().cuda()
rank = torch.argsort(t_simi, dim=1, descending=True)
rank = torch.argmax(torch.eq(rank, aug_target.unsqueeze(1)).long(), dim=1) # groundtruth label's rank
index = torch.argsort(rank)
tmp = torch.nonzero(rank, as_tuple=True)[0]
wrong_num = tmp.numel()
correct_num = 3*batch - wrong_num
wrong_keep = int(wrong_num * args.ratio_ss)
index = index[:correct_num+wrong_keep]
distill_index_ss = torch.sort(index)[0]
log_simi = F.log_softmax(s_simi / args.ss_T, dim=1)
simi_knowledge = F.softmax(t_simi / args.ss_T, dim=1)
loss1 = F.cross_entropy(output[nor_index], target)
loss2 = F.kl_div(log_nor_output, nor_knowledge, reduction='batchmean') * args.kd_T * args.kd_T
loss3 = F.kl_div(log_aug_output[distill_index_tf], aug_knowledge[distill_index_tf], \
reduction='batchmean') * args.tf_T * args.tf_T
loss4 = F.kl_div(log_simi[distill_index_ss], simi_knowledge[distill_index_ss], \
reduction='batchmean') * args.ss_T * args.ss_T
loss = args.ce_weight * loss1 + args.kd_weight * loss2 + args.tf_weight * loss3 + args.ss_weight * loss4
loss.backward()
optimizer.step()
cls_batch_acc = accuracy(output[nor_index], target, topk=(1,))[0]
ssp_batch_acc = accuracy(s_simi, aug_target, topk=(1,))[0]
loss1_record.update(loss1.item(), batch)
loss2_record.update(loss2.item(), batch)
loss3_record.update(loss3.item(), len(distill_index_tf))
loss4_record.update(loss4.item(), len(distill_index_ss))
cls_acc_record.update(cls_batch_acc.item(), batch)
ssp_acc_record.update(ssp_batch_acc.item(), 3*batch)
run_time = time.time() - start
total_train_time += run_time
info = '\n student_train_Epoch:{:03d}/{:03d}\t run_time:{:.3f}\t ce_loss:{:.3f}\t kd_loss:{:.3f}\t cls_acc:{:.2f}'.format(epoch+1, args.epochs, run_time, loss1_record.avg, loss2_record.avg, cls_acc_record.avg)
print(info)
##########################
# cls val
s_model.eval()
acc_record = AverageMeter()
loss_record = AverageMeter()
start = time.time()
for x, target in val_loader:
x = x[:,0,:,:,:].cuda()
target = target.long().cuda()
with torch.no_grad():
output, _, feat = s_model(x)
loss = F.cross_entropy(output, target)
batch_acc = accuracy(output, target, topk=(1,))[0]
acc_record.update(batch_acc.item(), x.size(0))
loss_record.update(loss.item(), x.size(0))
run_time = time.time() - start
info = '\r student_test_Epoch:{:03d}/{:03d}\t run_time:{:.2f}\t cls_acc:{:.2f}'.format(epoch+1, args.epochs, run_time, acc_record.avg)
print(info)
##########################
## save checkpoint
if (epoch+1) % args.save_interval==0 :
save_file = save_intrain_folder + "/ckpt_{}_epoch_{}.pth".format(args.s_arch, epoch+1)
torch.save({
'model': s_model.state_dict(),
'optimizer': optimizer.state_dict(),
}, save_file)
## end for epoch
print("\n End training CNN")
## save model
torch.save({'model': s_model.state_dict()}, s_model_path)
else:
print("\n Loading pre-trained {}.".format(args.s_arch))
checkpoint = torch.load(s_model_path)
s_model.load_state_dict(checkpoint['model'])
## end if
##########################
s_model = s_model.cuda()
s_model.eval()
acc_record = AverageMeter()
loss_record = AverageMeter()
start = time.time()
for x, target in val_loader:
x = x[:,0,:,:,:].cuda()
target = target.long().cuda()
with torch.no_grad():
output, _, feat = s_model(x)
loss = F.cross_entropy(output, target)
batch_acc = accuracy(output, target, topk=(1,))[0]
acc_record.update(batch_acc.item(), x.size(0))
loss_record.update(loss.item(), x.size(0))
print('\n Test accuracy of {}: {:.2f}'.format(args.s_arch, acc_record.avg))
''' Dump test results '''
test_results_logging_fullpath = save_folder + '/test_results_S_{}_T_{}.txt'.format(args.s_arch, args.t_arch)
if not os.path.isfile(test_results_logging_fullpath):
test_results_logging_file = open(test_results_logging_fullpath, "w")
test_results_logging_file.close()
with open(test_results_logging_fullpath, 'a') as test_results_logging_file:
test_results_logging_file.write("\n===================================================================================================")
test_results_logging_file.write("\n Teacher: {}; Student: {}; seed: {} \n".format(args.t_arch, args.s_arch, args.seed))
print(args, file=test_results_logging_file)
test_results_logging_file.write("\n Teacher test accuracy {}.".format(t_acc_record.avg))
test_results_logging_file.write("\n Student test accuracy {}.".format(acc_record.avg))
print("\n ===================================================================================================")
| 20,500 | 40.002 | 217 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/wrapper.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class wrapper(nn.Module):
    """Attach an MLP projection head to a backbone for SSKD training.

    The backbone must support ``forward(x, is_feat=True)`` returning
    ``(feature_list, logits)`` and expose its classifier as its last
    child module (whose ``in_features`` fixes the head width).
    """

    def __init__(self, module):
        super(wrapper, self).__init__()
        self.backbone = module
        # head width = input width of the backbone's final (linear) layer
        dim = list(module.children())[-1].in_features
        self.proj_head = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True),
            nn.Linear(dim, dim),
        )

    def forward(self, x, bb_grad=True):
        """Return (logits, projected feature, flattened backbone feature).

        ``bb_grad=False`` detaches the feature so that training the
        projection head does not backpropagate into the backbone.
        """
        feat_list, logits = self.backbone(x, is_feat=True)
        last = feat_list[-1]
        flat = last.view(last.size(0), -1)
        if not bb_grad:
            flat = flat.detach()
        return logits, self.proj_head(flat), flat
| 718 | 24.678571 | 66 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/utils.py | import os
import logging
import numpy as np
import torch
from torch.nn import init
class AverageMeter(object):
    """Tracks the most recent value and the running average of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        List of 1-element tensors, each the top-k accuracy in percent,
        in the same order as ``topk``.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape (not view): correct[:k] can be non-contiguous because
        # `pred` was transposed above, and view() would then raise a
        # RuntimeError for k > 1 on modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def norm(x):
    """Scale ``x`` to unit Euclidean (L2) norm.

    NOTE(review): divides by zero for an all-zero input — callers must
    guarantee a non-zero vector.
    """
    return x / np.linalg.norm(x)
| 1,014 | 20.145833 | 69 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/student_dataset.py | from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
import pickle
import torch
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as data
from itertools import permutations
import h5py
class IMGs_dataset(torch.utils.data.Dataset):
    """ImageNet-100 dataset for SSKD student training.

    Loads real 128x128 images from an HDF5 file and optionally mixes in
    GAN-generated fake samples.  In training mode ``__getitem__`` returns a
    stack of the four rotated views (0/90/180/270 degrees) required by
    SSKD's self-supervised task.  Channel means/stds for normalization are
    always estimated from the (real + fake) training images.
    """
    def __init__(self, train=True, num_classes=100, real_data="", fake_data="None", nfake=1e30):
        # train: training vs. validation split
        # real_data: directory holding ImageNet_128x128_100Class.h5
        # fake_data: path to an h5 file of fake samples, or the string "None"
        # nfake: cap on total fake samples kept, spread evenly over classes
        super(IMGs_dataset, self).__init__()
        self.train = train
        self.num_classes = num_classes
        # Real training images/labels are loaded first in both modes so the
        # normalization statistics below always come from the training split.
        trainset_h5py_file = os.path.join(real_data, 'ImageNet_128x128_100Class.h5')
        hf = h5py.File(trainset_h5py_file, 'r')
        self.images = hf['images_train'][:]
        self.labels = hf['labels_train'][:]
        hf.close()
        if fake_data != 'None':
            ## load fake data
            with h5py.File(fake_data, "r") as f:
                fake_images = f['fake_images'][:]
                fake_labels = f['fake_labels'][:]
            fake_labels = fake_labels.astype(int)
            if nfake<len(fake_labels):
                # Subsample the fake set: nfake//num_classes per class, with
                # the last class absorbing the remainder.
                indx_fake = []
                for i in range(num_classes):
                    indx_fake_i = np.where(fake_labels==i)[0]
                    if i != (num_classes-1):
                        nfake_i = nfake//num_classes
                    else:
                        nfake_i = nfake-(nfake//num_classes)*i
                    indx_fake_i = np.random.choice(indx_fake_i, size=min(int(nfake_i), len(indx_fake_i)), replace=False)
                    indx_fake.append(indx_fake_i)
                #end for i
                indx_fake = np.concatenate(indx_fake)
                fake_images = fake_images[indx_fake]
                fake_labels = fake_labels[indx_fake]
            ## concatenate real and fake samples into one training pool
            self.images = np.concatenate((self.images, fake_images), axis=0)
            self.labels = np.concatenate((self.labels, fake_labels), axis=0)
        self.labels = self.labels.astype(int)
        print("\n Training images shape: ", self.images.shape)
        ## compute per-channel mean and std for normalization (pixels scaled to [0,1])
        means = []
        stds = []
        assert (self.images).shape[1]==3
        for i in range(3):
            images_i = self.images[:,i,:,:]
            images_i = images_i/255.0
            means.append(np.mean(images_i))
            stds.append(np.std(images_i))
        ## for i
        ## training transforms: crop/flip augmentation + normalization
        self.transform = transforms.Compose([
            # transforms.RandomCrop(128, padding=4),
            transforms.RandomResizedCrop(128),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(means, stds),
        ])
        if not self.train: ## NOTE: test mode
            # Replace images/labels with the validation split but keep the
            # training-set normalization statistics computed above.
            trainset_h5py_file = os.path.join(real_data, 'ImageNet_128x128_100Class.h5')
            hf = h5py.File(trainset_h5py_file, 'r')
            self.images = hf['images_valid'][:]
            self.labels = hf['labels_valid'][:]
            hf.close()
            ## deterministic transforms (no augmentation) for evaluation
            self.transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(means, stds),
            ])
        self.images = self.images.transpose((0, 2, 3, 1)) # convert to HWC; after computing means and stds
        self.n_images = len(self.images)
    def __getitem__(self, index):
        # Training mode: returns a (4, C, H, W) tensor holding the
        # 0/90/180/270-degree rotations of one (randomly h-flipped) image,
        # each passed through self.transform.
        # NOTE(review): in test mode the raw HWC uint8 array is returned
        # without self.transform and without the 4-view stack, yet the
        # validation loops in student.py index x[:, 0, ...] as if four
        # views were present — confirm this path is intended.
        img, label = self.images[index], self.labels[index]
        if self.train:
            # one random horizontal flip, shared by all four rotations
            if np.random.rand() < 0.5:
                img = img[:,::-1,:]
            img0 = np.rot90(img, 0).copy()
            img0 = Image.fromarray(img0)
            img0 = self.transform(img0)
            img1 = np.rot90(img, 1).copy()
            img1 = Image.fromarray(img1)
            img1 = self.transform(img1)
            img2 = np.rot90(img, 2).copy()
            img2 = Image.fromarray(img2)
            img2 = self.transform(img2)
            img3 = np.rot90(img, 3).copy()
            img3 = Image.fromarray(img3)
            img3 = self.transform(img3)
            img = torch.stack([img0,img1,img2,img3])
        return img, label
    def __len__(self):
        # number of samples in the selected split (plus fakes, if mixed in)
        return self.n_images
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/models/resnet.py | from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 and no bias."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, padding=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Two 3x3 convs with an identity (or projected) shortcut.

    With ``is_last=True`` the forward pass additionally returns the
    pre-ReLU residual sum (``preact``) for feature-distillation hooks.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = self.downsample(x) if self.downsample is not None else x
        out = self.bn2(self.conv2(self.relu(self.bn1(self.conv1(x)))))
        out = out + shortcut
        preact = out
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (output = planes * 4).

    With ``is_last=True`` the forward pass additionally returns the
    pre-ReLU residual sum (``preact``) for feature-distillation hooks.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + shortcut
        preact = out
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class ResNet(nn.Module):
    """CIFAR-style ResNet adapted for ImageNet-100 (128x128 inputs).

    Compared with the 32x32 CIFAR variant, the stem conv uses
    kernel_size=4/stride=2 and is followed by a 3x3/stride-2 max-pool, so a
    128x128 input reaches layer1 at 32x32 (the spatial sizes noted in
    ``forward``).  ``forward(x, is_feat=True)`` also returns intermediate
    features for distillation methods.
    """
    def __init__(self, depth, num_filters, block_name='BasicBlock', num_classes=10):
        # depth: total layer count; must satisfy 6n+2 (basic) or 9n+2 (bottleneck)
        # num_filters: [stem, layer1, layer2, layer3] channel widths
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = num_filters[0]
        # original CIFAR stem, kept for reference:
        # self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=3, padding=1,
        #                        bias=False)
        self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=4, stride=2, padding=1, bias=False) #for imagenet100, h=h//2
        self.bn1 = nn.BatchNorm2d(num_filters[0])
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, num_filters[1], n)
        self.layer2 = self._make_layer(block, num_filters[2], n, stride=2)
        self.layer3 = self._make_layer(block, num_filters[3], n, stride=2)
        self.avgpool = nn.AvgPool2d(8)  # layer3 output is 8x8 -> 1x1
        self.fc = nn.Linear(num_filters[3] * block.expansion, num_classes)
        # He init for convs; BN affine params start at identity
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual blocks.

        The final block gets ``is_last=True`` so the stage's Sequential
        returns an (out, preact) tuple — ``forward`` relies on this.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # projection shortcut when the shape changes
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = list([])
        layers.append(block(self.inplanes, planes, stride, downsample, is_last=(blocks == 1)))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, is_last=(i == blocks-1)))
        return nn.Sequential(*layers)
    def get_feat_modules(self):
        """Return the feature-extractor modules (everything before avgpool/fc)."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.relu)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m
    def get_bn_before_relu(self):
        """Return the last BN of each stage (the one feeding the final ReLU)."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3]
    def forward(self, x, is_feat=False, preact=False):
        # Each stage returns (activated, pre-activation) because its last
        # block was built with is_last=True; see _make_layer.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = F.max_pool2d(x, 3, stride=2, padding=1) ##extra maxpool2d; 32x32
        f0 = x
        x, f1_pre = self.layer1(x) # 32x32
        f1 = x
        x, f2_pre = self.layer2(x) # 16x16
        f2 = x
        x, f3_pre = self.layer3(x) # 8x8
        f3 = x
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        f4 = x
        x = self.fc(x)
        if is_feat:
            # with preact=True the stage features are the pre-ReLU sums
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], x
            else:
                return [f0, f1, f2, f3, f4], x
        else:
            return x
def _cifar_resnet(depth, filters, **kwargs):
    """Shared constructor for the BasicBlock CIFAR-style ResNet family."""
    return ResNet(depth, filters, 'basicblock', **kwargs)


def resnet8(**kwargs):
    return _cifar_resnet(8, [16, 16, 32, 64], **kwargs)


def resnet14(**kwargs):
    return _cifar_resnet(14, [16, 16, 32, 64], **kwargs)


def resnet20(**kwargs):
    return _cifar_resnet(20, [16, 16, 32, 64], **kwargs)


def resnet14x05(**kwargs):
    # 0.5x-width variant
    return _cifar_resnet(14, [8, 8, 16, 32], **kwargs)


def resnet20x05(**kwargs):
    # 0.5x-width variant
    return _cifar_resnet(20, [8, 8, 16, 32], **kwargs)


def resnet20x0375(**kwargs):
    # 0.375x-width variant
    return _cifar_resnet(20, [6, 6, 12, 24], **kwargs)


def resnet32(**kwargs):
    return _cifar_resnet(32, [16, 16, 32, 64], **kwargs)


def resnet44(**kwargs):
    return _cifar_resnet(44, [16, 16, 32, 64], **kwargs)


def resnet56(**kwargs):
    return _cifar_resnet(56, [16, 16, 32, 64], **kwargs)


def resnet110(**kwargs):
    return _cifar_resnet(110, [16, 16, 32, 64], **kwargs)


def resnet8x4(**kwargs):
    # 4x-width variant
    return _cifar_resnet(8, [32, 64, 128, 256], **kwargs)


def resnet32x4(**kwargs):
    # 4x-width variant
    return _cifar_resnet(32, [32, 64, 128, 256], **kwargs)
if __name__ == '__main__':
    import torch

    # Smoke test: forward a random batch and inspect the feature taps.
    dummy = torch.randn(2, 3, 128, 128)
    model = resnet8x4(num_classes=20)
    feat_list, logits = model(dummy, is_feat=True, preact=True)
    for feat in feat_list:
        print(feat.shape, feat.min().item())
    print(logits.shape)
    for bn in model.get_bn_before_relu():
        print('pass' if isinstance(bn, nn.BatchNorm2d) else 'warning')
| 8,219 | 29.557621 | 122 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/models/mobilenetv2.py | """
MobileNetV2 implementation used in
<Knowledge Distillation via Route Constrained Optimization>
"""
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
__all__ = ['mobilenetv2_T_w', 'mobile_half']
BN = None
def conv_bn(inp, oup, stride):
    """3x3 conv -> BatchNorm -> ReLU block."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)


def conv_1x1_bn(inp, oup):
    """1x1 pointwise conv -> BatchNorm -> ReLU block."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual: expand (1x1) -> depthwise (3x3) -> project (1x1)."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.blockname = None
        self.stride = stride
        assert stride in [1, 2]
        # Identity skip only when both spatial size and channel count are preserved.
        self.use_res_connect = self.stride == 1 and inp == oup
        hidden = inp * expand_ratio
        self.conv = nn.Sequential(
            # pw (pointwise expansion)
            nn.Conv2d(inp, hidden, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # dw (depthwise 3x3)
            nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # pw-linear (projection, no activation)
            nn.Conv2d(hidden, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )
        self.names = ['0', '1', '2', '3', '4', '5', '6', '7']

    def forward(self, x):
        branch = self.conv(x)
        return x + branch if self.use_res_connect else branch
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone with intermediate feature taps for distillation.

    Args:
        T: expansion ratio of the inverted residual blocks.
        feature_dim: size of the final classifier output.
        input_size: input resolution; must be a multiple of 32.
        width_mult: channel width multiplier.
        remove_avg: if True, skip global average pooling before the classifier.
    """
    def __init__(self, T,
                 feature_dim,
                 input_size=128,
                 width_mult=1.,
                 remove_avg=False):
        super(MobileNetV2, self).__init__()
        self.remove_avg = remove_avg
        # setting of inverted residual blocks
        # Each row is (t, c, n, s): expansion, output channels, repeats, first stride.
        self.interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [T, 24, 2, 2], #s=1 for cifar
            [T, 32, 3, 2],
            [T, 64, 4, 2],
            [T, 96, 3, 1],
            [T, 160, 3, 2],
            [T, 320, 1, 1],
        ]
        # building first layer (stride-2 stem halves the resolution)
        assert input_size % 32 == 0
        input_channel = int(32 * width_mult)
        self.conv1 = conv_bn(3, input_channel, 2)
        # building inverted residual blocks: one nn.Sequential per setting row,
        # only the first unit of a row uses that row's stride.
        self.blocks = nn.ModuleList([])
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * width_mult)
            layers = []
            strides = [s] + [1] * (n - 1)
            for stride in strides:
                layers.append(
                    InvertedResidual(input_channel, output_channel, stride, t)
                )
                input_channel = output_channel
            self.blocks.append(nn.Sequential(*layers))
        # The 1280-channel head is widened only when width_mult > 1.
        self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280
        self.conv2 = conv_1x1_bn(input_channel, self.last_channel)
        # H = input_size // (32//2)
        # self.avgpool = nn.AvgPool2d(H, ceil_mode=True)
        # building classifier
        #self.classifier = nn.Sequential(
        #    # nn.Dropout(0.5),
        #    nn.Linear(self.last_channel, feature_dim),
        #)
        self.classifier = nn.Linear(self.last_channel, feature_dim)
        self._initialize_weights()
        print(T, width_mult)

    def get_bn_before_relu(self):
        # Last (pre-activation) BN of the projection conv in selected stages,
        # used by Overhaul-style feature distillation.
        bn1 = self.blocks[1][-1].conv[-1]
        bn2 = self.blocks[2][-1].conv[-1]
        bn3 = self.blocks[4][-1].conv[-1]
        bn4 = self.blocks[6][-1].conv[-1]
        return [bn1, bn2, bn3, bn4]

    def get_feat_modules(self):
        # Modules that make up the feature extractor (stem + all blocks).
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.blocks)
        return feat_m

    def forward(self, x, is_feat=False, preact=False):
        """Run the network.

        When ``is_feat`` is True, returns ``([f0..f5], logits)`` where f0 is
        the stem output, f1-f4 are intermediate stage outputs and f5 is the
        pooled embedding.  ``preact`` is accepted for API symmetry with the
        other backbones in this package but is not used here.
        """
        out = self.conv1(x)
        f0 = out
        out = self.blocks[0](out)
        out = self.blocks[1](out)
        f1 = out
        out = self.blocks[2](out)
        f2 = out
        out = self.blocks[3](out)
        out = self.blocks[4](out)
        f3 = out
        out = self.blocks[5](out)
        out = self.blocks[6](out)
        f4 = out
        out = self.conv2(out)
        if not self.remove_avg:
            # out = self.avgpool(out)
            out = F.adaptive_avg_pool2d(out, (1, 1))
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.classifier(out)
        if is_feat:
            return [f0, f1, f2, f3, f4, f5], out
        else:
            return out

    def _initialize_weights(self):
        # He init for convs, identity init for BN, N(0, 0.01) for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def mobilenetv2_T_w(T, W, feature_dim=100):
    """Construct a MobileNetV2 with expansion ``T`` and width multiplier ``W``."""
    return MobileNetV2(T=T, feature_dim=feature_dim, width_mult=W)


def mobile_half(num_classes):
    """Half-width (0.5x) MobileNetV2 with the standard expansion factor 6."""
    return mobilenetv2_T_w(6, 0.5, num_classes)
if __name__ == '__main__':
    # Smoke test: forward a random batch and inspect the feature taps.
    dummy = torch.randn(2, 3, 128, 128)
    model = mobile_half(100)
    feat_list, logits = model(dummy, is_feat=True, preact=True)
    for feat in feat_list:
        print(feat.shape, feat.min().item())
    print(logits.shape)
    for bn in model.get_bn_before_relu():
        print('pass' if isinstance(bn, nn.BatchNorm2d) else 'warning')
| 5,886 | 27.57767 | 115 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/models/vgg.py | '''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
import math
# Public API of this module.
__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]
# Torchvision checkpoint URLs for ImageNet-pretrained VGG variants.
# NOTE(review): not referenced by any code in this file — presumably kept for
# loading pretrained weights elsewhere; confirm before removing.
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
    """VGG backbone built from five conv blocks, with per-block feature taps.

    Args:
        cfg: list of five channel lists, one per conv block (see the
            module-level ``cfg`` table).
        batch_norm: insert BatchNorm after every conv.
        num_classes: size of the final classifier output.
    """
    def __init__(self, cfg, batch_norm=False, num_classes=1000):
        super(VGG, self).__init__()
        self.block0 = self._make_layers(cfg[0], batch_norm, 3)
        self.block1 = self._make_layers(cfg[1], batch_norm, cfg[0][-1])
        self.block2 = self._make_layers(cfg[2], batch_norm, cfg[1][-1])
        self.block3 = self._make_layers(cfg[3], batch_norm, cfg[2][-1])
        self.block4 = self._make_layers(cfg[4], batch_norm, cfg[3][-1])
        self.pool0 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        # self.pool4 = nn.AdaptiveAvgPool2d((1, 1)) #for cifar
        self.pool4 = nn.MaxPool2d(kernel_size=4, stride=4) #for imagenet100
        # Two-layer MLP head before the final classifier; input is the
        # flattened 512x4x4 map produced by pool4 on 128x128 inputs.
        self.some_fc = nn.Sequential(
            nn.Linear(512 * 4 * 4, 1024),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(1024, 1024),
            nn.ReLU(True),
            nn.Dropout(0.5),
        )
        self.classifier = nn.Linear(1024, num_classes)
        self._initialize_weights()

    def get_feat_modules(self):
        # Feature extractor modules in forward order (blocks interleaved with pools).
        feat_m = nn.ModuleList([])
        feat_m.append(self.block0)
        feat_m.append(self.pool0)
        feat_m.append(self.block1)
        feat_m.append(self.pool1)
        feat_m.append(self.block2)
        feat_m.append(self.pool2)
        feat_m.append(self.block3)
        feat_m.append(self.pool3)
        feat_m.append(self.block4)
        feat_m.append(self.pool4)
        return feat_m

    def get_bn_before_relu(self):
        # Each block ends on its last conv/BN (the trailing ReLU is dropped in
        # _make_layers), so block[-1] is the pre-activation module.
        bn1 = self.block1[-1]
        bn2 = self.block2[-1]
        bn3 = self.block3[-1]
        bn4 = self.block4[-1]
        return [bn1, bn2, bn3, bn4]

    def forward(self, x, is_feat=False, preact=False):
        """Run the network; optionally return the per-block feature taps.

        ``f*_pre`` are the pre-ReLU activations, returned when ``preact`` is True.
        """
        h = x.shape[2]
        x = F.relu(self.block0(x))
        f0 = x
        x = self.pool0(x)
        x = self.block1(x)
        f1_pre = x
        x = F.relu(x)
        f1 = x
        x = self.pool1(x)
        x = self.block2(x)
        f2_pre = x
        x = F.relu(x)
        f2 = x
        x = self.pool2(x)
        x = self.block3(x)
        f3_pre = x
        x = F.relu(x)
        f3 = x
        # pool3 is applied only for 64x64 inputs; for other input sizes the
        # network relies on pool4 to reach the 4x4 map expected by some_fc.
        if h == 64:
            x = self.pool3(x)
        x = self.block4(x)
        f4_pre = x
        x = F.relu(x)
        f4 = x
        x = self.pool4(x)
        x = x.view(x.size(0), -1)
        x = self.some_fc(x)
        f5 = x
        x = self.classifier(x)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], x
            else:
                return [f0, f1, f2, f3, f4, f5], x
        else:
            return x

    @staticmethod
    def _make_layers(cfg, batch_norm=False, in_channels=3):
        """Build one conv block from a channel list; 'M' inserts a 2x2 maxpool.

        The trailing ReLU is removed so the block's output is a pre-activation
        (the caller applies F.relu explicitly in forward).
        """
        layers = []
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        layers = layers[:-1]
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        # He init for convs, identity init for BN, N(0, 0.01) for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
# Per-architecture channel plans: one sub-list per conv block.
# 'A'=vgg11, 'B'=vgg13, 'D'=vgg16, 'E'=vgg19, 'S'=small 8-layer variant.
cfg = {
    'A': [[64], [128], [256, 256], [512, 512], [512, 512]],
    'B': [[64, 64], [128, 128], [256, 256], [512, 512], [512, 512]],
    'D': [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]],
    'E': [[64, 64], [128, 128], [256, 256, 256, 256], [512, 512, 512, 512], [512, 512, 512, 512]],
    'S': [[64], [128], [256], [512], [512]],
}
def _vgg(plan, batch_norm=False, **kwargs):
    """Shared constructor: build a VGG from a channel plan key in ``cfg``."""
    return VGG(cfg[plan], batch_norm=batch_norm, **kwargs)


def vgg8(**kwargs):
    """VGG 8-layer model (configuration "S")."""
    return _vgg('S', **kwargs)


def vgg8_bn(**kwargs):
    """VGG 8-layer model (configuration "S") with batch normalization."""
    return _vgg('S', batch_norm=True, **kwargs)


def vgg11(**kwargs):
    """VGG 11-layer model (configuration "A")."""
    return _vgg('A', **kwargs)


def vgg11_bn(**kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization."""
    return _vgg('A', batch_norm=True, **kwargs)


def vgg13(**kwargs):
    """VGG 13-layer model (configuration "B")."""
    return _vgg('B', **kwargs)


def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization."""
    return _vgg('B', batch_norm=True, **kwargs)


def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D")."""
    return _vgg('D', **kwargs)


def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization."""
    return _vgg('D', batch_norm=True, **kwargs)


def vgg19(**kwargs):
    """VGG 19-layer model (configuration "E")."""
    return _vgg('E', **kwargs)


def vgg19_bn(**kwargs):
    """VGG 19-layer model (configuration "E") with batch normalization."""
    return _vgg('E', batch_norm=True, **kwargs)
if __name__ == '__main__':
    import torch

    # Smoke test: forward a random batch and inspect the feature taps.
    dummy = torch.randn(2, 3, 128, 128)
    model = vgg19_bn(num_classes=100)
    feat_list, logits = model(dummy, is_feat=True, preact=True)
    for feat in feat_list:
        print(feat.shape, feat.min().item())
    print(logits.shape)
    for bn in model.get_bn_before_relu():
        print('pass' if isinstance(bn, nn.BatchNorm2d) else 'warning')
| 7,269 | 28.433198 | 98 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/models/classifier.py | from __future__ import print_function
import torch.nn as nn
#########################################
# ===== Classifiers ===== #
#########################################
class LinearClassifier(nn.Module):
    """Single linear probe from a feature vector to class logits."""

    def __init__(self, dim_in, n_label=10):
        super(LinearClassifier, self).__init__()
        self.net = nn.Linear(dim_in, n_label)

    def forward(self, x):
        return self.net(x)


class NonLinearClassifier(nn.Module):
    """Two-layer MLP probe with dropout and batch-norm on the hidden layer."""

    def __init__(self, dim_in, n_label=10, p=0.1):
        super(NonLinearClassifier, self).__init__()
        hidden = 200
        self.net = nn.Sequential(
            nn.Linear(dim_in, hidden),
            nn.Dropout(p=p),
            nn.BatchNorm1d(hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, n_label),
        )

    def forward(self, x):
        return self.net(x)
| 819 | 21.777778 | 51 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/models/resnetv2.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-conv residual block; optionally also returns the pre-activation sum."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when the residual path changes shape.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        preact = out + self.shortcut(x)
        out = F.relu(preact)
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (4x channel expansion)."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Projection shortcut only when the residual path changes shape.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        preact = out + self.shortcut(x)
        out = F.relu(preact)
        return (out, preact) if self.is_last else out
class ResNet(nn.Module):
    """ImageNet-style ResNet backbone (stride-2 stem) with feature taps.

    Args:
        block: residual block class (BasicBlock or Bottleneck), exposing
            ``expansion`` and returning ``(out, preact)`` from its last unit.
        num_blocks: number of units in each of the four stages.
        num_classes: size of the final classifier output.
        zero_init_residual: zero-init the last BN of each residual branch so
            every block starts as an identity (https://arxiv.org/abs/1706.02677).

    ``forward(x, is_feat=True)`` returns ``(feats, logits)`` where ``feats``
    holds the stem output, the four stage outputs (pre-activation variants
    when ``preact=True``) and the pooled embedding.
    """

    def __init__(self, block, num_blocks, num_classes=10, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        # self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) #for cifar
        # 4x4/stride-2 stem for 128x128 ImageNet-100 inputs: halves H and W.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512 * block.expansion, num_classes)
        # He init for convs, identity init for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # FIX: this flag was previously accepted but ignored (the loop below
        # was commented out); honor it so the parameter is not dead.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def get_feat_modules(self):
        """Return the feature extractor (stem + the four stages) as a ModuleList."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        feat_m.append(self.layer4)
        return feat_m

    def get_bn_before_relu(self):
        """Return the last pre-ReLU BatchNorm of each stage (used by Overhaul-KD)."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
            bn4 = self.layer4[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
            bn4 = self.layer4[-1].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3, bn4]

    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage; only its first unit uses ``stride``, the rest use 1."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for i in range(num_blocks):
            stride = strides[i]
            layers.append(block(self.in_planes, planes, stride, i == num_blocks - 1))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, is_feat=False, preact=False):
        """Run the backbone; optionally also return the per-stage feature taps."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1)  ##extra maxpool2d
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out, f4_pre = self.layer4(out)
        f4 = out
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                # FIX: return (feats, logits) like every other path/backbone,
                # not a single 2-element list [[feats], logits].
                return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], out
            else:
                return [f0, f1, f2, f3, f4, f5], out
        else:
            return out
def _build_resnet(block, layout, **kwargs):
    """Shared constructor for the ImageNet-style ResNet family."""
    return ResNet(block, layout, **kwargs)


def ResNet18(**kwargs):
    return _build_resnet(BasicBlock, [2, 2, 2, 2], **kwargs)


def ResNet34(**kwargs):
    return _build_resnet(BasicBlock, [3, 4, 6, 3], **kwargs)


def ResNet50(**kwargs):
    return _build_resnet(Bottleneck, [3, 4, 6, 3], **kwargs)


def ResNet101(**kwargs):
    return _build_resnet(Bottleneck, [3, 4, 23, 3], **kwargs)


def ResNet152(**kwargs):
    return _build_resnet(Bottleneck, [3, 8, 36, 3], **kwargs)
if __name__ == '__main__':
    # Smoke test: forward a random batch and inspect the feature taps.
    model = ResNet18(num_classes=100)
    dummy = torch.randn(2, 3, 128, 128)
    feat_list, logits = model(dummy, is_feat=True, preact=True)
    for feat in feat_list:
        print(feat.shape, feat.min().item())
    print(logits.shape)
    for bn in model.get_bn_before_relu():
        print('pass' if isinstance(bn, nn.BatchNorm2d) else 'warning')
| 7,133 | 34.492537 | 110 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/models/ShuffleNetv1.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle: interleave channels across ``groups`` groups."""

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> swap the group/channel axes -> flatten back
        n, c, h, w = x.size()
        g = self.groups
        grouped = x.view(n, g, c // g, h, w)
        return grouped.permute(0, 2, 1, 3, 4).reshape(n, c, h, w)


class Bottleneck(nn.Module):
    """ShuffleNet unit: grouped 1x1 -> shuffle -> depthwise 3x3 -> grouped 1x1.

    With stride 2 the avg-pooled shortcut is concatenated (channel growth);
    with stride 1 the shortcut is added.
    """

    def __init__(self, in_planes, out_planes, stride, groups, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.stride = stride
        mid_planes = int(out_planes / 4)
        # The 24-channel stem output is too narrow to split into groups.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1,
                               groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        if self.stride == 2:
            preact = torch.cat([out, res], 1)
        else:
            preact = out + res
        out = F.relu(preact)
        return (out, preact) if self.is_last else out
class ShuffleNet(nn.Module):
    """ShuffleNet v1 backbone with intermediate feature taps for distillation.

    Args:
        cfg: dict with 'out_planes' (per-stage channels), 'num_blocks'
            (per-stage unit counts) and 'groups' (group-conv fan-out).
        num_classes: size of the final classifier output.
    """
    def __init__(self, cfg, num_classes=10):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        # Stride-2 stem for 128x128 inputs (halves the resolution).
        self.conv1 = nn.Conv2d(3, 24, kernel_size=4, padding=1, stride=2, bias=False) #h=h//2
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], num_classes)

    def _make_layer(self, out_planes, num_blocks, groups):
        """Build one stage; the first unit downsamples and concatenates its
        shortcut, so its residual branch produces out_planes - in_planes channels."""
        layers = []
        for i in range(num_blocks):
            stride = 2 if i == 0 else 1
            cat_planes = self.in_planes if i == 0 else 0
            layers.append(Bottleneck(self.in_planes, out_planes-cat_planes,
                                     stride=stride,
                                     groups=groups,
                                     is_last=(i == num_blocks - 1)))
            self.in_planes = out_planes
        return nn.Sequential(*layers)

    def get_feat_modules(self):
        # Feature extractor modules in forward order (stem + three stages).
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m

    def get_bn_before_relu(self):
        raise NotImplementedError('ShuffleNet currently is not supported for "Overhaul" teacher')

    def forward(self, x, is_feat=False, preact=False):
        """Run the backbone; optionally also return per-stage feature taps
        (pre-activation variants when ``preact`` is True)."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1)  ##extra maxpool2d
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        f4 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], out
            else:
                return [f0, f1, f2, f3, f4], out
        else:
            return out
def ShuffleV1(**kwargs):
    """ShuffleNet v1 with 3 groups and the standard (240, 480, 960) channel plan."""
    arch = {
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3,
    }
    return ShuffleNet(arch, **kwargs)
if __name__ == '__main__':
    import time

    # Smoke test: time one forward pass and inspect the feature taps.
    dummy = torch.randn(2, 3, 128, 128)
    model = ShuffleV1(num_classes=100)
    start = time.time()
    feat_list, logits = model(dummy, is_feat=True, preact=True)
    print(time.time() - start)
    for feat in feat_list:
        print(feat.shape, feat.min().item())
    print(logits.shape)
| 4,904 | 33.787234 | 126 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/models/util.py | from __future__ import print_function
import torch.nn as nn
import math
class Paraphraser(nn.Module):
    """Paraphrasing Complex Network: Network Compression via Factor Transfer"""

    def __init__(self, t_shape, k=0.5, use_bn=False):
        super(Paraphraser, self).__init__()
        in_channel = t_shape[1]
        out_channel = int(t_shape[1] * k)

        def _bn(channels):
            # Optional BatchNorm; nn.Sequential() acts as a no-op placeholder.
            return nn.BatchNorm2d(channels) if use_bn else nn.Sequential()

        # Encoder compresses the teacher map to the k-fraction "factor".
        self.encoder = nn.Sequential(
            nn.Conv2d(in_channel, in_channel, 3, 1, 1),
            _bn(in_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(in_channel, out_channel, 3, 1, 1),
            _bn(out_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(out_channel, out_channel, 3, 1, 1),
            _bn(out_channel),
            nn.LeakyReLU(0.1, inplace=True),
        )
        # Decoder reconstructs the original channel count from the factor.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(out_channel, out_channel, 3, 1, 1),
            _bn(out_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.ConvTranspose2d(out_channel, in_channel, 3, 1, 1),
            _bn(in_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.ConvTranspose2d(in_channel, in_channel, 3, 1, 1),
            _bn(in_channel),
            nn.LeakyReLU(0.1, inplace=True),
        )

    def forward(self, f_s, is_factor=False):
        factor = self.encoder(f_s)
        if is_factor:
            return factor
        return factor, self.decoder(factor)
class Translator(nn.Module):
    """Student-side translator for Factor Transfer: encode student features
    into a factor with ``k * teacher_channels`` channels."""

    def __init__(self, s_shape, t_shape, k=0.5, use_bn=True):
        super(Translator, self).__init__()
        in_channel = s_shape[1]
        out_channel = int(t_shape[1] * k)

        def _bn(channels):
            # Optional BatchNorm; nn.Sequential() acts as a no-op placeholder.
            return nn.BatchNorm2d(channels) if use_bn else nn.Sequential()

        self.encoder = nn.Sequential(
            nn.Conv2d(in_channel, in_channel, 3, 1, 1),
            _bn(in_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(in_channel, out_channel, 3, 1, 1),
            _bn(out_channel),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(out_channel, out_channel, 3, 1, 1),
            _bn(out_channel),
            nn.LeakyReLU(0.1, inplace=True),
        )

    def forward(self, f_s):
        return self.encoder(f_s)
class Connector(nn.Module):
    """Connect for Knowledge Transfer via Distillation of Activation Boundaries Formed by Hidden Neurons"""

    def __init__(self, s_shapes, t_shapes):
        super(Connector, self).__init__()
        self.s_shapes = s_shapes
        self.t_shapes = t_shapes
        self.connectors = nn.ModuleList(self._make_conenctors(s_shapes, t_shapes))

    @staticmethod
    def _make_conenctors(s_shapes, t_shapes):
        assert len(s_shapes) == len(t_shapes), 'unequal length of feat list'
        # Identity when student/teacher features already agree in channels and
        # spatial size; otherwise a learned conv regressor.
        return [
            nn.Sequential() if s[1] == t[1] and s[2] == t[2] else ConvReg(s, t, use_relu=False)
            for s, t in zip(s_shapes, t_shapes)
        ]

    def forward(self, g_s):
        return [connector(feat) for connector, feat in zip(self.connectors, g_s)]
class ConnectorV2(nn.Module):
    """A Comprehensive Overhaul of Feature Distillation (ICCV 2019)"""

    def __init__(self, s_shapes, t_shapes):
        super(ConnectorV2, self).__init__()
        self.s_shapes = s_shapes
        self.t_shapes = t_shapes
        self.connectors = nn.ModuleList(self._make_conenctors(s_shapes, t_shapes))

    def _make_conenctors(self, s_shapes, t_shapes):
        assert len(s_shapes) == len(t_shapes), 'unequal length of feat list'
        # One 1x1 conv + BN connector per (student, teacher) channel pair.
        return nn.ModuleList([
            self._build_feature_connector(t[1], s[1])
            for t, s in zip(t_shapes, s_shapes)
        ])

    @staticmethod
    def _build_feature_connector(t_channel, s_channel):
        conv = nn.Conv2d(s_channel, t_channel, kernel_size=1, stride=1, padding=0, bias=False)
        # He-style init for the conv, identity init for the BN.
        fan_out = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels
        conv.weight.data.normal_(0, math.sqrt(2. / fan_out))
        bn = nn.BatchNorm2d(t_channel)
        bn.weight.data.fill_(1)
        bn.bias.data.zero_()
        return nn.Sequential(conv, bn)

    def forward(self, g_s):
        return [connector(feat) for connector, feat in zip(self.connectors, g_s)]
class ConvReg(nn.Module):
    """Convolutional regression for FitNet: map a student feature map to the
    teacher's channel count and spatial size.

    Supported spatial relations between student (s_H) and teacher (t_H):
    s_H == 2*t_H (stride-2 downsample), s_H*2 == t_H (transposed-conv
    upsample), or s_H >= t_H (valid conv that trims to the teacher size).

    Raises:
        NotImplementedError: for any other size relation.
    """

    def __init__(self, s_shape, t_shape, use_relu=True):
        super(ConvReg, self).__init__()
        self.use_relu = use_relu
        s_N, s_C, s_H, s_W = s_shape
        t_N, t_C, t_H, t_W = t_shape
        if s_H == 2 * t_H:
            # student map is twice as large: stride-2 conv downsamples
            self.conv = nn.Conv2d(s_C, t_C, kernel_size=3, stride=2, padding=1)
        elif s_H * 2 == t_H:
            # student map is half as large: transposed conv upsamples by 2
            self.conv = nn.ConvTranspose2d(s_C, t_C, kernel_size=4, stride=2, padding=1)
        elif s_H >= t_H:
            # same or slightly larger: a valid conv shrinks to exactly t_H x t_W
            self.conv = nn.Conv2d(s_C, t_C, kernel_size=(1+s_H-t_H, 1+s_W-t_W))
        else:
            # BUG FIX: the original `raise NotImplemented(...)` raised a
            # TypeError, because NotImplemented is a sentinel value, not an
            # exception type.
            raise NotImplementedError('student size {}, teacher size {}'.format(s_H, t_H))
        self.bn = nn.BatchNorm2d(t_C)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        if self.use_relu:
            return self.relu(self.bn(x))
        return self.bn(x)
class Regress(nn.Module):
    """Simple Linear Regression for hints"""

    def __init__(self, dim_in=1024, dim_out=1024):
        super(Regress, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Flatten everything after the batch dimension, then project + ReLU.
        flat = x.view(x.shape[0], -1)
        return self.relu(self.linear(flat))
class Embed(nn.Module):
    """Flatten -> linear projection -> L2-normalize."""

    def __init__(self, dim_in=1024, dim_out=128):
        super(Embed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.l2norm = Normalize(2)

    def forward(self, x):
        flat = x.view(x.shape[0], -1)
        return self.l2norm(self.linear(flat))


class LinearEmbed(nn.Module):
    """Flatten -> linear projection (no normalization)."""

    def __init__(self, dim_in=1024, dim_out=128):
        super(LinearEmbed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        return self.linear(x.view(x.shape[0], -1))


class MLPEmbed(nn.Module):
    """Flatten -> two-layer MLP -> L2-normalize."""

    def __init__(self, dim_in=1024, dim_out=128):
        super(MLPEmbed, self).__init__()
        self.linear1 = nn.Linear(dim_in, 2 * dim_out)
        self.relu = nn.ReLU(inplace=True)
        self.linear2 = nn.Linear(2 * dim_out, dim_out)
        self.l2norm = Normalize(2)

    def forward(self, x):
        hidden = self.relu(self.linear1(x.view(x.shape[0], -1)))
        return self.l2norm(self.linear2(hidden))


class Normalize(nn.Module):
    """Lp-normalize along dim 1 (default p=2)."""

    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
        return x.div(norm)


class Flatten(nn.Module):
    """Collapse all dimensions after the batch dimension."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, feat):
        return feat.view(feat.size(0), -1)
class PoolEmbed(nn.Module):
    """Adaptive-pool a tapped feature map, flatten, project, L2-normalize."""

    # (pool_size, channels) expected at each supported tap layer.
    _LAYER_SPECS = {0: (8, 16), 1: (8, 16), 2: (6, 32), 3: (4, 64), 4: (1, 64)}

    def __init__(self, layer=0, dim_out=128, pool_type='avg'):
        super().__init__()
        try:
            pool_size, nChannels = self._LAYER_SPECS[layer]
        except KeyError:
            raise NotImplementedError('layer not supported: {}'.format(layer))
        self.embed = nn.Sequential()
        # Layer 4 is already 1x1-equivalent, so no pooling stage is added.
        if layer <= 3:
            if pool_type == 'max':
                self.embed.add_module('MaxPool', nn.AdaptiveMaxPool2d((pool_size, pool_size)))
            elif pool_type == 'avg':
                self.embed.add_module('AvgPool', nn.AdaptiveAvgPool2d((pool_size, pool_size)))
        self.embed.add_module('Flatten', Flatten())
        self.embed.add_module('Linear', nn.Linear(nChannels * pool_size * pool_size, dim_out))
        self.embed.add_module('Normalize', Normalize(2))

    def forward(self, x):
        return self.embed(x)
if __name__ == '__main__':
    import torch

    # Smoke test: map three student feature maps onto teacher shapes.
    student_feats = [
        torch.randn(2, 16, 16, 16),
        torch.randn(2, 32, 8, 8),
        torch.randn(2, 64, 4, 4),
    ]
    teacher_feats = [
        torch.randn(2, 32, 16, 16),
        torch.randn(2, 64, 8, 8),
        torch.randn(2, 128, 4, 4),
    ]
    net = ConnectorV2([s.shape for s in student_feats],
                      [t.shape for t in teacher_feats])
    for mapped in net(student_feats):
        print(mapped.shape)
| 9,622 | 32.068729 | 107 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/models/ShuffleNetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle: interleave channels across ``groups`` groups."""

    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> swap the group/channel axes -> flatten back
        n, c, h, w = x.size()
        g = self.groups
        return x.view(n, g, c // g, h, w).permute(0, 2, 1, 3, 4).reshape(n, c, h, w)


class SplitBlock(nn.Module):
    """Split channels into a leading ``ratio`` fraction and the remainder."""

    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        cut = int(x.size(1) * self.ratio)
        return x[:, :cut, :, :], x[:, cut:, :, :]
class BasicBlock(nn.Module):
    """ShuffleNetV2 stride-1 unit: split, transform one half, concat, shuffle."""

    def __init__(self, in_channels, split_ratio=0.5, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.split = SplitBlock(split_ratio)
        branch_channels = int(in_channels * split_ratio)
        self.conv1 = nn.Conv2d(branch_channels, branch_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(branch_channels)
        # depthwise 3x3
        self.conv2 = nn.Conv2d(branch_channels, branch_channels,
                               kernel_size=3, stride=1, padding=1,
                               groups=branch_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(branch_channels)
        self.conv3 = nn.Conv2d(branch_channels, branch_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(branch_channels)
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        passthrough, branch = self.split(x)
        y = F.relu(self.bn1(self.conv1(branch)))
        y = self.bn2(self.conv2(y))
        preact = self.bn3(self.conv3(y))
        activated = F.relu(preact)
        # preact keeps the untouched half concatenated but is NOT shuffled.
        preact = torch.cat([passthrough, preact], 1)
        out = self.shuffle(torch.cat([passthrough, activated], 1))
        if self.is_last:
            return out, preact
        return out
class DownBlock(nn.Module):
    """ShuffleNetV2 spatial-reduction unit.

    Two stride-2 branches (each ending with out_channels//2 channels) are
    concatenated and channel-shuffled, halving the spatial resolution.
    """

    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2
        # Left branch: stride-2 depthwise 3x3, then pointwise 1x1.
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # Right branch: pointwise 1x1, stride-2 depthwise 3x3, pointwise 1x1.
        self.conv3 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(mid_channels)
        self.conv4 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
        self.bn4 = nn.BatchNorm2d(mid_channels)
        self.conv5 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(mid_channels)
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        left = self.bn1(self.conv1(x))
        left = F.relu(self.bn2(self.conv2(left)))
        right = F.relu(self.bn3(self.conv3(x)))
        right = self.bn4(self.conv4(right))
        right = F.relu(self.bn5(self.conv5(right)))
        merged = torch.cat([left, right], 1)
        return self.shuffle(merged)
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 backbone adapted for 128x128 inputs (ImageNet-100).

    `net_size` indexes the module-level `configs` dict to select stage
    widths and depths. forward() can also expose intermediate features,
    which is used by the knowledge-distillation code in this repo.
    """
    def __init__(self, net_size, num_classes=10):
        super(ShuffleNetV2, self).__init__()
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
        #                        stride=1, padding=1, bias=False)
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.conv1 = nn.Conv2d(3, 24, kernel_size=4, padding=1, stride=2, bias=False) #h=h//2
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        # Final 1x1 conv widens to the last config entry before the classifier.
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        self.linear = nn.Linear(out_channels[3], num_classes)
    def _make_layer(self, out_channels, num_blocks):
        """Build one stage: a stride-2 DownBlock plus `num_blocks` BasicBlocks.

        The last BasicBlock is flagged is_last so the stage also emits the
        pre-activation feature map (unpacked in forward()).
        """
        layers = [DownBlock(self.in_channels, out_channels)]
        for i in range(num_blocks):
            layers.append(BasicBlock(out_channels, is_last=(i == num_blocks - 1)))
        self.in_channels = out_channels
        return nn.Sequential(*layers)
    def get_feat_modules(self):
        """Return the feature-producing modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m
    def get_bn_before_relu(self):
        raise NotImplementedError('ShuffleNetV2 currently is not supported for "Overhaul" teacher')
    def forward(self, x, is_feat=False, preact=False):
        """Return logits, or ([f0..f4], logits) when is_feat=True.

        Each stage's Sequential ends in an is_last BasicBlock, so calling a
        stage yields an (output, pre-activation) pair that is unpacked here.
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1)
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 4)  # assumes the feature map is 4x4 here (128x128 input) -- TODO confirm
        out = out.view(out.size(0), -1)
        f4 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], out
            else:
                return [f0, f1, f2, f3, f4], out
        else:
            return out
# Architecture configurations keyed by `net_size` (the width multiplier).
# 'out_channels': widths of the three stages plus the final 1x1 conv.
# 'num_blocks': number of BasicBlocks per stage (each stage is preceded by a DownBlock).
configs = {
    0.2: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 3, 3)
    },
    0.3: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 7, 3)
    },
    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },
    1: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2: {
        'out_channels': (224, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
def ShuffleV2(**kwargs):
    """Build the 1.0x-width ShuffleNetV2 variant (see `configs[1]`)."""
    return ShuffleNetV2(net_size=1, **kwargs)
if __name__ == '__main__':
    # Smoke test: forward a random 128x128 batch, time it, and print the
    # shapes of the returned feature maps and logits.
    net = ShuffleV2(num_classes=100)
    x = torch.randn(3, 3, 128, 128)
    import time
    a = time.time()
    feats, logit = net(x, is_feat=True, preact=True)
    b = time.time()
    print(b - a)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
| 7,170 | 32.825472 | 107 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/SSKD/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Original Author: Wei Yang
"""
__all__ = ['wrn']
class BasicBlock(nn.Module):
    """Pre-activation residual block for WideResNet (BN -> ReLU -> conv)."""

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        # 1x1 projection shortcut only when the channel counts differ.
        if self.equalInOut:
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                          stride=stride, padding=0, bias=False)

    def forward(self, x):
        preact = self.relu1(self.bn1(x))
        # Identity skip when shapes match; otherwise project the pre-activated
        # tensor through the 1x1 shortcut conv.
        shortcut = x if self.equalInOut else self.convShortcut(preact)
        out = self.relu2(self.bn2(self.conv1(preact)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(shortcut, out)
class NetworkBlock(nn.Module):
    """A WRN stage of `nb_layers` stacked blocks; only the first block may
    change the channel count or downsample (via `stride`)."""

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        blocks = []
        for idx in range(nb_layers):
            first = idx == 0
            blocks.append(block(in_planes if first else out_planes,
                                out_planes,
                                stride if first else 1,
                                dropRate))
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-k) adapted for 128x128 inputs (ImageNet-100).

    The stem is a stride-2 4x4 conv plus a stride-2 max pool (instead of the
    CIFAR stride-1 3x3 conv), so 128x128 inputs reach the final 8x8 average
    pool with the expected spatial size.

    Args:
        depth: total depth; must satisfy depth = 6n + 4.
        num_classes: output dimension of the final linear classifier.
        widen_factor: channel-width multiplier for the three stages.
        dropRate: dropout probability inside each BasicBlock.

    Bug fix: forward(is_feat=True, preact=False) previously fell through
    without returning the feature list (only the preact branch returned
    ([f0..f4], logits)), which broke callers such as train_student.py that
    probe feature shapes via `model(data, is_feat=True)`. Both settings now
    return ([f0, f1, f2, f3, f4], logits).
    """
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        # self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False) #for cifar
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=4, stride=2, padding=1, bias=False) #for imagenet100
        self.bn0 = nn.BatchNorm2d(nChannels[0])
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style init for convs; BN starts at identity; zero biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def get_feat_modules(self):
        """Return the feature-producing modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.block1)
        feat_m.append(self.block2)
        feat_m.append(self.block3)
        return feat_m
    def get_bn_before_relu(self):
        """Return the BN layer directly preceding a ReLU for each stage."""
        bn1 = self.block2.layer[0].bn1
        bn2 = self.block3.layer[0].bn1
        bn3 = self.bn1
        return [bn1, bn2, bn3]
    def forward(self, x, is_feat=False, preact=False):
        """Run the network.

        Returns logits, or ([f0, f1, f2, f3, f4], logits) when is_feat=True.
        With preact=True the stage features f1-f3 are passed through their
        following BN layers (pre-activation form).
        """
        # out = self.conv1(x)
        out = F.relu(self.bn0(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1) ##extra maxpool2d
        f0 = out
        out = self.block1(out)
        f1 = out
        out = self.block2(out)
        f2 = out
        out = self.block3(out)
        f3 = out
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        f4 = out
        out = self.fc(out)
        if is_feat:
            if preact:
                f1 = self.block2.layer[0].bn1(f1)
                f2 = self.block3.layer[0].bn1(f2)
                f3 = self.bn1(f3)
            # Fixed: previously only the preact branch returned the features.
            return [f0, f1, f2, f3, f4], out
        else:
            return out
def wrn(**kwargs):
    """Construct a Wide Residual Network from explicit keyword arguments."""
    return WideResNet(**kwargs)


def wrn_40_2(**kwargs):
    """WRN-40-2: depth 40, widen factor 2."""
    return WideResNet(depth=40, widen_factor=2, **kwargs)


def wrn_40_1(**kwargs):
    """WRN-40-1: depth 40, widen factor 1."""
    return WideResNet(depth=40, widen_factor=1, **kwargs)


def wrn_16_2(**kwargs):
    """WRN-16-2: depth 16, widen factor 2."""
    return WideResNet(depth=16, widen_factor=2, **kwargs)


def wrn_16_1(**kwargs):
    """WRN-16-1: depth 16, widen factor 1."""
    return WideResNet(depth=16, widen_factor=1, **kwargs)
if __name__ == '__main__':
    # Smoke test: forward a random 128x128 batch and print feature shapes.
    import torch
    x = torch.randn(2, 3, 128, 128)
    net = wrn_40_1(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    # Sanity check: get_bn_before_relu must hand back BatchNorm layers.
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 5,786 | 32.258621 | 116 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/train_student.py | """
the general training framework
"""
from __future__ import print_function
import os
import argparse
import socket
import time
import torch
import torch.optim as optim
import torch.nn as nn
import torch.backends.cudnn as cudnn
from models import model_dict
from models.util import Embed, ConvReg, LinearEmbed
from models.util import Connector, Translator, Paraphraser
from dataset.imagenet100 import get_imagenet100_dataloaders, get_imagenet100_dataloaders_sample
from helper.util import adjust_learning_rate
from distiller_zoo import DistillKL, HintLoss, Attention, Similarity, Correlation, VIDLoss, RKDLoss
from distiller_zoo import PKT, ABLoss, FactorTransfer, KDSVD, FSP, NSTLoss
from crd.criterion import CRDLoss
from helper.loops import train_distill as train, validate
from helper.pretrain import init
def parse_option():
    """Parse command-line options for student (KD) training.

    Derives opt.model_t from the teacher checkpoint path, builds the output
    folder names, and creates them on disk (side effect). Returns the
    populated argparse Namespace.
    """
    hostname = socket.gethostname()
    parser = argparse.ArgumentParser('argument for training')
    parser.add_argument('--root_path', type=str, default='')
    parser.add_argument('--data_path', type=str, default='')
    ## use cGAN-generated fake data
    parser.add_argument('--use_fake_data', action='store_true', default=False)
    parser.add_argument('--fake_data_path', type=str, default='')
    parser.add_argument('--nfake', type=int, default=100000)
    parser.add_argument('--finetune', action='store_true', default=False)
    parser.add_argument('--init_student_path', type=str, default='')
    parser.add_argument('--print_freq', type=int, default=100, help='print frequency')
    parser.add_argument('--tb_freq', type=int, default=500, help='tb frequency')
    parser.add_argument('--save_freq', type=int, default=20, help='save frequency')
    parser.add_argument('--batch_size', type=int, default=128, help='batch_size')
    parser.add_argument('--num_workers', type=int, default=0, help='num of workers to use')
    parser.add_argument('--epochs', type=int, default=240, help='number of training epochs')
    parser.add_argument('--init_epochs', type=int, default=30, help='init training for two-stage methods')
    parser.add_argument('--resume_epoch', type=int, default=0)
    # optimization
    parser.add_argument('--learning_rate', type=float, default=0.05, help='learning rate')
    parser.add_argument('--lr_decay_epochs', type=str, default='150,180,210', help='where to decay lr, can be a list')
    parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='decay rate for learning rate')
    parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    # model
    parser.add_argument('--model_s', type=str, default='resnet8')
    parser.add_argument('--path_t', type=str, default=None, help='teacher model snapshot')
    # distillation
    parser.add_argument('--distill', type=str, default='kd', choices=['kd', 'hint', 'attention', 'similarity',
                                                                      'correlation', 'vid', 'crd', 'kdsvd', 'fsp',
                                                                      'rkd', 'pkt', 'abound', 'factor', 'nst'])
    parser.add_argument('--trial', type=str, default='1', help='trial id')
    parser.add_argument('-r', '--gamma', type=float, default=1, help='weight for classification')
    parser.add_argument('-a', '--alpha', type=float, default=None, help='weight balance for KD')
    parser.add_argument('-b', '--beta', type=float, default=None, help='weight balance for other losses')
    # KL distillation
    parser.add_argument('--kd_T', type=float, default=4, help='temperature for KD distillation')
    # NCE distillation
    parser.add_argument('--feat_dim', default=128, type=int, help='feature dimension')
    parser.add_argument('--mode', default='exact', type=str, choices=['exact', 'relax'])
    parser.add_argument('--nce_k', default=16384, type=int, help='number of negative samples for NCE')
    parser.add_argument('--nce_t', default=0.07, type=float, help='temperature parameter for softmax')
    parser.add_argument('--nce_m', default=0.5, type=float, help='momentum for non-parametric updates')
    # hint layer
    parser.add_argument('--hint_layer', default=2, type=int, choices=[0, 1, 2, 3, 4])
    opt = parser.parse_args()
    # set different learning rate from these 4 models
    # (these lightweight students are given a smaller LR when trained from scratch)
    if (opt.model_s in ['MobileNetV2', 'ShuffleV1', 'ShuffleV2']) and not opt.finetune:
        opt.learning_rate = 0.01
    # Convert the comma-separated decay schedule string into a list of ints.
    iterations = opt.lr_decay_epochs.split(',')
    opt.lr_decay_epochs = list([])
    for it in iterations:
        opt.lr_decay_epochs.append(int(it))
    opt.model_t = get_teacher_name(opt.path_t)
    # Output folder encodes whether cGAN-generated fake data is used (and how much).
    if (not opt.use_fake_data) or opt.nfake<=0:
        opt.save_folder = os.path.join(opt.root_path, 'output/student_models/vanilla')
    else:
        fake_data_name = opt.fake_data_path.split('/')[-1]
        opt.save_folder = os.path.join(opt.root_path, 'output/student_models/{}_useNfake_{}'.format(fake_data_name, opt.nfake))
    os.makedirs(opt.save_folder, exist_ok=True)
    opt.model_name = 'S_{}_T_{}_{}_r_{}_a_{}_b_{}_{}'.format(opt.model_s, opt.model_t, opt.distill,
                                                             opt.gamma, opt.alpha, opt.beta, opt.trial)
    opt.save_intrain_folder = os.path.join(opt.save_folder, 'ckpts_in_train', opt.model_name)
    os.makedirs(opt.save_intrain_folder, exist_ok=True)
    return opt
def get_teacher_name(model_path):
    """Recover the teacher architecture name from a checkpoint path.

    Filenames look like '<prefix>_<arch>_...'; WRN architectures span three
    underscore-separated tokens (e.g. 'wrn_40_2'), all others just one.
    """
    tokens = model_path.split('/')[-1].split('_')
    if tokens[1] == 'wrn':
        return '_'.join(tokens[1:4])
    return tokens[1]
def load_teacher(model_path, n_cls):
    """Instantiate the teacher architecture named in `model_path` and load
    its weights from the checkpoint's 'model' entry."""
    print('==> loading teacher model')
    arch = get_teacher_name(model_path)
    teacher = model_dict[arch](num_classes=n_cls)
    teacher.load_state_dict(torch.load(model_path)['model'])
    print('==> done')
    return teacher
def main():
    """Train a student network by knowledge distillation from a fixed teacher.

    Builds dataloaders (real data, optionally mixed with cGAN-generated fake
    data), instantiates teacher/student, wires up the selected distillation
    criterion (plus any trainable auxiliary modules), then trains — or loads
    an already-finished checkpoint — and logs the final test accuracy.
    """
    best_acc = 0
    opt = parse_option()
    # torch.manual_seed(2021)
    # torch.cuda.manual_seed(2021)
    # torch.backends.cudnn.deterministic = True
    # dataloader: CRD needs the contrastive-sampling loader (k/mode args);
    # every other method uses the plain instance loader.
    if opt.distill in ['crd']:
        train_loader, val_loader, n_data = get_imagenet100_dataloaders_sample(opt.data_path, batch_size=opt.batch_size,
                                                                              num_workers=opt.num_workers,
                                                                              k=opt.nce_k,
                                                                              mode=opt.mode,
                                                                              use_fake_data=opt.use_fake_data, fake_data_folder=opt.fake_data_path, nfake=opt.nfake)
    else:
        train_loader, val_loader, n_data = get_imagenet100_dataloaders(opt.data_path, batch_size=opt.batch_size,
                                                                       num_workers=opt.num_workers,
                                                                       is_instance=True,
                                                                       use_fake_data=opt.use_fake_data, fake_data_folder=opt.fake_data_path, nfake=opt.nfake)
    n_cls = 100
    # model
    model_t = load_teacher(opt.path_t, n_cls)
    model_s = model_dict[opt.model_s](num_classes=n_cls)
    ## student model name, how to initialize student model, etc.
    student_model_filename = 'S_{}_T_{}_{}_r_{}_a_{}_b_{}_epoch_{}'.format(opt.model_s, opt.model_t, opt.distill,opt.gamma, opt.alpha, opt.beta, opt.epochs)
    if opt.finetune:
        ckpt_cnn_filename = os.path.join(opt.save_folder, student_model_filename+'_finetune_True_last.pth')
        ## load pre-trained model
        checkpoint = torch.load(opt.init_student_path)
        model_s.load_state_dict(checkpoint['model'])
    else:
        ckpt_cnn_filename = os.path.join(opt.save_folder,student_model_filename+'_last.pth')
    print('\n ' + ckpt_cnn_filename)
    # Probe both networks with a dummy batch to discover their feature shapes;
    # shape-dependent KD methods size their auxiliary modules from these.
    data = torch.randn(2, 3, 128, 128)
    model_t.eval()
    model_s.eval()
    feat_t, _ = model_t(data, is_feat=True)
    feat_s, _ = model_s(data, is_feat=True)
    # module_list holds everything run in forward passes; trainable_list holds
    # only the parameters the optimizer should update.
    module_list = nn.ModuleList([])
    module_list.append(model_s)
    trainable_list = nn.ModuleList([])
    trainable_list.append(model_s)
    criterion_cls = nn.CrossEntropyLoss()
    criterion_div = DistillKL(opt.kd_T)
    # Select the distillation criterion. Some methods add trainable adapter
    # modules; 'abound', 'factor' and 'fsp' also run an init training stage.
    if opt.distill == 'kd':
        criterion_kd = DistillKL(opt.kd_T)
    elif opt.distill == 'hint':
        criterion_kd = HintLoss()
        regress_s = ConvReg(feat_s[opt.hint_layer].shape, feat_t[opt.hint_layer].shape)
        module_list.append(regress_s)
        trainable_list.append(regress_s)
    elif opt.distill == 'crd':
        opt.s_dim = feat_s[-1].shape[1]
        opt.t_dim = feat_t[-1].shape[1]
        opt.n_data = n_data
        criterion_kd = CRDLoss(opt)
        module_list.append(criterion_kd.embed_s)
        module_list.append(criterion_kd.embed_t)
        trainable_list.append(criterion_kd.embed_s)
        trainable_list.append(criterion_kd.embed_t)
    elif opt.distill == 'attention':
        criterion_kd = Attention()
    elif opt.distill == 'nst':
        criterion_kd = NSTLoss()
    elif opt.distill == 'similarity':
        criterion_kd = Similarity()
    elif opt.distill == 'rkd':
        criterion_kd = RKDLoss()
    elif opt.distill == 'pkt':
        criterion_kd = PKT()
    elif opt.distill == 'kdsvd':
        criterion_kd = KDSVD()
    elif opt.distill == 'correlation':
        criterion_kd = Correlation()
        embed_s = LinearEmbed(feat_s[-1].shape[1], opt.feat_dim)
        embed_t = LinearEmbed(feat_t[-1].shape[1], opt.feat_dim)
        module_list.append(embed_s)
        module_list.append(embed_t)
        trainable_list.append(embed_s)
        trainable_list.append(embed_t)
    elif opt.distill == 'vid':
        s_n = [f.shape[1] for f in feat_s[1:-1]]
        t_n = [f.shape[1] for f in feat_t[1:-1]]
        criterion_kd = nn.ModuleList(
            [VIDLoss(s, t, t) for s, t in zip(s_n, t_n)]
        )
        # add this as some parameters in VIDLoss need to be updated
        trainable_list.append(criterion_kd)
    elif opt.distill == 'abound':
        s_shapes = [f.shape for f in feat_s[1:-1]]
        t_shapes = [f.shape for f in feat_t[1:-1]]
        connector = Connector(s_shapes, t_shapes)
        # init stage training
        init_trainable_list = nn.ModuleList([])
        init_trainable_list.append(connector)
        init_trainable_list.append(model_s.get_feat_modules())
        criterion_kd = ABLoss(len(feat_s[1:-1]))
        init(model_s, model_t, init_trainable_list, criterion_kd, train_loader, opt)
        # classification
        module_list.append(connector)
    elif opt.distill == 'factor':
        s_shape = feat_s[-2].shape
        t_shape = feat_t[-2].shape
        paraphraser = Paraphraser(t_shape)
        translator = Translator(s_shape, t_shape)
        # init stage training
        init_trainable_list = nn.ModuleList([])
        init_trainable_list.append(paraphraser)
        criterion_init = nn.MSELoss()
        init(model_s, model_t, init_trainable_list, criterion_init, train_loader, opt)
        # classification
        criterion_kd = FactorTransfer()
        module_list.append(translator)
        module_list.append(paraphraser)
        trainable_list.append(translator)
    elif opt.distill == 'fsp':
        s_shapes = [s.shape for s in feat_s[:-1]]
        t_shapes = [t.shape for t in feat_t[:-1]]
        criterion_kd = FSP(s_shapes, t_shapes)
        # init stage training
        init_trainable_list = nn.ModuleList([])
        init_trainable_list.append(model_s.get_feat_modules())
        init(model_s, model_t, init_trainable_list, criterion_kd, train_loader, opt)
        # classification training
        pass
    else:
        raise NotImplementedError(opt.distill)
    criterion_list = nn.ModuleList([])
    criterion_list.append(criterion_cls)    # classification loss
    criterion_list.append(criterion_div)    # KL divergence loss, original knowledge distillation
    criterion_list.append(criterion_kd)     # other knowledge distillation loss
    # optimizer
    optimizer = optim.SGD(trainable_list.parameters(),
                          lr=opt.learning_rate,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)
    # append teacher after optimizer to avoid weight_decay
    module_list.append(model_t)
    if torch.cuda.is_available():
        module_list.cuda()
        criterion_list.cuda()
        cudnn.benchmark = True
    # validate teacher accuracy
    teacher_acc, _, _ = validate(val_loader, model_t, criterion_cls, opt)
    print('teacher accuracy: ', teacher_acc)
    if not os.path.isfile(ckpt_cnn_filename):
        print("\n Start training the {} >>>".format(opt.model_s))
        ## resume training: restore all state dicts from the in-train checkpoint
        if opt.resume_epoch>0:
            save_file = opt.save_intrain_folder + "/ckpt_{}_epoch_{}.pth".format(opt.model_s, opt.resume_epoch)
            checkpoint = torch.load(save_file)
            model_s.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            module_list.load_state_dict(checkpoint['module_list'])
            trainable_list.load_state_dict(checkpoint['trainable_list'])
            criterion_list.load_state_dict(checkpoint['criterion_list'])
            # module_list = checkpoint['module_list']
            # criterion_list = checkpoint['criterion_list']
            # # trainable_list = checkpoint['trainable_list']
            # ckpt_test_accuracy = checkpoint['accuracy']
            # ckpt_epoch = checkpoint['epoch']
            # print('\n Resume training: epoch {}, test_acc {}...'.format(ckpt_epoch, ckpt_test_accuracy))
            if torch.cuda.is_available():
                module_list.cuda()
                criterion_list.cuda()
        #end if
        for epoch in range(opt.resume_epoch, opt.epochs):
            adjust_learning_rate(epoch, opt, optimizer)
            print("==> training...")
            time1 = time.time()
            train_acc, train_loss = train(epoch, train_loader, module_list, criterion_list, optimizer, opt)
            time2 = time.time()
            print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
            test_acc, tect_acc_top5, test_loss = validate(val_loader, model_s, criterion_cls, opt)
            # regular saving
            if (epoch+1) % opt.save_freq == 0:
                print('==> Saving...')
                state = {
                    'epoch': epoch,
                    'model': model_s.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'module_list': module_list.state_dict(),
                    'criterion_list': criterion_list.state_dict(),
                    'trainable_list': trainable_list.state_dict(),
                    'accuracy': test_acc,
                }
                save_file = os.path.join(opt.save_intrain_folder, 'ckpt_{}_epoch_{}.pth'.format(opt.model_s, epoch+1))
                torch.save(state, save_file)
        ##end for epoch
        # store model (final student weights only)
        torch.save({
            'opt':opt,
            'model': model_s.state_dict(),
        }, ckpt_cnn_filename)
        print("\n End training CNN.")
    else:
        print("\n Loading pre-trained {}.".format(opt.model_s))
        checkpoint = torch.load(ckpt_cnn_filename)
        model_s.load_state_dict(checkpoint['model'])
    # Final evaluation and append-only result logging.
    test_acc, test_acc_top5, _ = validate(val_loader, model_s, criterion_cls, opt)
    print("\n {}, test_acc:{:.3f}, test_acc_top5:{:.3f}.".format(opt.model_s, test_acc, test_acc_top5))
    eval_results_fullpath = opt.save_folder + "/test_result_" + opt.model_name + ".txt"
    if not os.path.isfile(eval_results_fullpath):
        eval_results_logging_file = open(eval_results_fullpath, "w")
        eval_results_logging_file.close()
    with open(eval_results_fullpath, 'a') as eval_results_logging_file:
        eval_results_logging_file.write("\n===================================================================================================")
        eval_results_logging_file.write("\n Test results for {} \n".format(opt.model_name))
        print(opt, file=eval_results_logging_file)
        eval_results_logging_file.write("\n Test accuracy: Top1 {:.3f}, Top5 {:.3f}.".format(test_acc, test_acc_top5))
        eval_results_logging_file.write("\n Test error rate: Top1 {:.3f}, Top5 {:.3f}.".format(100-test_acc, 100-test_acc_top5))
if __name__ == '__main__':
    # Script entry point; banner lines delimit each run in the console log.
    print("\n ===================================================================================================")
    main()
    print("\n ===================================================================================================")
| 16,847 | 42.2 | 162 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/test_infer_speed.py | import os
import argparse
import shutil
import timeit
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import random
import matplotlib.pyplot as plt
import matplotlib as mpl
from torch import autograd
from torchvision.utils import save_image
import csv
from tqdm import tqdm
import gc
import h5py
### import my stuffs ###
from models import model_dict
# Command-line options for the inference-speed benchmark.
parser = argparse.ArgumentParser()
parser.add_argument('--cnn_name', type=str, default='',
                    help='The CNN used in the classification.')
parser.add_argument('--nsamp', type=int, default=10000, help='number of images')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--img_size', type=int, default=128)
args = parser.parse_args()
## torchstat does not support ResNet50
# from torchstat import stat
# net = model_dict[args.cnn_name](num_classes=100)
# stat(net, (3, 32, 32))
################################################################################
# Convenience function to count the number of parameters in a module
def count_parameters(module, verbose=True):
    """Return the total number of scalar parameters in `module`.

    Prints the count as well when `verbose` is True.
    """
    total = 0
    for param in module.parameters():
        total += param.data.nelement()
    if verbose:
        print('Number of parameters: {}'.format(total))
    return total
# model: build the chosen architecture with 100 output classes.
net = model_dict[args.cnn_name](num_classes=100)
num_parameters = count_parameters(net, verbose=False)
## randomly generate args.nsamp images (uint8-range values, NCHW layout)
images = np.random.randint(low=0, high=255, size=args.nsamp*3*args.img_size**2).reshape((args.nsamp, 3, args.img_size, args.img_size))
print(images.shape)
class IMGs_dataset(torch.utils.data.Dataset):
    """Minimal in-memory image dataset with optional labels and [-1, 1] scaling."""

    def __init__(self, images, labels=None, normalize=False):
        super(IMGs_dataset, self).__init__()
        self.images = images
        self.n_images = len(images)
        self.labels = labels
        if labels is not None and len(images) != len(labels):
            raise Exception('images ({}) and labels ({}) do not have the same length!!!'.format(
                len(self.images), len(self.labels)))
        self.normalize = normalize

    def __getitem__(self, index):
        image = self.images[index]
        if self.normalize:
            # Map uint8 pixel range [0, 255] to [-1, 1].
            image = (image / 255.0 - 0.5) / 0.5
        if self.labels is None:
            return image
        return (image, self.labels[index])

    def __len__(self):
        return self.n_images
# Wrap the random images in a dataset/loader to mimic real inference input.
trainset = IMGs_dataset(images, None, normalize=True)
dataloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=False)
net = net.cuda()
net.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
# Time one full pass over the data; gradients disabled for pure inference cost.
with torch.no_grad():
    start_time = timeit.default_timer()
    for _, images in enumerate(dataloader):
        images = images.type(torch.float).cuda()
        outputs = net(images)
    total_time = timeit.default_timer()-start_time
print("\n {} has {} parameters...".format(args.cnn_name, num_parameters))
print('\r Total time: {}s; Inference FPS: {}'.format(total_time, args.nsamp/total_time))
| 3,225 | 30.019231 | 143 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/train_teacher.py | from __future__ import print_function
import os
import argparse
import socket
import time
# import tensorboard_logger as tb_logger
import torch
import torch.optim as optim
import torch.nn as nn
import torch.backends.cudnn as cudnn
from models import model_dict
from dataset.imagenet100 import get_imagenet100_dataloaders
from helper.util import adjust_learning_rate, accuracy, AverageMeter
from helper.loops import train_vanilla as train, validate
def parse_option():
    """Parse command-line options for vanilla teacher training.

    Builds the output folder / model name (encoding whether cGAN fake data is
    used) and creates the folder on disk (side effect). Returns the Namespace.
    """
    hostname = socket.gethostname()
    parser = argparse.ArgumentParser('argument for training')
    parser.add_argument('--root_path', type=str, default='')
    parser.add_argument('--data_path', type=str, default='')
    ## use cGAN-generated fake data
    parser.add_argument('--use_fake_data', action='store_true', default=False)
    parser.add_argument('--fake_data_path', type=str, default='')
    parser.add_argument('--nfake', type=int, default=100000)
    parser.add_argument('--finetune', action='store_true', default=False)
    parser.add_argument('--init_model_path', type=str, default='')
    parser.add_argument('--print_freq', type=int, default=100, help='print frequency')
    parser.add_argument('--save_freq', type=int, default=20, help='save frequency')
    parser.add_argument('--batch_size', type=int, default=128, help='batch_size')
    parser.add_argument('--num_workers', type=int, default=0, help='num of workers to use')
    parser.add_argument('--epochs', type=int, default=240, help='number of training epochs')
    parser.add_argument('--resume_epoch', type=int, default=0)
    # optimization
    parser.add_argument('--learning_rate', type=float, default=0.05, help='learning rate')
    parser.add_argument('--lr_decay_epochs', type=str, default='150,180,210', help='where to decay lr, can be a list')
    parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='decay rate for learning rate')
    parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    parser.add_argument('--model', type=str, default='resnet110')
    parser.add_argument('-t', '--trial', type=int, default=0, help='the experiment id')
    opt = parser.parse_args()
    # set different learning rate from these 4 models
    # (these lightweight architectures get a smaller LR when trained from scratch)
    if (opt.model in ['MobileNetV2', 'ShuffleV1', 'ShuffleV2']) and not opt.finetune:
        opt.learning_rate = 0.01
    # Convert the comma-separated decay schedule string into a list of ints.
    iterations = opt.lr_decay_epochs.split(',')
    opt.lr_decay_epochs = list([])
    for it in iterations:
        opt.lr_decay_epochs.append(int(it))
    # Output folder and model name encode the fake-data configuration.
    if (not opt.use_fake_data) or opt.nfake<=0:
        opt.save_folder = os.path.join(opt.root_path, 'output/teacher_models/vanilla')
        opt.model_name = '{}_lr_{}_decay_{}_trial_{}'.format(opt.model, opt.learning_rate,
                                                             opt.weight_decay, opt.trial)
    else:
        fake_data_name = opt.fake_data_path.split('/')[-1]
        opt.save_folder = os.path.join(opt.root_path, 'output/teacher_models/{}_useNfake_{}'.format(fake_data_name, opt.nfake))
        opt.model_name = '{}_lr_{}_decay_{}_finetune_{}_trial_{}'.format(opt.model, opt.learning_rate,
                                                                         opt.weight_decay, opt.finetune, opt.trial)
    os.makedirs(opt.save_folder, exist_ok=True)
    return opt
def main():
    """Train (or load) a vanilla teacher CNN on ImageNet-100.

    Supports optional cGAN-generated fake data, fine-tuning from an initial
    checkpoint, and resuming from in-training checkpoints; ends by logging
    top-1/top-5 test accuracy to a text file.
    """
    # best_acc = 0
    opt = parse_option()
    # dataloader
    train_loader, val_loader = get_imagenet100_dataloaders(opt.data_path, batch_size=opt.batch_size, num_workers=opt.num_workers, use_fake_data=opt.use_fake_data, fake_data_folder=opt.fake_data_path, nfake=opt.nfake)
    n_cls = 100
    # model
    model = model_dict[opt.model](num_classes=n_cls)
    # optimizer
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.learning_rate,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)
    criterion = nn.CrossEntropyLoss()
    if torch.cuda.is_available():
        model = model.cuda()
        criterion = criterion.cuda()
        cudnn.benchmark = True
    # Final checkpoint name encodes fine-tuning; fine-tuning starts from
    # weights loaded out of opt.init_model_path.
    if opt.finetune:
        ckpt_cnn_filename = os.path.join(opt.save_folder, 'ckpt_{}_epoch_{}_finetune_True_last.pth'.format(opt.model, opt.epochs))
        ## load pre-trained model
        checkpoint = torch.load(opt.init_model_path)
        model.load_state_dict(checkpoint['model'])
    else:
        ckpt_cnn_filename = os.path.join(opt.save_folder, 'ckpt_{}_epoch_{}_last.pth'.format(opt.model, opt.epochs))
    print('\n ' + ckpt_cnn_filename)
    if not os.path.isfile(ckpt_cnn_filename):
        print("\n Start training the {} >>>".format(opt.model))
        opt.save_intrain_folder = os.path.join(opt.save_folder, 'ckpts_in_train', opt.model_name)
        os.makedirs(opt.save_intrain_folder, exist_ok=True)
        # Resume model/optimizer state from the requested epoch's checkpoint.
        if opt.resume_epoch>0:
            save_file = opt.save_intrain_folder + "/ckpt_{}_epoch_{}.pth".format(opt.model, opt.resume_epoch)
            checkpoint = torch.load(save_file)
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        #end if
        for epoch in range(opt.resume_epoch, opt.epochs):
            adjust_learning_rate(epoch, opt, optimizer)
            print("==> training...")
            time1 = time.time()
            train_acc, train_loss = train(epoch, train_loader, model, criterion, optimizer, opt)
            test_acc, test_acc_top5, test_loss = validate(val_loader, model, criterion, opt)
            time2 = time.time()
            print('\r epoch {}/{}: train_acc:{:.3f}, test_acc:{:.3f}, total time {:.2f}'.format(epoch+1, opt.epochs, train_acc, test_acc, time2 - time1))
            # regular saving
            if (epoch+1) % opt.save_freq == 0:
                print('==> Saving...')
                state = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'accuracy': test_acc,
                }
                save_file = os.path.join(opt.save_intrain_folder, 'ckpt_{}_epoch_{}.pth'.format(opt.model, epoch+1))
                torch.save(state, save_file)
        ##end for epoch
        # store model (final weights only)
        torch.save({
            'opt':opt,
            'model': model.state_dict(),
        }, ckpt_cnn_filename)
        print("\n End training CNN.")
    else:
        print("\n Loading pre-trained {}.".format(opt.model))
        checkpoint = torch.load(ckpt_cnn_filename)
        model.load_state_dict(checkpoint['model'])
    # Final evaluation and append-only result logging.
    test_acc, test_acc_top5, _ = validate(val_loader, model, criterion, opt)
    print("\n {}, test_acc:{:.3f}, test_acc_top5:{:.3f}.".format(opt.model, test_acc, test_acc_top5))
    eval_results_fullpath = opt.save_folder + "/test_result_" + opt.model_name + ".txt"
    if not os.path.isfile(eval_results_fullpath):
        eval_results_logging_file = open(eval_results_fullpath, "w")
        eval_results_logging_file.close()
    with open(eval_results_fullpath, 'a') as eval_results_logging_file:
        eval_results_logging_file.write("\n===================================================================================================")
        eval_results_logging_file.write("\n Test results for {} \n".format(opt.model_name))
        print(opt, file=eval_results_logging_file)
        eval_results_logging_file.write("\n Test accuracy: Top1 {:.3f}, Top5 {:.3f}.".format(test_acc, test_acc_top5))
        eval_results_logging_file.write("\n Test error rate: Top1 {:.3f}, Top5 {:.3f}.".format(100-test_acc, 100-test_acc_top5))
if __name__ == '__main__':
    # Script entry point.
    main()
| 7,684 | 41.458564 | 216 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/dataset/cifar100.py | from __future__ import print_function
import os
import socket
import numpy as np
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from PIL import Image
import h5py
import torch
import gc
"""
mean = {
'cifar100': (0.5071, 0.4867, 0.4408),
}
std = {
'cifar100': (0.2675, 0.2565, 0.2761),
}
"""
## for vanilla CNN training and KD other than CRD
class IMGs_dataset(torch.utils.data.Dataset):
    """In-memory image dataset over an (N, C, H, W) uint8 array.

    When ``transform`` is given, each image is converted to a PIL RGB image
    (H*W*C) first so torchvision transforms can be applied.  With
    ``is_instance=True`` the sample index is returned as well, which some
    KD methods need.
    """

    def __init__(self, images, labels=None, transform=None, is_instance=False):
        super(IMGs_dataset, self).__init__()
        self.images = images
        self.n_images = len(images)
        self.labels = labels
        self.is_instance = is_instance
        if labels is not None and len(images) != len(labels):
            raise Exception('images (' + str(len(images)) + ') and labels (' + str(len(labels)) + ') do not have the same length!!!')
        self.transform = transform

    def __getitem__(self, index):
        ## for RGB only
        image = self.images[index]
        if self.transform is not None:
            # C*H*W -> H*W*C, then to PIL so torchvision transforms apply.
            hwc = np.transpose(image, (1, 2, 0))
            image = self.transform(Image.fromarray(np.uint8(hwc), mode='RGB'))
        if self.labels is None:
            return image
        label = self.labels[index]
        return (image, label, index) if self.is_instance else (image, label)

    def __len__(self):
        return self.n_images
def get_cifar100_dataloaders(data_folder, batch_size=128, num_workers=0, is_instance=False, use_fake_data=False, fake_data_folder=None, nfake=1e30):
    """
    Build CIFAR-100 train/test DataLoaders, optionally mixing GAN-generated
    (fake) images into the real training split.

    Args:
        data_folder: root directory for the torchvision CIFAR-100 download.
        batch_size: training batch size (the test loader is fixed at 100).
        num_workers: number of DataLoader worker processes.
        is_instance: if True, each training sample is (image, label, index)
            and the training-set size n_data is returned as a third value.
        use_fake_data: if True, read fake images/labels from the HDF5 file at
            ``fake_data_folder`` (datasets 'fake_images'/'fake_labels') and
            append them to the real training images; channel statistics are
            then recomputed over the combined set.
        nfake: cap on the number of fake samples kept, split as evenly as
            possible over the 100 classes (drawn without replacement via the
            global numpy RNG).

    Returns:
        (train_loader, test_loader), or (train_loader, test_loader, n_data)
        when ``is_instance`` is True.
    """
    # Default transforms use the standard CIFAR-100 channel statistics; they
    # are rebuilt below if fake data changes the training distribution.
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ])
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ])

    ## get real data
    train_set = datasets.CIFAR100(root=data_folder,
                                  download=True,
                                  train=True,
                                  transform=train_transform)
    real_images = train_set.data
    real_images = np.transpose(real_images, (0, 3, 1, 2))  # N*H*W*C ---> N*C*H*W
    real_labels = np.array(train_set.targets)
    num_classes=100

    if use_fake_data:
        ## load fake data
        # NOTE(review): assumes the .h5 file stores uint8 images already in
        # N*C*H*W layout, matching `real_images` after the transpose above
        # — confirm against the generator's export code.
        with h5py.File(fake_data_folder, "r") as f:
            train_images = f['fake_images'][:]
            train_labels = f['fake_labels'][:]
        train_labels = train_labels.astype(int)
        if nfake<len(train_labels):
            # Subsample the fake set to `nfake` images, class-balanced; the
            # last class absorbs the integer-division remainder.
            indx_fake = []
            for i in range(num_classes):
                indx_fake_i = np.where(train_labels==i)[0]
                if i != (num_classes-1):
                    nfake_i = nfake//num_classes
                else:
                    nfake_i = nfake-(nfake//num_classes)*i
                indx_fake_i = np.random.choice(indx_fake_i, size=min(int(nfake_i), len(indx_fake_i)), replace=False)
                indx_fake.append(indx_fake_i)
            #end for i
            indx_fake = np.concatenate(indx_fake)
            train_images = train_images[indx_fake]
            train_labels = train_labels[indx_fake]
        ## concatenate
        train_images = np.concatenate((real_images, train_images), axis=0)
        train_labels = np.concatenate((real_labels, train_labels), axis=0)
        train_labels = train_labels.astype(int)

        ## compute normalizing constants
        # Per-channel mean/std over the combined real+fake pixels rescaled
        # to [0,1]; used for BOTH train and test normalization below.
        train_means = []
        train_stds = []
        for i in range(3):
            images_i = train_images[:,i,:,:]
            images_i = images_i/255.0
            train_means.append(np.mean(images_i))
            train_stds.append(np.std(images_i))
        ## for i
        print("\n Final training set's dimensiona: ", train_images.shape)

        ## make training set
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        train_set = IMGs_dataset(train_images, train_labels, transform=train_transform, is_instance=is_instance)
        assert len(train_set)==len(train_images)
        # Free the large concatenated arrays; the dataset holds its own ref.
        del train_images, train_labels; gc.collect()
    else:
        train_set = IMGs_dataset(real_images, real_labels, transform=train_transform, is_instance=is_instance)

    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers)

    test_set = datasets.CIFAR100(root=data_folder,
                                 download=True,
                                 train=False,
                                 transform=test_transform)
    test_loader = DataLoader(test_set,
                             batch_size=100,
                             shuffle=False,
                             num_workers=num_workers)

    if is_instance:
        n_data = len(train_set)
        return train_loader, test_loader, n_data
    else:
        return train_loader, test_loader
## for CRD
class IMGs_dataset_CRD(torch.utils.data.Dataset):
    """In-memory CIFAR-100 dataset for CRD distillation.

    Besides (image, label, index), each item can also carry ``sample_idx``:
    the index of one positive sample followed by ``k`` negatives, used to
    assemble the contrastive batch.
    """

    def __init__(self, images, labels, transform=None, k=4096, mode='exact', is_sample=True, percent=1.0):
        super(IMGs_dataset_CRD, self).__init__()
        self.images = images
        self.n_images = len(images)
        self.labels = labels
        if len(images) != len(labels):
            raise Exception('images (' + str(len(images)) + ') and labels (' + str(len(labels)) + ') do not have the same length!!!')
        self.transform = transform
        self.k = k
        self.mode = mode
        self.is_sample = is_sample

        num_classes = 100
        num_samples = len(labels)

        # Per-class index pools: positives are same-class samples, negatives
        # are every sample of every other class.
        positive = [[] for _ in range(num_classes)]
        for idx in range(num_samples):
            positive[labels[idx]].append(idx)

        negative = [[] for _ in range(num_classes)]
        for c in range(num_classes):
            for other in range(num_classes):
                if other == c:
                    continue
                negative[c].extend(positive[other])

        self.cls_positive = [np.asarray(positive[c]) for c in range(num_classes)]
        self.cls_negative = [np.asarray(negative[c]) for c in range(num_classes)]

        if 0 < percent < 1:
            # Randomly keep only a fraction of each negative pool.
            n = int(len(self.cls_negative[0]) * percent)
            self.cls_negative = [np.random.permutation(self.cls_negative[c])[0:n]
                                 for c in range(num_classes)]

        self.cls_positive = np.asarray(self.cls_positive)
        self.cls_negative = np.asarray(self.cls_negative)

    def __getitem__(self, index):
        ## for RGB only
        image = self.images[index]
        if self.transform is not None:
            hwc = np.transpose(image, (1, 2, 0))  # C*H*W -> H*W*C
            image = self.transform(Image.fromarray(np.uint8(hwc), mode='RGB'))
        label = self.labels[index]

        if not self.is_sample:
            # directly return
            return image, label, index

        # Sample contrastive examples: the positive is either the anchor
        # itself ('exact') or a random same-class sample ('relax').
        if self.mode == 'exact':
            pos_idx = index
        elif self.mode == 'relax':
            pos_idx = np.random.choice(self.cls_positive[label], 1)[0]
        else:
            raise NotImplementedError(self.mode)
        replace = self.k > len(self.cls_negative[label])
        neg_idx = np.random.choice(self.cls_negative[label], self.k, replace=replace)
        return image, label, index, np.hstack((np.asarray([pos_idx]), neg_idx))

    def __len__(self):
        return self.n_images
def get_cifar100_dataloaders_sample(data_folder, batch_size=128, num_workers=0, k=4096, mode='exact',
                                    is_sample=True, percent=1.0,
                                    use_fake_data=False, fake_data_folder=None, nfake=1e30):
    """
    Build CIFAR-100 DataLoaders for CRD distillation (samples also carry
    contrastive positive/negative indices), optionally mixing fake images
    into the real training split.

    Args:
        data_folder: root directory for the torchvision CIFAR-100 download.
        batch_size: training batch size (the test loader is fixed at 100).
        num_workers: number of DataLoader worker processes.
        k / mode / is_sample / percent: forwarded to IMGs_dataset_CRD
            (contrastive sampling configuration).
        use_fake_data: if True, read fake images/labels from the HDF5 file at
            ``fake_data_folder`` and append them to the real training images;
            channel statistics are recomputed over the combined set.
        nfake: cap on the number of fake samples kept, split as evenly as
            possible over the 100 classes (drawn without replacement via the
            global numpy RNG).

    Returns:
        (train_loader, test_loader, n_data) where n_data is the training-set
        size (needed by CRD's memory bank).
    """
    # Default transforms use the standard CIFAR-100 channel statistics; they
    # are rebuilt below if fake data changes the training distribution.
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ])
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ])

    ## get real data
    train_set = datasets.CIFAR100(root=data_folder,
                                  download=True,
                                  train=True,
                                  transform=train_transform)
    real_images = train_set.data
    real_images = np.transpose(real_images, (0, 3, 1, 2))  # N*H*W*C ---> N*C*H*W
    real_labels = np.array(train_set.targets)
    num_classes=100

    if use_fake_data:
        ## load fake data
        # NOTE(review): assumes the .h5 file stores uint8 images already in
        # N*C*H*W layout — confirm against the generator's export code.
        with h5py.File(fake_data_folder, "r") as f:
            train_images = f['fake_images'][:]
            train_labels = f['fake_labels'][:]
        train_labels = train_labels.astype(int)
        if nfake<len(train_labels):
            # Subsample the fake set to `nfake` images, class-balanced; the
            # last class absorbs the integer-division remainder.
            indx_fake = []
            for i in range(num_classes):
                indx_fake_i = np.where(train_labels==i)[0]
                if i != (num_classes-1):
                    nfake_i = nfake//num_classes
                else:
                    nfake_i = nfake-(nfake//num_classes)*i
                indx_fake_i = np.random.choice(indx_fake_i, size=min(int(nfake_i), len(indx_fake_i)), replace=False)
                indx_fake.append(indx_fake_i)
            #end for i
            indx_fake = np.concatenate(indx_fake)
            train_images = train_images[indx_fake]
            train_labels = train_labels[indx_fake]
        ## concatenate
        train_images = np.concatenate((real_images, train_images), axis=0)
        train_labels = np.concatenate((real_labels, train_labels), axis=0)
        train_labels = train_labels.astype(int)

        ## compute normalizing constants
        # Per-channel mean/std over the combined real+fake pixels rescaled
        # to [0,1]; used for BOTH train and test normalization below.
        train_means = []
        train_stds = []
        for i in range(3):
            images_i = train_images[:,i,:,:]
            images_i = images_i/255.0
            train_means.append(np.mean(images_i))
            train_stds.append(np.std(images_i))
        ## for i
        print("\n Final training set's dimensiona: ", train_images.shape)

        ## make training set
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        train_set = IMGs_dataset_CRD(train_images, train_labels, transform=train_transform, k=k, mode=mode, is_sample=is_sample, percent=percent)
        assert len(train_set)==len(train_images)
        # Free the large concatenated arrays; the dataset holds its own ref.
        del train_images, train_labels; gc.collect()
    else:
        train_set = IMGs_dataset_CRD(real_images, real_labels, transform=train_transform, k=k, mode=mode, is_sample=is_sample, percent=percent)

    n_data = len(train_set)

    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers)

    test_set = datasets.CIFAR100(root=data_folder,
                                 download=True,
                                 train=False,
                                 transform=test_transform)
    test_loader = DataLoader(test_set,
                             batch_size=100,
                             shuffle=False,
                             num_workers=num_workers)

    return train_loader, test_loader, n_data
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/dataset/imagenet100.py | from __future__ import print_function
import os
import socket
import numpy as np
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from PIL import Image
import h5py
import torch
import gc
## for vanilla CNN training and KD other than CRD
class IMGs_dataset(torch.utils.data.Dataset):
    """Wrap in-memory (N, C, H, W) uint8 images (and optional labels) as a Dataset.

    ``transform`` is applied after converting each image to a PIL RGB image;
    ``is_instance=True`` additionally returns the sample index, as required
    by some KD methods.
    """

    def __init__(self, images, labels=None, transform=None, is_instance=False):
        super(IMGs_dataset, self).__init__()
        if labels is not None and len(images) != len(labels):
            raise Exception('images (' + str(len(images)) + ') and labels (' + str(len(labels)) + ') do not have the same length!!!')
        self.images = images
        self.n_images = len(images)
        self.labels = labels
        self.transform = transform
        self.is_instance = is_instance

    def __getitem__(self, index):
        ## for RGB only
        img = self.images[index]
        if self.transform is not None:
            # C*H*W -> H*W*C PIL image, then apply torchvision transforms.
            img = Image.fromarray(np.uint8(np.transpose(img, (1, 2, 0))), mode='RGB')
            img = self.transform(img)
        if self.labels is None:
            return img
        if self.is_instance:
            return img, self.labels[index], index
        return img, self.labels[index]

    def __len__(self):
        return self.n_images
def get_imagenet100_dataloaders(data_folder, batch_size=128, num_workers=0, is_instance=False, use_fake_data=False, fake_data_folder=None, nfake=1e30):
    """
    Build ImageNet-100 (128x128) train/test DataLoaders from a pre-packed
    HDF5 file, optionally mixing fake (GAN-generated) images into the real
    training split.

    Args:
        data_folder: directory containing 'ImageNet_128x128_100Class.h5'
            with datasets 'images_train'/'labels_train'/'images_valid'/
            'labels_valid'.
        batch_size: training batch size (the test loader is fixed at 100).
        num_workers: number of DataLoader worker processes.
        is_instance: if True, each training sample is (image, label, index)
            and the training-set size n_data is returned as a third value.
        use_fake_data: if True, read fake images/labels from the HDF5 file at
            ``fake_data_folder`` and append them to the real training images.
        nfake: cap on the number of fake samples kept, split as evenly as
            possible over the 100 classes (drawn without replacement via the
            global numpy RNG).

    Returns:
        (train_loader, test_loader), or (train_loader, test_loader, n_data)
        when ``is_instance`` is True.
    """
    trainset_h5py_file = os.path.join(data_folder, 'ImageNet_128x128_100Class.h5')
    hf = h5py.File(trainset_h5py_file, 'r')
    # NOTE(review): images are used below as N*C*H*W uint8 arrays — confirm
    # that the .h5 file was exported in that layout.
    real_images_train = hf['images_train'][:]
    real_labels_train = hf['labels_train'][:]
    real_images_test = hf['images_valid'][:]
    real_labels_test = hf['labels_valid'][:]
    hf.close()
    num_classes=100

    if use_fake_data:
        ## load fake data
        with h5py.File(fake_data_folder, "r") as f:
            train_images = f['fake_images'][:]
            train_labels = f['fake_labels'][:]
        train_labels = train_labels.astype(int)
        if nfake<len(train_labels):
            # Subsample the fake set to `nfake` images, class-balanced; the
            # last class absorbs the integer-division remainder.
            indx_fake = []
            for i in range(num_classes):
                indx_fake_i = np.where(train_labels==i)[0]
                if i != (num_classes-1):
                    nfake_i = nfake//num_classes
                else:
                    nfake_i = nfake-(nfake//num_classes)*i
                indx_fake_i = np.random.choice(indx_fake_i, size=min(int(nfake_i), len(indx_fake_i)), replace=False)
                indx_fake.append(indx_fake_i)
            #end for i
            indx_fake = np.concatenate(indx_fake)
            train_images = train_images[indx_fake]
            train_labels = train_labels[indx_fake]
        ## concatenate
        train_images = np.concatenate((real_images_train, train_images), axis=0)
        train_labels = np.concatenate((real_labels_train, train_labels), axis=0)
        train_labels = train_labels.astype(int)

        ## compute normalizing constants
        # Per-channel mean/std over the combined real+fake pixels rescaled
        # to [0,1]; used for BOTH train and test normalization below.
        train_means = []
        train_stds = []
        for i in range(3):
            images_i = train_images[:,i,:,:]
            images_i = images_i/255.0
            train_means.append(np.mean(images_i))
            train_stds.append(np.std(images_i))
        ## for i
        print("\n Final training set's dimensiona: ", train_images.shape)

        ## make training set
        train_transform = transforms.Compose([
            # transforms.RandomCrop(128, padding=4),
            transforms.RandomResizedCrop(128),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        train_set = IMGs_dataset(train_images, train_labels, transform=train_transform, is_instance=is_instance)
        assert len(train_set)==len(train_images)
        # Free the large concatenated arrays; the dataset holds its own ref.
        del train_images, train_labels; gc.collect()
    else:
        ## compute normalizing constants
        # Same statistics computation, but over the real training set only.
        train_means = []
        train_stds = []
        for i in range(3):
            images_i = real_images_train[:,i,:,:]
            images_i = images_i/255.0
            train_means.append(np.mean(images_i))
            train_stds.append(np.std(images_i))
        ## for i
        print("\n Final training set's dimensiona: ", real_images_train.shape)

        ## make training set
        train_transform = transforms.Compose([
            # transforms.RandomCrop(128, padding=4),
            transforms.RandomResizedCrop(128),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        train_set = IMGs_dataset(real_images_train, real_labels_train, transform=train_transform, is_instance=is_instance)

    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers)

    test_set = IMGs_dataset(real_images_test, real_labels_test, transform=test_transform)
    test_loader = DataLoader(test_set, batch_size=100, shuffle=False, num_workers=num_workers)

    if is_instance:
        n_data = len(train_set)
        return train_loader, test_loader, n_data
    else:
        return train_loader, test_loader
## for CRD
class IMGs_dataset_CRD(torch.utils.data.Dataset):
    """In-memory ImageNet-100 dataset for CRD distillation.

    Each item is (image, label, index) and, when ``is_sample`` is True, also
    carries ``sample_idx``: one positive index followed by ``k`` negative
    indices used to build the contrastive batch.
    """

    def __init__(self, images, labels, transform=None, k=4096, mode='exact', is_sample=True, percent=1.0):
        super(IMGs_dataset_CRD, self).__init__()
        if len(images) != len(labels):
            raise Exception('images (' + str(len(images)) + ') and labels (' + str(len(labels)) + ') do not have the same length!!!')
        self.images = images
        self.n_images = len(images)
        self.labels = labels
        self.transform = transform
        self.k = k
        self.mode = mode
        self.is_sample = is_sample

        num_classes = 100
        num_samples = len(labels)

        # cls_positive[c]: indices of class c; cls_negative[c]: all other
        # classes' indices.
        pos = [[] for _ in range(num_classes)]
        for idx in range(num_samples):
            pos[labels[idx]].append(idx)
        neg = [[] for _ in range(num_classes)]
        for c in range(num_classes):
            for other in range(num_classes):
                if other == c:
                    continue
                neg[c].extend(pos[other])

        self.cls_positive = [np.asarray(pos[c]) for c in range(num_classes)]
        self.cls_negative = [np.asarray(neg[c]) for c in range(num_classes)]

        if 0 < percent < 1:
            # Randomly keep only a fraction of each negative pool.
            keep = int(len(self.cls_negative[0]) * percent)
            self.cls_negative = [np.random.permutation(self.cls_negative[c])[0:keep]
                                 for c in range(num_classes)]

        self.cls_positive = np.asarray(self.cls_positive)
        self.cls_negative = np.asarray(self.cls_negative)

    def __getitem__(self, index):
        ## for RGB only
        img = self.images[index]
        if self.transform is not None:
            # C*H*W -> H*W*C PIL image, then apply torchvision transforms.
            img = Image.fromarray(np.uint8(np.transpose(img, (1, 2, 0))), mode='RGB')
            img = self.transform(img)
        label = self.labels[index]

        if not self.is_sample:
            # directly return
            return img, label, index

        # Pick the positive: the anchor itself ('exact') or a random
        # same-class sample ('relax').
        if self.mode == 'exact':
            pos_idx = index
        elif self.mode == 'relax':
            pos_idx = np.random.choice(self.cls_positive[label], 1)[0]
        else:
            raise NotImplementedError(self.mode)
        replace = self.k > len(self.cls_negative[label])
        neg_idx = np.random.choice(self.cls_negative[label], self.k, replace=replace)
        return img, label, index, np.hstack((np.asarray([pos_idx]), neg_idx))

    def __len__(self):
        return self.n_images
def get_imagenet100_dataloaders_sample(data_folder, batch_size=128, num_workers=0, k=4096, mode='exact',
                                       is_sample=True, percent=1.0,
                                       use_fake_data=False, fake_data_folder=None, nfake=1e30):
    """
    Build ImageNet-100 (128x128) DataLoaders for CRD distillation (samples
    also carry contrastive positive/negative indices), optionally mixing
    fake images into the real training split.

    Args:
        data_folder: directory containing 'ImageNet_128x128_100Class.h5'.
        batch_size: training batch size (the test loader is fixed at 100).
        num_workers: number of DataLoader worker processes.
        k / mode / is_sample / percent: forwarded to IMGs_dataset_CRD
            (contrastive sampling configuration).
        use_fake_data: if True, read fake images/labels from the HDF5 file at
            ``fake_data_folder`` and append them to the real training images.
        nfake: cap on the number of fake samples kept, split as evenly as
            possible over the 100 classes (drawn without replacement via the
            global numpy RNG).

    Returns:
        (train_loader, test_loader, n_data) where n_data is the training-set
        size (needed by CRD's memory bank).
    """
    trainset_h5py_file = os.path.join(data_folder, 'ImageNet_128x128_100Class.h5')
    hf = h5py.File(trainset_h5py_file, 'r')
    # NOTE(review): images are used below as N*C*H*W uint8 arrays — confirm
    # that the .h5 file was exported in that layout.
    real_images_train = hf['images_train'][:]
    real_labels_train = hf['labels_train'][:]
    real_images_test = hf['images_valid'][:]
    real_labels_test = hf['labels_valid'][:]
    hf.close()
    num_classes=100

    if use_fake_data:
        ## load fake data
        with h5py.File(fake_data_folder, "r") as f:
            train_images = f['fake_images'][:]
            train_labels = f['fake_labels'][:]
        train_labels = train_labels.astype(int)
        if nfake<len(train_labels):
            # Subsample the fake set to `nfake` images, class-balanced; the
            # last class absorbs the integer-division remainder.
            indx_fake = []
            for i in range(num_classes):
                indx_fake_i = np.where(train_labels==i)[0]
                if i != (num_classes-1):
                    nfake_i = nfake//num_classes
                else:
                    nfake_i = nfake-(nfake//num_classes)*i
                indx_fake_i = np.random.choice(indx_fake_i, size=min(int(nfake_i), len(indx_fake_i)), replace=False)
                indx_fake.append(indx_fake_i)
            #end for i
            indx_fake = np.concatenate(indx_fake)
            train_images = train_images[indx_fake]
            train_labels = train_labels[indx_fake]
        ## concatenate
        train_images = np.concatenate((real_images_train, train_images), axis=0)
        train_labels = np.concatenate((real_labels_train, train_labels), axis=0)
        train_labels = train_labels.astype(int)

        ## compute normalizing constants
        # Per-channel mean/std over the combined real+fake pixels rescaled
        # to [0,1]; used for BOTH train and test normalization below.
        train_means = []
        train_stds = []
        for i in range(3):
            images_i = train_images[:,i,:,:]
            images_i = images_i/255.0
            train_means.append(np.mean(images_i))
            train_stds.append(np.std(images_i))
        ## for i
        print("\n Final training set's dimensiona: ", train_images.shape)

        ## make training set
        train_transform = transforms.Compose([
            # transforms.RandomCrop(128, padding=4),
            transforms.RandomResizedCrop(128),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        train_set = IMGs_dataset_CRD(train_images, train_labels, transform=train_transform, k=k, mode=mode, is_sample=is_sample, percent=percent)
        assert len(train_set)==len(train_images)
        # Free the large concatenated arrays; the dataset holds its own ref.
        del train_images, train_labels; gc.collect()
    else:
        ## compute normalizing constants
        # Same statistics computation, but over the real training set only.
        train_means = []
        train_stds = []
        for i in range(3):
            images_i = real_images_train[:,i,:,:]
            images_i = images_i/255.0
            train_means.append(np.mean(images_i))
            train_stds.append(np.std(images_i))
        ## for i
        print("\n Final training set's dimensiona: ", real_images_train.shape)

        ## make training set
        train_transform = transforms.Compose([
            # transforms.RandomCrop(128, padding=4),
            transforms.RandomResizedCrop(128),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(train_means, train_stds),
        ])
        train_set = IMGs_dataset_CRD(real_images_train, real_labels_train, transform=train_transform, k=k, mode=mode, is_sample=is_sample, percent=percent)

    n_data = len(train_set)

    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers)

    test_set = IMGs_dataset(real_images_test, real_labels_test, transform=test_transform)
    test_loader = DataLoader(test_set, batch_size=100, shuffle=False, num_workers=num_workers)

    return train_loader, test_loader, n_data
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/dataset/imagenet.py | """
get data loaders
"""
from __future__ import print_function
import os
import socket
import numpy as np
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
def get_data_folder():
    """Return the host-specific directory for storing ImageNet data.

    Known hosts get hard-coded cluster paths; anything else falls back to a
    local './data/imagenet'.  The directory is created if it does not exist.
    """
    known_hosts = (
        ('visiongpu', '/data/vision/phillipi/rep-learn/datasets/imagenet'),
        ('yonglong-home', '/home/yonglong/Data/data/imagenet'),
    )
    host = socket.gethostname()
    data_folder = './data/imagenet'
    for prefix, path in known_hosts:
        if host.startswith(prefix):
            data_folder = path
            break

    if not os.path.isdir(data_folder):
        os.makedirs(data_folder)

    return data_folder
class ImageFolderInstance(datasets.ImageFolder):
    """ImageFolder variant whose samples also carry their dataset index."""

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target, index) where target is class_index of the
            target class.
        """
        sample_path, target = self.imgs[index]
        image = self.loader(sample_path)
        image = image if self.transform is None else self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return image, target, index
class ImageFolderSample(datasets.ImageFolder):
    """: Folder datasets which returns (img, label, index, contrast_index):

    When ``is_sample`` is True, precomputes per-class positive/negative
    index pools so each item can also return the indices of ``k`` randomly
    drawn negative samples (for contrastive/CRD training).
    """
    def __init__(self, root, transform=None, target_transform=None,
                 is_sample=False, k=4096):
        super().__init__(root=root, transform=transform, target_transform=target_transform)

        self.k = k  # number of negatives drawn per sample
        self.is_sample = is_sample

        print('stage1 finished!')

        if self.is_sample:
            num_classes = len(self.classes)
            num_samples = len(self.samples)
            # Collect each sample's class label.
            label = np.zeros(num_samples, dtype=np.int32)
            for i in range(num_samples):
                path, target = self.imgs[i]
                label[i] = target

            # cls_positive[c]: indices of class c.
            self.cls_positive = [[] for i in range(num_classes)]
            for i in range(num_samples):
                self.cls_positive[label[i]].append(i)

            # cls_negative[c]: indices of every other class.
            self.cls_negative = [[] for i in range(num_classes)]
            for i in range(num_classes):
                for j in range(num_classes):
                    if j == i:
                        continue
                    self.cls_negative[i].extend(self.cls_positive[j])

            self.cls_positive = [np.asarray(self.cls_positive[i], dtype=np.int32) for i in range(num_classes)]
            self.cls_negative = [np.asarray(self.cls_negative[i], dtype=np.int32) for i in range(num_classes)]

        print('dataset initialized!')

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target, index) — plus ``sample_idx`` (the anchor
            index followed by ``k`` negative indices) when ``is_sample``.
        """
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)

        if self.is_sample:
            # sample contrastive examples
            # Positive is always the anchor itself; negatives are drawn with
            # replacement from the other-class pool via the global numpy RNG.
            pos_idx = index
            neg_idx = np.random.choice(self.cls_negative[target], self.k, replace=True)
            sample_idx = np.hstack((np.asarray([pos_idx]), neg_idx))
            return img, target, index, sample_idx
        else:
            return img, target, index
def get_test_loader(dataset='imagenet', batch_size=128, num_workers=8):
    """Return a DataLoader over the ImageNet validation split (224x224 center crops)."""
    if dataset != 'imagenet':
        raise NotImplementedError('dataset not supported: {}'.format(dataset))
    data_folder = get_data_folder()

    # Standard ImageNet evaluation pipeline with torchvision's usual
    # channel statistics.
    eval_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    test_set = datasets.ImageFolder(os.path.join(data_folder, 'val'),
                                    transform=eval_transform)
    return DataLoader(test_set,
                      batch_size=batch_size,
                      shuffle=False,
                      num_workers=num_workers,
                      pin_memory=True)
def get_dataloader_sample(dataset='imagenet', batch_size=128, num_workers=8, is_sample=False, k=4096):
    """ImageNet train/test loaders where training samples can carry contrastive indices.

    Returns (train_loader, test_loader, n_train_samples, n_classes).
    """
    if dataset != 'imagenet':
        raise NotImplementedError('dataset not supported: {}'.format(dataset))
    data_folder = get_data_folder()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Train: random crop+flip augmentation; samples come from
    # ImageFolderSample so they can include contrastive indices.
    train_set = ImageFolderSample(os.path.join(data_folder, 'train'),
                                  transform=transforms.Compose([
                                      transforms.RandomResizedCrop(224),
                                      transforms.RandomHorizontalFlip(),
                                      transforms.ToTensor(),
                                      normalize,
                                  ]),
                                  is_sample=is_sample, k=k)
    # Test: deterministic resize + center crop.
    test_set = datasets.ImageFolder(os.path.join(data_folder, 'val'),
                                    transform=transforms.Compose([
                                        transforms.Resize(256),
                                        transforms.CenterCrop(224),
                                        transforms.ToTensor(),
                                        normalize,
                                    ]))

    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers,
                              pin_memory=True)
    test_loader = DataLoader(test_set,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=num_workers,
                             pin_memory=True)

    print('num_samples', len(train_set.samples))
    print('num_class', len(train_set.classes))

    return train_loader, test_loader, len(train_set), len(train_set.classes)
def get_imagenet_dataloader(dataset='imagenet', batch_size=128, num_workers=16, is_instance=False):
    """Plain ImageNet train/test loaders.

    With ``is_instance=True`` training samples are (image, target, index)
    and the number of training samples is returned as a third value.
    """
    if dataset != 'imagenet':
        raise NotImplementedError('dataset not supported: {}'.format(dataset))
    data_folder = get_data_folder()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_tf = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    test_tf = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    train_folder = os.path.join(data_folder, 'train')
    test_folder = os.path.join(data_folder, 'val')

    if is_instance:
        train_set = ImageFolderInstance(train_folder, transform=train_tf)
        n_data = len(train_set)
    else:
        train_set = datasets.ImageFolder(train_folder, transform=train_tf)
    test_set = datasets.ImageFolder(test_folder, transform=test_tf)

    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers,
                              pin_memory=True)
    # The eval loader needs fewer workers than the augmented train loader.
    test_loader = DataLoader(test_set,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=num_workers//2,
                             pin_memory=True)

    return (train_loader, test_loader, n_data) if is_instance else (train_loader, test_loader)
| 8,053 | 32.983122 | 110 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/efficientnet.py | '''EfficientNet in PyTorch.
Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def drop_connect(x, drop_ratio):
    """Drop-connect / stochastic depth: randomly zero whole examples.

    One Bernoulli(keep) draw per batch element, broadcast over C/H/W;
    survivors are rescaled by 1/keep so the expected activation is
    unchanged.  NOTE: mutates ``x`` in place and returns it.
    """
    keep_ratio = 1.0 - drop_ratio
    batch = x.shape[0]
    mask = torch.empty([batch, 1, 1, 1], dtype=x.dtype, device=x.device).bernoulli_(keep_ratio)
    x.div_(keep_ratio).mul_(mask)
    return x
class SE(nn.Module):
    '''Squeeze-and-Excitation block with Swish.'''

    def __init__(self, in_channels, se_channels):
        super(SE, self).__init__()
        # 1x1 bottleneck convs: squeeze to se_channels, then excite back.
        self.se1 = nn.Conv2d(in_channels, se_channels,
                             kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_channels, in_channels,
                             kernel_size=1, bias=True)

    def forward(self, x):
        # Global average pool -> swish -> sigmoid gate, applied channel-wise.
        gate = F.adaptive_avg_pool2d(x, (1, 1))
        gate = self.se1(gate)
        gate = gate * gate.sigmoid()  # swish
        gate = self.se2(gate).sigmoid()
        return x * gate
class Block(nn.Module):
    '''expansion + depthwise + pointwise + squeeze-excitation'''

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio=1,
                 se_ratio=0.,
                 drop_rate=0.):
        super(Block, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio

        channels = expand_ratio * in_channels

        # 1x1 expansion conv (bypassed in forward when expand_ratio == 1,
        # but always constructed so the module layout is fixed).
        self.conv1 = nn.Conv2d(in_channels, channels, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)

        # Depthwise conv: groups == channels; padding keeps spatial size for
        # k=3 (pad 1) and k=5 (pad 2) apart from the stride.
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=kernel_size,
                               stride=stride,
                               padding=(1 if kernel_size == 3 else 2),
                               groups=channels, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

        # Squeeze-and-excitation on the expanded features; the bottleneck
        # width is derived from the block *input* width.
        se_channels = int(in_channels * se_ratio)
        self.se = SE(channels, se_channels)

        # 1x1 linear projection back to out_channels (no activation).
        self.conv3 = nn.Conv2d(channels, out_channels, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)

        # Residual connection only when shapes match (MobileNetV2 style).
        self.has_skip = (stride == 1) and (in_channels == out_channels)

    def forward(self, x):
        if self.expand_ratio == 1:
            out = x
        else:
            out = self.bn1(self.conv1(x))
            out = out * out.sigmoid()  # swish
        out = self.bn2(self.conv2(out))
        out = out * out.sigmoid()  # swish
        out = self.se(out)
        out = self.bn3(self.conv3(out))
        if self.has_skip:
            # Stochastic depth only during training and only when enabled.
            if self.training and self.drop_rate > 0:
                out = drop_connect(out, self.drop_rate)
            out = out + x
        return out
class EfficientNet(nn.Module):
    """EfficientNet built from a ``cfg`` dict describing each stage.

    Expected cfg keys: per-stage lists 'num_blocks', 'expansion',
    'out_channels', 'kernel_size', 'stride', plus scalars 'dropout_rate'
    (final classifier dropout) and 'drop_connect_rate' (max stochastic-depth
    rate, ramped linearly over the blocks).
    """

    def __init__(self, cfg, num_classes=10):
        super(EfficientNet, self).__init__()
        self.cfg = cfg
        # Stem: stride-1 3x3 conv (CIFAR-style, no initial downsampling).
        self.conv1 = nn.Conv2d(3,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_channels=32)
        self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)

    def _make_layers(self, in_channels):
        """Stack MBConv blocks; drop-connect rate grows linearly with depth."""
        layers = []
        cfg = [self.cfg[k] for k in ['expansion', 'out_channels', 'num_blocks', 'kernel_size',
                                     'stride']]
        b = 0
        blocks = sum(self.cfg['num_blocks'])
        for expansion, out_channels, num_blocks, kernel_size, stride in zip(*cfg):
            strides = [stride] + [1] * (num_blocks - 1)
            for stride in strides:
                drop_rate = self.cfg['drop_connect_rate'] * b / blocks
                layers.append(
                    Block(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          expansion,
                          se_ratio=0.25,
                          drop_rate=drop_rate))
                in_channels = out_channels
                # BUGFIX: `b` was never incremented, so drop_rate evaluated
                # to 0 for every block and the linear drop-connect schedule
                # was a no-op. Advance the block counter per block built.
                b += 1
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return class logits for a batch of RGB images.

        Global average pooling makes the head independent of input size.
        """
        out = swish(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        dropout_rate = self.cfg['dropout_rate']
        if self.training and dropout_rate > 0:
            out = F.dropout(out, p=dropout_rate)
        out = self.linear(out)
        return out
def EfficientNetB0(num_classes=100):
    """EfficientNet-B0 configuration (stride schedule as used by this repo)."""
    cfg = dict(
        num_blocks=[1, 2, 2, 3, 3, 4, 1],
        expansion=[1, 6, 6, 6, 6, 6, 6],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        kernel_size=[3, 3, 5, 3, 5, 5, 3],
        stride=[1, 2, 2, 2, 1, 2, 1],
        dropout_rate=0.2,
        drop_connect_rate=0.2,
    )
    return EfficientNet(cfg, num_classes=num_classes)
def EfficientNetB0_2(num_classes=100):
    """EfficientNet-B0 variant with one extra stride-2 stage (stage 5).

    Identical to ``EfficientNetB0`` except the fifth stage downsamples
    (original stride schedule was [1, 2, 2, 2, 1, 2, 1]).
    """
    cfg = dict(
        num_blocks=[1, 2, 2, 3, 3, 4, 1],
        expansion=[1, 6, 6, 6, 6, 6, 6],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        kernel_size=[3, 3, 5, 3, 5, 5, 3],
        stride=[1, 2, 2, 2, 2, 2, 1],
        dropout_rate=0.2,
        drop_connect_rate=0.2,
    )
    return EfficientNet(cfg, num_classes=num_classes)
def test():
    """Smoke test: forward a random 128x128 batch through EfficientNetB0."""
    net = EfficientNetB0()
    dummy = torch.randn(2, 3, 128, 128)
    print(net(dummy).shape)


if __name__ == '__main__':
    test()
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/resnet.py | from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1 (spatial size is
    preserved when stride == 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    # Output channels == planes * expansion.
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        """Two 3x3 convs with a residual connection.

        ``downsample`` (optional module) adapts the residual when shape
        changes; ``is_last=True`` makes forward also return the
        pre-activation tensor (used by some KD losses).
        """
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        # Only the first conv may downsample via `stride`.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out += residual
        preact = out  # keep the pre-ReLU activation for is_last
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    # Output channels == planes * expansion.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        """Bottleneck residual block: 1x1 reduce -> 3x3 (may downsample) -> 1x1 expand x4.

        ``downsample`` (optional module) adapts the residual when shape
        changes; ``is_last=True`` makes forward also return the
        pre-activation tensor (used by some KD losses).
        """
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out += residual
        preact = out  # keep the pre-ReLU activation for is_last
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class ResNet(nn.Module):
    """CIFAR-style three-stage ResNet adapted to 128x128 ImageNet-100 inputs.

    ``depth`` fixes the number of blocks per stage: (depth-2)/6 for
    BasicBlock, (depth-2)/9 for Bottleneck. The stem conv is stride-2 and
    ``forward`` adds a max-pool, so a 128x128 input reaches the final 8x8
    average pool.
    """
    def __init__(self, depth, num_filters, block_name='BasicBlock', num_classes=10):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = num_filters[0]
        # self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=3, padding=1,
        #                        bias=False) ##for cifar
        self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=4, stride=2, padding=1, bias=False) #for imagenet100, h=h//2
        self.bn1 = nn.BatchNorm2d(num_filters[0])
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, num_filters[1], n)
        self.layer2 = self._make_layer(block, num_filters[2], n, stride=2)
        self.layer3 = self._make_layer(block, num_filters[3], n, stride=2)
        self.avgpool = nn.AvgPool2d(8)  # expects an 8x8 feature map at this point
        self.fc = nn.Linear(num_filters[3] * block.expansion, num_classes)
        # He init for convs, identity affine for norm layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first one may downsample/project."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = list([])
        # is_last marks the block whose pre-activation is unpacked in forward()
        layers.append(block(self.inplanes, planes, stride, downsample, is_last=(blocks == 1)))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, is_last=(i == blocks-1)))
        return nn.Sequential(*layers)
    def get_feat_modules(self):
        """Return the feature-producing modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.relu)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m
    def get_bn_before_relu(self):
        """Return the last pre-ReLU BatchNorm of each stage."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3]
    def forward(self, x, is_feat=False, preact=False):
        """Return logits; with ``is_feat`` also return stage features
        (pre-activation versions when ``preact`` is True)."""
        x = self.conv1(x) #64x64
        x = self.bn1(x)
        x = self.relu(x)
        x = F.max_pool2d(x, 3, stride=2, padding=1) ##extra maxpool2d; 32x32
        f0 = x
        x, f1_pre = self.layer1(x) # 32x32
        f1 = x
        x, f2_pre = self.layer2(x) # 16x16
        f2 = x
        x, f3_pre = self.layer3(x) # 8x8
        f3 = x
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        f4 = x
        x = self.fc(x)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], x
            else:
                return [f0, f1, f2, f3, f4], x
        else:
            return x
# Depth / per-stage-width presets; the 'x4' variants use 4x wider channels.
def resnet8(**kwargs):
    return ResNet(8, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet14(**kwargs):
    return ResNet(14, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet20(**kwargs):
    return ResNet(20, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet32(**kwargs):
    return ResNet(32, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet44(**kwargs):
    return ResNet(44, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet56(**kwargs):
    return ResNet(56, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet110(**kwargs):
    return ResNet(110, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet8x4(**kwargs):
    return ResNet(8, [32, 64, 128, 256], 'basicblock', **kwargs)
def resnet32x4(**kwargs):
    return ResNet(32, [32, 64, 128, 256], 'basicblock', **kwargs)
if __name__ == '__main__':
    # Smoke test: forward a random batch, print feature shapes and check
    # that get_bn_before_relu() really returns BatchNorm2d modules.
    import torch
    x = torch.randn(2, 3, 128, 128)
    net = resnet8x4(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 7,967 | 29.764479 | 122 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/mobilenetv2.py | """
MobileNetV2 implementation used in
<Knowledge Distillation via Route Constrained Optimization>
"""
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
__all__ = ['mobilenetv2_T_w', 'mobile_half']
BN = None
def conv_bn(inp, oup, stride):
    """3x3 conv (padding 1, no bias) -> BatchNorm -> ReLU."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(inp, oup):
    """1x1 conv (stride 1, no bias) -> BatchNorm -> ReLU; spatial size is unchanged."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual: 1x1 expand -> 3x3 depthwise -> 1x1 linear project.

    The skip connection is used only when stride is 1 and the input and
    output channel counts match.
    """
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.blockname = None
        self.stride = stride
        assert stride in [1, 2]
        self.use_res_connect = self.stride == 1 and inp == oup
        hidden = inp * expand_ratio
        self.conv = nn.Sequential(
            # pointwise expansion
            nn.Conv2d(inp, hidden, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # depthwise 3x3
            nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # pointwise linear projection (no activation)
            nn.Conv2d(hidden, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )
        self.names = ['0', '1', '2', '3', '4', '5', '6', '7']

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        return self.conv(x)
class MobileNetV2(nn.Module):
    """Width-scalable MobileNetV2 backbone for 128x128 inputs.

    ``T`` is the inverted-residual expansion factor, ``feature_dim`` the
    output dimension of the final linear head, and ``width_mult`` scales
    every channel count.
    """
    def __init__(self, T,
                 feature_dim,
                 input_size=128,
                 width_mult=1.,
                 remove_avg=False):
        super(MobileNetV2, self).__init__()
        self.remove_avg = remove_avg  # skip global average pooling when True
        # setting of inverted residual blocks
        self.interverted_residual_setting = [
            # t, c, n, s  (expansion, out channels, repeats, first-block stride)
            [1, 16, 1, 1],
            [T, 24, 2, 2], #s=1 for cifar
            [T, 32, 3, 2],
            [T, 64, 4, 2],
            [T, 96, 3, 1],
            [T, 160, 3, 2],
            [T, 320, 1, 1],
        ]
        # building first layer
        assert input_size % 32 == 0
        input_channel = int(32 * width_mult)
        self.conv1 = conv_bn(3, input_channel, 2)
        # building inverted residual blocks
        self.blocks = nn.ModuleList([])
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * width_mult)
            layers = []
            strides = [s] + [1] * (n - 1)  # only the first block of a stage may downsample
            for stride in strides:
                layers.append(
                    InvertedResidual(input_channel, output_channel, stride, t)
                )
                input_channel = output_channel
            self.blocks.append(nn.Sequential(*layers))
        self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280
        self.conv2 = conv_1x1_bn(input_channel, self.last_channel)
        # building classifier
        self.classifier = nn.Sequential(
            # nn.Dropout(0.5),
            nn.Linear(self.last_channel, feature_dim),
        )
        # H = input_size // (32//2)
        # self.avgpool = nn.AvgPool2d(H, ceil_mode=True)
        self._initialize_weights()
        print(T, width_mult)
    def get_bn_before_relu(self):
        """Final (pre-activation) BatchNorm of four selected stages."""
        bn1 = self.blocks[1][-1].conv[-1]
        bn2 = self.blocks[2][-1].conv[-1]
        bn3 = self.blocks[4][-1].conv[-1]
        bn4 = self.blocks[6][-1].conv[-1]
        return [bn1, bn2, bn3, bn4]
    def get_feat_modules(self):
        """Feature-producing modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.blocks)
        return feat_m
    def forward(self, x, is_feat=False, preact=False):
        """Return logits; with ``is_feat`` also return intermediate features f0..f5."""
        out = self.conv1(x)
        f0 = out
        out = self.blocks[0](out)
        out = self.blocks[1](out)
        f1 = out
        out = self.blocks[2](out)
        f2 = out
        out = self.blocks[3](out)
        out = self.blocks[4](out)
        f3 = out
        out = self.blocks[5](out)
        out = self.blocks[6](out)
        f4 = out
        out = self.conv2(out)
        if not self.remove_avg:
            # out = self.avgpool(out)
            out = F.adaptive_avg_pool2d(out, (1, 1))
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.classifier(out)
        if is_feat:
            return [f0, f1, f2, f3, f4, f5], out
        else:
            return out
    def _initialize_weights(self):
        """He-style init for convs, identity affine for BN, N(0, 0.01) for linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def mobilenetv2_T_w(T, W, feature_dim=100):
    """Build a MobileNetV2 with expansion factor ``T`` and width multiplier ``W``."""
    model = MobileNetV2(T=T, feature_dim=feature_dim, width_mult=W)
    return model
def mobile_half(num_classes):
    """Half-width MobileNetV2 (T=6, width_mult=0.5)."""
    return mobilenetv2_T_w(6, 0.5, num_classes)
if __name__ == '__main__':
    # Smoke test: forward a random batch, print feature shapes and verify
    # get_bn_before_relu() returns BatchNorm2d modules.
    x = torch.randn(2, 3, 128, 128)
    net = mobile_half(100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 5,822 | 27.404878 | 115 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/vgg.py | '''VGG '''
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
    """VGG backbone (five conv blocks) with an MLP classifier head for ImageNet-100.

    ``cfg`` is a list of five per-block channel lists. Pooling layers sit
    between blocks; pool3 is only applied for 64x64 inputs (see forward), so
    128x128 inputs flatten to a 512*4*4 feature for the classifier.
    """
    def __init__(self, cfg, batch_norm=False, num_classes=1000):
        super(VGG, self).__init__()
        self.block0 = self._make_layers(cfg[0], batch_norm, 3)
        self.block1 = self._make_layers(cfg[1], batch_norm, cfg[0][-1])
        self.block2 = self._make_layers(cfg[2], batch_norm, cfg[1][-1])
        self.block3 = self._make_layers(cfg[3], batch_norm, cfg[2][-1])
        self.block4 = self._make_layers(cfg[4], batch_norm, cfg[3][-1])
        self.pool0 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        # self.pool4 = nn.AdaptiveAvgPool2d((1, 1)) #for cifar
        self.pool4 = nn.MaxPool2d(kernel_size=4, stride=4) #for imagenet100
        # self.classifier = nn.Linear(512, num_classes) #for cifar
        self.classifier = nn.Sequential(
            nn.Linear(512 * 4 * 4, 1024),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(1024, 1024),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(1024, num_classes),
        ) #for imagenet100
        self._initialize_weights()
    def get_feat_modules(self):
        """Feature-producing modules (blocks interleaved with pools) in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.block0)
        feat_m.append(self.pool0)
        feat_m.append(self.block1)
        feat_m.append(self.pool1)
        feat_m.append(self.block2)
        feat_m.append(self.pool2)
        feat_m.append(self.block3)
        feat_m.append(self.pool3)
        feat_m.append(self.block4)
        feat_m.append(self.pool4)
        return feat_m
    def get_bn_before_relu(self):
        """Last module of blocks 1-4 (each block ends pre-activation; see _make_layers)."""
        bn1 = self.block1[-1]
        bn2 = self.block2[-1]
        bn3 = self.block3[-1]
        bn4 = self.block4[-1]
        return [bn1, bn2, bn3, bn4]
    def forward(self, x, is_feat=False, preact=False):
        """Return logits; with ``is_feat`` also return per-block features
        (pre-ReLU versions when ``preact`` is True)."""
        h = x.shape[2]
        x = F.relu(self.block0(x))
        f0 = x
        x = self.pool0(x)
        x = self.block1(x)
        f1_pre = x
        x = F.relu(x)
        f1 = x
        x = self.pool1(x)
        x = self.block2(x)
        f2_pre = x
        x = F.relu(x)
        f2 = x
        x = self.pool2(x)
        x = self.block3(x)
        f3_pre = x
        x = F.relu(x)
        f3 = x
        # pool3 is applied only when the input was 64x64
        if h == 64:
            x = self.pool3(x)
        x = self.block4(x)
        f4_pre = x
        x = F.relu(x)
        f4 = x
        x = self.pool4(x)
        x = x.view(x.size(0), -1)
        f5 = x
        x = self.classifier(x)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], x
            else:
                return [f0, f1, f2, f3, f4, f5], x
        else:
            return x
    @staticmethod
    def _make_layers(cfg, batch_norm=False, in_channels=3):
        """Build one conv block; the trailing ReLU is dropped so the block ends pre-activation."""
        layers = []
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        layers = layers[:-1]
        return nn.Sequential(*layers)
    def _initialize_weights(self):
        """He-style init for convs, identity affine for BN, N(0, 0.01) for linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
# Per-block channel lists for each VGG variant (pooling is handled by the model,
# not by 'M' markers, which _make_layers nevertheless supports).
cfg = {
    'A': [[64], [128], [256, 256], [512, 512], [512, 512]],
    'B': [[64, 64], [128, 128], [256, 256], [512, 512], [512, 512]],
    'D': [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]],
    'E': [[64, 64], [128, 128], [256, 256, 256, 256], [512, 512, 512, 512], [512, 512, 512, 512]],
    'S': [[64], [128], [256], [512], [512]],
}
def vgg8(**kwargs):
    """VGG 8-layer model (configuration "S"); kwargs are forwarded to VGG."""
    model = VGG(cfg['S'], **kwargs)
    return model
def vgg8_bn(**kwargs):
    """VGG 8-layer model (configuration "S") with batch normalization."""
    model = VGG(cfg['S'], batch_norm=True, **kwargs)
    return model
def vgg11(**kwargs):
    """VGG 11-layer model (configuration "A"); kwargs are forwarded to VGG."""
    model = VGG(cfg['A'], **kwargs)
    return model
def vgg11_bn(**kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization"""
    model = VGG(cfg['A'], batch_norm=True, **kwargs)
    return model
def vgg13(**kwargs):
    """VGG 13-layer model (configuration "B"); kwargs are forwarded to VGG."""
    model = VGG(cfg['B'], **kwargs)
    return model
def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization"""
    model = VGG(cfg['B'], batch_norm=True, **kwargs)
    return model
def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D"); kwargs are forwarded to VGG."""
    model = VGG(cfg['D'], **kwargs)
    return model
def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization"""
    model = VGG(cfg['D'], batch_norm=True, **kwargs)
    return model
def vgg19(**kwargs):
    """VGG 19-layer model (configuration "E"); kwargs are forwarded to VGG."""
    model = VGG(cfg['E'], **kwargs)
    return model
def vgg19_bn(**kwargs):
    """VGG 19-layer model (configuration 'E') with batch normalization"""
    model = VGG(cfg['E'], batch_norm=True, **kwargs)
    return model
if __name__ == '__main__':
    # Smoke test: forward a random batch, print feature shapes and verify
    # get_bn_before_relu() returns BatchNorm2d modules.
    import torch
    x = torch.randn(2, 3, 128, 128)
    net = vgg13_bn(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 7,304 | 28.695122 | 98 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/densenet.py | '''DenseNet in PyTorch.
To fit 128x128 images, I modified the first conv layer and add an extra max_pool2d after it.
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
NC=3
IMG_SIZE = 128
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1 conv then BN-ReLU-3x3 conv,
    producing ``growth_rate`` new maps that are concatenated before the input."""
    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        bottleneck_width = 4 * growth_rate
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, bottleneck_width, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(bottleneck_width)
        self.conv2 = nn.Conv2d(bottleneck_width, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        new_features = self.conv2(F.relu(self.bn2(new_features)))
        # new maps come first, then the untouched input channels
        return torch.cat([new_features, x], 1)
class Transition(nn.Module):
    """DenseNet transition: BN-ReLU-1x1 conv to compress channels, then 2x2 average pool."""
    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        compressed = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(compressed, 2)
class DenseNet(nn.Module):
    """DenseNet-BC adapted to 128x128 inputs (stride-2 stem conv plus max-pool).

    ``nblocks`` gives the number of bottleneck layers in each of the four
    dense blocks; ``reduction`` is the transition-layer compression factor.
    """
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        num_planes = 2*growth_rate
        # self.conv1 = nn.Conv2d(NC, num_planes, kernel_size=3, padding=1, bias=False)
        self.conv1 = nn.Sequential(
            nn.Conv2d(NC, num_planes, kernel_size=4, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(num_planes),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        # each dense block grows channels by nblocks[i]*growth_rate,
        # then a transition compresses them by `reduction`
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate
        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)
    def _make_dense_layers(self, block, in_planes, nblock):
        """Chain ``nblock`` dense layers; input channels grow by growth_rate per layer."""
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return classification logits."""
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
# Standard DenseNet-BC presets: (layers per dense block, growth rate).
def DenseNet121(num_classes=10):
    return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32, num_classes=num_classes)
def DenseNet169(num_classes=10):
    return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32, num_classes=num_classes)
def DenseNet201(num_classes=10):
    return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32, num_classes=num_classes)
def DenseNet161(num_classes=10):
    return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48, num_classes=num_classes)
def test_densenet():
    """Smoke test: forward a random batch through DenseNet121 and print the output shape."""
    net = DenseNet121(num_classes=100)
    x = torch.randn(2,NC,IMG_SIZE,IMG_SIZE)
    y = net(Variable(x))
    print(y.shape)
if __name__ == "__main__":
    test_densenet()
| 4,125 | 32.544715 | 96 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/classifier.py | from __future__ import print_function
import torch.nn as nn
#########################################
# ===== Classifiers ===== #
#########################################
class LinearClassifier(nn.Module):
    """Single fully-connected layer mapping ``dim_in`` features to ``n_label`` logits."""
    def __init__(self, dim_in, n_label=100):
        super(LinearClassifier, self).__init__()
        self.net = nn.Linear(dim_in, n_label)
    def forward(self, x):
        return self.net(x)
class NonLinearClassifier(nn.Module):
    """Two-layer MLP head: Linear -> Dropout -> BatchNorm1d -> ReLU -> Linear."""
    def __init__(self, dim_in, n_label=100, p=0.1):
        super(NonLinearClassifier, self).__init__()
        hidden = 200
        stages = [
            nn.Linear(dim_in, hidden),
            nn.Dropout(p=p),
            nn.BatchNorm1d(hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, n_label),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        return self.net(x)
| 821 | 21.833333 | 51 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/resnetv2.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two 3x3 convs with an identity (or projected 1x1-conv) shortcut.

    When ``is_last`` is True, ``forward`` returns ``(out, preact)`` where
    ``preact`` is the residual sum before the final ReLU.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # projection shortcut only when spatial size or channel count changes
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        preact = out + self.shortcut(x)
        out = F.relu(preact)
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand residual block (output = planes * 4 channels).

    When ``is_last`` is True, ``forward`` returns ``(out, preact)`` where
    ``preact`` is the residual sum before the final ReLU.
    """
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # projection shortcut only when spatial size or channel count changes
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        preact = out + self.shortcut(x)
        out = F.relu(preact)
        return (out, preact) if self.is_last else out
class ResNet(nn.Module):
    """Four-stage ResNet (CIFAR-style blocks) adapted to 128x128 ImageNet-100 inputs.

    The stem conv is stride-2 and ``forward`` adds a max-pool before the
    residual stages; global average pooling makes the head input-size
    agnostic.
    """
    def __init__(self, block, num_blocks, num_classes=10, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        # self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) #for cifar
        self.conv1 = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False) #for imagenet100, h=h//2
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512 * block.expansion, num_classes)
        # He init for convs, identity affine for norm layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def get_feat_modules(self):
        """Feature-producing modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        feat_m.append(self.layer4)
        return feat_m
    def get_bn_before_relu(self):
        """Last pre-ReLU BatchNorm of each of the four stages."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
            bn4 = self.layer4[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
            bn4 = self.layer4[-1].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3, bn4]
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first uses ``stride``, the last sets is_last."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for i in range(num_blocks):
            stride = strides[i]
            layers.append(block(self.in_planes, planes, stride, i == num_blocks - 1))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x, is_feat=False, preact=False):
        """Return logits; with ``is_feat`` also return stage features.

        NOTE(review): the preact branch returns a nested list [[...], out]
        rather than the ([...], out) tuple used elsewhere — confirm callers
        expect this.
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1) ##extra maxpool2d
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out, f4_pre = self.layer4(out)
        f4 = out
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                return [[f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], out]
            else:
                return [f0, f1, f2, f3, f4, f5], out
        else:
            return out
# Standard ResNet depths built from (block type, blocks-per-stage) presets.
def ResNet18(**kwargs):
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def ResNet34(**kwargs):
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def ResNet50(**kwargs):
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def ResNet101(**kwargs):
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def ResNet152(**kwargs):
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if __name__ == '__main__':
    # Smoke test: forward a random batch, print feature shapes and verify
    # get_bn_before_relu() returns BatchNorm2d modules.
    net = ResNet18(num_classes=100)
    x = torch.randn(2, 3, 128, 128)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 7,123 | 34.442786 | 110 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/ShuffleNetv1.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
To fit 128x128 images, I modified the first conv layer and add an extra max_pool2d after it (Following Table 5 of "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design")
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle for grouped convolutions.

    Reorders channels so information mixes across groups:
    [N,C,H,W] -> [N,g,C/g,H,W] -> swap the two group axes -> [N,C,H,W].
    """
    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        batch, channels, height, width = x.size()
        per_group = channels // self.groups
        grouped = x.view(batch, self.groups, per_group, height, width)
        return grouped.permute(0, 2, 1, 3, 4).reshape(batch, channels, height, width)
class Bottleneck(nn.Module):
    """ShuffleNet unit: grouped 1x1 -> channel shuffle -> depthwise 3x3 -> grouped 1x1.

    stride 1: residual addition with the input.
    stride 2: the average-pooled input is concatenated with the branch
    output, so output channels are ``out_planes + in_planes``.
    When ``is_last`` is True, ``forward`` also returns the pre-ReLU tensor.
    """
    def __init__(self, in_planes, out_planes, stride, groups, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.stride = stride
        mid_planes = int(out_planes / 4)
        # the very first unit (24-channel stem) uses an ungrouped 1x1 conv
        first_groups = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=first_groups, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=first_groups)
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.shuffle1(branch)
        branch = F.relu(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        identity = self.shortcut(x)
        if self.stride == 2:
            preact = torch.cat([branch, identity], 1)
        else:
            preact = branch + identity
        out = F.relu(preact)
        # out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out+res)
        return (out, preact) if self.is_last else out
class ShuffleNet(nn.Module):
    """ShuffleNet backbone adapted to 128x128 inputs (stride-2 stem conv + max-pool).

    ``cfg`` supplies per-stage output channels, block counts and the group
    count for the grouped 1x1 convolutions.
    """
    def __init__(self, cfg, num_classes=10):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False) #original
        self.conv1 = nn.Conv2d(3, 24, kernel_size=4, padding=1, stride=2, bias=False) #h=h//2
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], num_classes)
    def _make_layer(self, out_planes, num_blocks, groups):
        """Build one stage; the first block downsamples and its concat shortcut
        contributes ``in_planes`` channels, hence the reduced branch width."""
        layers = []
        for i in range(num_blocks):
            stride = 2 if i == 0 else 1
            cat_planes = self.in_planes if i == 0 else 0
            layers.append(Bottleneck(self.in_planes, out_planes-cat_planes,
                                     stride=stride,
                                     groups=groups,
                                     is_last=(i == num_blocks - 1)))
            self.in_planes = out_planes
        return nn.Sequential(*layers)
    def get_feat_modules(self):
        """Feature-producing modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m
    def get_bn_before_relu(self):
        raise NotImplementedError('ShuffleNet currently is not supported for "Overhaul" teacher')
    def forward(self, x, is_feat=False, preact=False):
        """Return logits; with ``is_feat`` also return stage features
        (pre-activation versions when ``preact`` is True)."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1) ##extra maxpool2d
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        f4 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], out
            else:
                return [f0, f1, f2, f3, f4], out
        else:
            return out
def ShuffleV1(**kwargs):
    """ShuffleNet v1 configuration with 3 groups; kwargs are forwarded to ShuffleNet."""
    cfg = {
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3
    }
    return ShuffleNet(cfg, **kwargs)
if __name__ == '__main__':
    # Smoke test: time a forward pass and print feature/logit shapes.
    x = torch.randn(2, 3, 128, 128)
    net = ShuffleV1(num_classes=100)
    import time
    a = time.time()
    feats, logit = net(x, is_feat=True, preact=True)
    b = time.time()
    print(b - a)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
| 5,107 | 34.472222 | 190 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/util.py | from __future__ import print_function
import torch.nn as nn
import math
class Paraphraser(nn.Module):
    """Paraphrasing Complex Network: Network Compression via Factor Transfer.

    Encodes teacher feature maps (``t_shape[1]`` channels) into a factor with
    ``int(t_shape[1] * k)`` channels, and decodes the factor back for
    reconstruction training.
    """

    def __init__(self, t_shape, k=0.5, use_bn=False):
        super(Paraphraser, self).__init__()
        cin = t_shape[1]
        cout = int(t_shape[1] * k)
        enc_layers = []
        enc_layers += self._unit(nn.Conv2d(cin, cin, 3, 1, 1), cin, use_bn)
        enc_layers += self._unit(nn.Conv2d(cin, cout, 3, 1, 1), cout, use_bn)
        enc_layers += self._unit(nn.Conv2d(cout, cout, 3, 1, 1), cout, use_bn)
        self.encoder = nn.Sequential(*enc_layers)
        dec_layers = []
        dec_layers += self._unit(nn.ConvTranspose2d(cout, cout, 3, 1, 1), cout, use_bn)
        dec_layers += self._unit(nn.ConvTranspose2d(cout, cin, 3, 1, 1), cin, use_bn)
        dec_layers += self._unit(nn.ConvTranspose2d(cin, cin, 3, 1, 1), cin, use_bn)
        self.decoder = nn.Sequential(*dec_layers)

    @staticmethod
    def _unit(conv, channels, use_bn):
        # conv followed by optional BatchNorm (a no-op Sequential otherwise) and LeakyReLU
        norm = nn.BatchNorm2d(channels) if use_bn else nn.Sequential()
        return [conv, norm, nn.LeakyReLU(0.1, inplace=True)]

    def forward(self, f_s, is_factor=False):
        factor = self.encoder(f_s)
        if is_factor:
            return factor
        return factor, self.decoder(factor)
class Translator(nn.Module):
    """Encoder mapping student features (``s_shape[1]`` channels) to the
    teacher factor space with ``int(t_shape[1] * k)`` channels (Factor Transfer)."""
    def __init__(self, s_shape, t_shape, k=0.5, use_bn=True):
        super(Translator, self).__init__()
        cin = s_shape[1]
        cout = int(t_shape[1] * k)
        layers = []
        # three conv units: keep width, shrink to the factor width, keep width
        for src, dst in ((cin, cin), (cin, cout), (cout, cout)):
            layers.append(nn.Conv2d(src, dst, 3, 1, 1))
            layers.append(nn.BatchNorm2d(dst) if use_bn else nn.Sequential())
            layers.append(nn.LeakyReLU(0.1, inplace=True))
        self.encoder = nn.Sequential(*layers)

    def forward(self, f_s):
        return self.encoder(f_s)
class Connector(nn.Module):
    """Connect for Knowledge Transfer via Distillation of Activation Boundaries Formed by Hidden Neurons"""
    def __init__(self, s_shapes, t_shapes):
        super(Connector, self).__init__()
        self.s_shapes = s_shapes
        self.t_shapes = t_shapes
        self.connectors = nn.ModuleList(self._make_conenctors(s_shapes, t_shapes))
    @staticmethod
    def _make_conenctors(s_shapes, t_shapes):
        # One connector per feature pair: identity when both channel count
        # and spatial size already match, otherwise a ConvReg regressor.
        assert len(s_shapes) == len(t_shapes), 'unequal length of feat list'
        connectors = []
        for s, t in zip(s_shapes, t_shapes):
            if s[1] == t[1] and s[2] == t[2]:
                connectors.append(nn.Sequential())
            else:
                connectors.append(ConvReg(s, t, use_relu=False))
        return connectors
    def forward(self, g_s):
        # Apply the i-th connector to the i-th student feature.
        out = []
        for i in range(len(g_s)):
            out.append(self.connectors[i](g_s[i]))
        return out
class ConnectorV2(nn.Module):
    """A Comprehensive Overhaul of Feature Distillation (ICCV 2019)"""
    def __init__(self, s_shapes, t_shapes):
        super(ConnectorV2, self).__init__()
        self.s_shapes = s_shapes
        self.t_shapes = t_shapes
        self.connectors = nn.ModuleList(self._make_conenctors(s_shapes, t_shapes))
    def _make_conenctors(self, s_shapes, t_shapes):
        # Build a 1x1-conv + BN adapter per feature pair; only the channel
        # dims (index 1 of each shape) are used.
        assert len(s_shapes) == len(t_shapes), 'unequal length of feat list'
        t_channels = [t[1] for t in t_shapes]
        s_channels = [s[1] for s in s_shapes]
        connectors = nn.ModuleList([self._build_feature_connector(t, s)
                                    for t, s in zip(t_channels, s_channels)])
        return connectors
    @staticmethod
    def _build_feature_connector(t_channel, s_channel):
        # 1x1 conv (He-style init) to map student channels onto teacher
        # channels, followed by BN initialized to identity scale.
        C = [nn.Conv2d(s_channel, t_channel, kernel_size=1, stride=1, padding=0, bias=False),
             nn.BatchNorm2d(t_channel)]
        for m in C:
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return nn.Sequential(*C)
    def forward(self, g_s):
        # Apply the i-th connector to the i-th student feature.
        out = []
        for i in range(len(g_s)):
            out.append(self.connectors[i](g_s[i]))
        return out
class ConvReg(nn.Module):
    """Convolutional regression for FitNet.

    Maps a student feature map of shape ``s_shape`` (N, C, H, W) onto the
    teacher's ``t_shape`` by picking a conv whose stride/kernel matches the
    spatial relation between the two maps.
    """
    def __init__(self, s_shape, t_shape, use_relu=True):
        super(ConvReg, self).__init__()
        self.use_relu = use_relu
        s_N, s_C, s_H, s_W = s_shape
        t_N, t_C, t_H, t_W = t_shape
        if s_H == 2 * t_H:
            # Student map is exactly twice as large: stride-2 conv halves it.
            self.conv = nn.Conv2d(s_C, t_C, kernel_size=3, stride=2, padding=1)
        elif s_H * 2 == t_H:
            # Student map is exactly half as large: transposed conv doubles it.
            self.conv = nn.ConvTranspose2d(s_C, t_C, kernel_size=4, stride=2, padding=1)
        elif s_H >= t_H:
            # General shrink: valid conv with kernel sized to the gap.
            self.conv = nn.Conv2d(s_C, t_C, kernel_size=(1+s_H-t_H, 1+s_W-t_W))
        else:
            # Fix: the original `raise NotImplemented(...)` called the
            # NotImplemented singleton (a TypeError); raise the proper
            # exception class instead.
            raise NotImplementedError('student size {}, teacher size {}'.format(s_H, t_H))
        self.bn = nn.BatchNorm2d(t_C)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        x = self.conv(x)
        if self.use_relu:
            return self.relu(self.bn(x))
        else:
            return self.bn(x)
class Regress(nn.Module):
    """Simple linear regression head for hint-based distillation:
    flatten -> linear -> ReLU."""
    def __init__(self, dim_in=1024, dim_out=1024):
        super(Regress, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        flat = x.view(x.shape[0], -1)
        return self.relu(self.linear(flat))
class Embed(nn.Module):
    """Embedding head: flatten -> linear projection -> L2 normalization."""
    def __init__(self, dim_in=1024, dim_out=128):
        super(Embed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.l2norm = Normalize(2)
    def forward(self, x):
        flat = x.view(x.shape[0], -1)
        return self.l2norm(self.linear(flat))
class LinearEmbed(nn.Module):
    """Plain linear embedding: flatten then project, no normalization."""
    def __init__(self, dim_in=1024, dim_out=128):
        super(LinearEmbed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
    def forward(self, x):
        flattened = x.view(x.shape[0], -1)
        return self.linear(flattened)
class MLPEmbed(nn.Module):
    """Two-layer MLP embedding with a ReLU in between, L2-normalized output."""
    def __init__(self, dim_in=1024, dim_out=128):
        super(MLPEmbed, self).__init__()
        self.linear1 = nn.Linear(dim_in, 2 * dim_out)
        self.relu = nn.ReLU(inplace=True)
        self.linear2 = nn.Linear(2 * dim_out, dim_out)
        self.l2norm = Normalize(2)
    def forward(self, x):
        hidden = self.relu(self.linear1(x.view(x.shape[0], -1)))
        return self.l2norm(self.linear2(hidden))
class Normalize(nn.Module):
    """Lp normalization along dim 1 (each row scaled to unit Lp norm)."""
    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power
    def forward(self, x):
        p = self.power
        scale = x.pow(p).sum(1, keepdim=True).pow(1. / p)
        return x.div(scale)
class Flatten(nn.Module):
    """Collapse all dims after the batch dim into one."""
    def __init__(self):
        super(Flatten, self).__init__()
    def forward(self, feat):
        batch = feat.size(0)
        return feat.view(batch, -1)
class PoolEmbed(nn.Module):
    """pool and embed"""
    def __init__(self, layer=0, dim_out=128, pool_type='avg'):
        # layer selects the (pool_size, nChannels) pair matching the feature
        # map emitted at that depth of the backbone; layer 4 is the final
        # (already pooled) feature and therefore skips the pooling stage.
        super().__init__()
        if layer == 0:
            pool_size = 8
            nChannels = 16
        elif layer == 1:
            pool_size = 8
            nChannels = 16
        elif layer == 2:
            pool_size = 6
            nChannels = 32
        elif layer == 3:
            pool_size = 4
            nChannels = 64
        elif layer == 4:
            pool_size = 1
            nChannels = 64
        else:
            raise NotImplementedError('layer not supported: {}'.format(layer))
        # Pipeline: (adaptive pool) -> flatten -> linear -> L2 normalize.
        self.embed = nn.Sequential()
        if layer <= 3:
            if pool_type == 'max':
                self.embed.add_module('MaxPool', nn.AdaptiveMaxPool2d((pool_size, pool_size)))
            elif pool_type == 'avg':
                self.embed.add_module('AvgPool', nn.AdaptiveAvgPool2d((pool_size, pool_size)))
        self.embed.add_module('Flatten', Flatten())
        self.embed.add_module('Linear', nn.Linear(nChannels*pool_size*pool_size, dim_out))
        self.embed.add_module('Normalize', Normalize(2))
    def forward(self, x):
        return self.embed(x)
if __name__ == '__main__':
    # Smoke test: check that ConnectorV2 maps student feature shapes onto
    # the teacher's channel counts while preserving spatial sizes.
    import torch
    g_s = [
        torch.randn(2, 16, 16, 16),
        torch.randn(2, 32, 8, 8),
        torch.randn(2, 64, 4, 4),
    ]
    g_t = [
        torch.randn(2, 32, 16, 16),
        torch.randn(2, 64, 8, 8),
        torch.randn(2, 128, 4, 4),
    ]
    s_shapes = [s.shape for s in g_s]
    t_shapes = [t.shape for t in g_t]
    net = ConnectorV2(s_shapes, t_shapes)
    out = net(g_s)
    for f in out:
        print(f.shape)
| 9,622 | 32.068729 | 107 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/ShuffleNetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
To fit 128x128 images, I modified the first conv layer and add an extra max_pool2d after it (Following Table 5 of "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design")
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle: interleave channels across groups so information
    mixes between the grouped-conv branches."""
    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups
    def forward(self, x):
        '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'''
        batch, channels, height, width = x.size()
        grouped = x.view(batch, self.groups, channels // self.groups, height, width)
        transposed = grouped.permute(0, 2, 1, 3, 4)
        return transposed.reshape(batch, channels, height, width)
class SplitBlock(nn.Module):
    """Split a tensor along the channel dim at the given ratio."""
    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio
    def forward(self, x):
        split_at = int(x.size(1) * self.ratio)
        left = x[:, :split_at]
        right = x[:, split_at:]
        return left, right
class BasicBlock(nn.Module):
    # ShuffleNetV2 basic unit: split channels, transform one half with a
    # 1x1 -> 3x3 depthwise -> 1x1 stack, concat with the untouched half,
    # then channel-shuffle. When is_last=True the block also returns the
    # pre-activation tensor (consumed by ShuffleNetV2.forward and
    # preact-based distillers).
    def __init__(self, in_channels, split_ratio=0.5, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.split = SplitBlock(split_ratio)
        # After the split only this many channels pass through the convs.
        in_channels = int(in_channels * split_ratio)
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        # Depthwise 3x3 (groups == channels).
        self.conv2 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        self.conv3 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        x1, x2 = self.split(x)
        out = F.relu(self.bn1(self.conv1(x2)))
        out = self.bn2(self.conv2(out))
        # Keep the pre-ReLU activation separately for preact distillation.
        preact = self.bn3(self.conv3(out))
        out = F.relu(preact)
        # out = F.relu(self.bn3(self.conv3(out)))
        preact = torch.cat([x1, preact], 1)
        out = torch.cat([x1, out], 1)
        out = self.shuffle(out)
        if self.is_last:
            return out, preact
        else:
            return out
class DownBlock(nn.Module):
    # ShuffleNetV2 spatial-downsampling unit: two stride-2 branches whose
    # outputs (mid_channels each) are concatenated to out_channels and
    # then channel-shuffled.
    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2
        # left branch: depthwise stride-2 then pointwise
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # right branch: pointwise, depthwise stride-2, pointwise
        self.conv3 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(mid_channels)
        self.conv4 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
        self.bn4 = nn.BatchNorm2d(mid_channels)
        self.conv5 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(mid_channels)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        # left
        out1 = self.bn1(self.conv1(x))
        out1 = F.relu(self.bn2(self.conv2(out1)))
        # right
        out2 = F.relu(self.bn3(self.conv3(x)))
        out2 = self.bn4(self.conv4(out2))
        out2 = F.relu(self.bn5(self.conv5(out2)))
        # concat
        out = torch.cat([out1, out2], 1)
        out = self.shuffle(out)
        return out
class ShuffleNetV2(nn.Module):
    # ShuffleNetV2 backbone adapted for 128x128 inputs: the stem conv is
    # stride-2 and an extra max-pool follows it (see the module docstring).
    # net_size indexes the module-level `configs` dict.
    def __init__(self, net_size, num_classes=10):
        super(ShuffleNetV2, self).__init__()
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.conv1 = nn.Conv2d(3, 24, kernel_size=4, padding=1, stride=2, bias=False) #h=h//2
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        self.linear = nn.Linear(out_channels[3], num_classes)
    def _make_layer(self, out_channels, num_blocks):
        # One stride-2 DownBlock followed by num_blocks BasicBlocks; the
        # final BasicBlock is flagged is_last so the stage returns
        # (out, preact) — forward() unpacks that tuple.
        layers = [DownBlock(self.in_channels, out_channels)]
        for i in range(num_blocks):
            layers.append(BasicBlock(out_channels, is_last=(i == num_blocks - 1)))
        self.in_channels = out_channels
        return nn.Sequential(*layers)
    def get_feat_modules(self):
        # Ordered feature extractors used by distillation methods.
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m
    def get_bn_before_relu(self):
        raise NotImplementedError('ShuffleNetV2 currently is not supported for "Overhaul" teacher')
    def forward(self, x, is_feat=False, preact=False):
        # is_feat=True additionally returns intermediate features; preact
        # selects pre-ReLU variants of f1..f3.
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1)
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out = F.relu(self.bn2(self.conv2(out)))
        # NOTE(review): the fixed 4x4 avg-pool assumes a 128x128 input
        # (128 -> 64 stem -> 32 pool -> 16/8/4 through the three stages).
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        f4 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], out
            else:
                return [f0, f1, f2, f3, f4], out
        else:
            return out
# Width configurations keyed by the net_size multiplier: per-stage output
# channels (last entry is the final 1x1 conv width) and blocks per stage.
configs = {
    0.2: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 3, 3)
    },
    0.3: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 7, 3)
    },
    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },
    1: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2: {
        'out_channels': (224, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
def ShuffleV2(**kwargs):
    """Build the 1.0x-width ShuffleNetV2 variant."""
    return ShuffleNetV2(net_size=1, **kwargs)
if __name__ == '__main__':
    # Smoke test: time one forward pass on a 128x128 batch and print the
    # feature/logit shapes.
    net = ShuffleV2(num_classes=100)
    x = torch.randn(2, 3, 128, 128)
    import time
    a = time.time()
    feats, logit = net(x, is_feat=True, preact=True)
    b = time.time()
    print(b - a)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
| 7,241 | 33.160377 | 190 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Original Author: Wei Yang
"""
__all__ = ['wrn']
class BasicBlock(nn.Module):
    # Pre-activation residual block for WideResNet: BN-ReLU precedes each
    # conv, and the shortcut is a 1x1 conv only when in/out planes differ.
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # `A and B or C` selection idiom: 1x1 projection shortcut when the
        # plane counts differ, otherwise None (identity shortcut).
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                               padding=0, bias=False) or None
    def forward(self, x):
        # When planes differ, the shared BN-ReLU output also feeds the
        # projection shortcut; otherwise x itself is the shortcut.
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A stack of ``nb_layers`` residual blocks.

    Only the first block may change the channel count (in_planes ->
    out_planes) and apply the given stride; subsequent blocks keep
    out_planes channels and stride 1.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        # Fix: the original used the `i == 0 and X or Y` idiom, which
        # selects Y whenever X is falsy (e.g. in_planes == 0 or stride == 0);
        # explicit conditionals select by position, not truthiness.
        layers = []
        for i in range(nb_layers):
            first = (i == 0)
            layers.append(block(in_planes if first else out_planes,
                                out_planes,
                                stride if first else 1,
                                dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    # Wide ResNet (Zagoruyko & Komodakis) adapted for ImageNet-100 128x128
    # inputs: the stem conv is stride-2 and an extra max-pool follows it.
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        # n = blocks per stage (two convs per block, three stages, +4 stem/head).
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        # self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False) #for cifar
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=4, stride=2, padding=1, bias=False) #for imagenet100
        self.bn0 = nn.BatchNorm2d(nChannels[0])
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He init for convs, identity-scale BN, zero linear bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def get_feat_modules(self):
        # Ordered feature extractors used by distillation methods.
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.block1)
        feat_m.append(self.block2)
        feat_m.append(self.block3)
        return feat_m
    def get_bn_before_relu(self):
        # BN layers immediately preceding a ReLU, per stage (Overhaul-style).
        bn1 = self.block2.layer[0].bn1
        bn2 = self.block3.layer[0].bn1
        bn3 = self.bn1
        return [bn1, bn2, bn3]
    def forward(self, x, is_feat=False, preact=False):
        # is_feat=True additionally returns [f0..f4]; preact=True swaps
        # f1..f3 for BN-transformed (pre-activation style) variants.
        # out = self.conv1(x) #for cifar
        out = F.relu(self.bn0(self.conv1(x)))
        out = F.max_pool2d(out, 3, stride=2, padding=1) ##extra maxpool2d
        f0 = out
        out = self.block1(out)
        f1 = out
        out = self.block2(out)
        f2 = out
        out = self.block3(out)
        f3 = out
        out = self.relu(self.bn1(out))
        # NOTE(review): the fixed 8x8 avg-pool assumes a 128x128 input.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        f4 = out
        out = self.fc(out)
        if is_feat:
            if preact:
                f1 = self.block2.layer[0].bn1(f1)
                f2 = self.block3.layer[0].bn1(f2)
                f3 = self.bn1(f3)
            return [f0, f1, f2, f3, f4], out
        else:
            return out
def wrn(**kwargs):
    """Construct a Wide Residual Network from explicit depth/widen_factor kwargs."""
    return WideResNet(**kwargs)

def wrn_40_2(**kwargs):
    """WRN-40-2: depth 40, width multiplier 2."""
    return WideResNet(depth=40, widen_factor=2, **kwargs)

def wrn_40_1(**kwargs):
    """WRN-40-1: depth 40, width multiplier 1."""
    return WideResNet(depth=40, widen_factor=1, **kwargs)

def wrn_16_2(**kwargs):
    """WRN-16-2: depth 16, width multiplier 2."""
    return WideResNet(depth=16, widen_factor=2, **kwargs)

def wrn_16_1(**kwargs):
    """WRN-16-1: depth 16, width multiplier 1."""
    return WideResNet(depth=16, widen_factor=1, **kwargs)
if __name__ == '__main__':
    # Smoke test: forward a 128x128 batch and sanity-check the feature list
    # plus the BN handles used by Overhaul-style distillers.
    import torch
    x = torch.randn(2, 3, 128, 128)
    net = wrn_16_1(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 5,807 | 32.188571 | 116 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/PKT.py | from __future__ import print_function
import torch
import torch.nn as nn
class PKT(nn.Module):
    """Probabilistic Knowledge Transfer for deep representation learning
    Code from author: https://github.com/passalis/probabilistic_kt"""
    def __init__(self):
        super(PKT, self).__init__()

    def forward(self, f_s, f_t):
        return self.cosine_similarity_loss(f_s, f_t)

    @staticmethod
    def cosine_similarity_loss(output_net, target_net, eps=0.0000001):
        # L2-normalize rows; NaNs from all-zero rows are zeroed out.
        def unit_rows(feats):
            row_norm = torch.sqrt(torch.sum(feats ** 2, dim=1, keepdim=True))
            feats = feats / (row_norm + eps)
            feats[feats != feats] = 0
            return feats
        output_net = unit_rows(output_net)
        target_net = unit_rows(target_net)
        # Pairwise cosine similarities within the batch.
        model_similarity = torch.mm(output_net, output_net.transpose(0, 1))
        target_similarity = torch.mm(target_net, target_net.transpose(0, 1))
        # Map from [-1, 1] to [0, 1] and row-normalize into probabilities.
        model_similarity = (model_similarity + 1.0) / 2.0
        target_similarity = (target_similarity + 1.0) / 2.0
        model_similarity = model_similarity / torch.sum(model_similarity, dim=1, keepdim=True)
        target_similarity = target_similarity / torch.sum(target_similarity, dim=1, keepdim=True)
        # KL divergence between teacher and student similarity distributions.
        return torch.mean(target_similarity * torch.log((target_similarity + eps) / (model_similarity + eps)))
| 1,675 | 37.976744 | 110 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/SP.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class Similarity(nn.Module):
    """Similarity-Preserving Knowledge Distillation, ICCV2019, verified by original author"""
    def __init__(self):
        super(Similarity, self).__init__()

    def forward(self, g_s, g_t):
        return [self.similarity_loss(f_s, f_t) for f_s, f_t in zip(g_s, g_t)]

    def similarity_loss(self, f_s, f_t):
        batch = f_s.shape[0]
        s_flat = f_s.view(batch, -1)
        t_flat = f_t.view(batch, -1)
        # Row-normalized batch Gram (similarity) matrices.
        gram_s = torch.nn.functional.normalize(s_flat.mm(s_flat.t()))
        gram_t = torch.nn.functional.normalize(t_flat.mm(t_flat.t()))
        diff = gram_t - gram_s
        # Squared Frobenius distance, averaged over batch^2 entries.
        return (diff * diff).view(-1, 1).sum(0) / (batch * batch)
| 908 | 28.322581 | 93 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/AT.py | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Paying More Attention to Attention: Improving the Performance of Convolutional Neural Networks
    via Attention Transfer
    code: https://github.com/szagoruyko/attention-transfer"""
    def __init__(self, p=2):
        super(Attention, self).__init__()
        self.p = p

    def forward(self, g_s, g_t):
        return [self.at_loss(f_s, f_t) for f_s, f_t in zip(g_s, g_t)]

    def at_loss(self, f_s, f_t):
        # Pool the larger map so spatial sizes agree before comparing.
        h_s, h_t = f_s.shape[2], f_t.shape[2]
        if h_s > h_t:
            f_s = F.adaptive_avg_pool2d(f_s, (h_t, h_t))
        elif h_t > h_s:
            f_t = F.adaptive_avg_pool2d(f_t, (h_s, h_s))
        return (self.at(f_s) - self.at(f_t)).pow(2).mean()

    def at(self, f):
        # Attention map: channel-mean of |a|^p, flattened and L2-normalized.
        return F.normalize(f.pow(self.p).mean(1).view(f.size(0), -1))
| 930 | 30.033333 | 101 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/FitNet.py | from __future__ import print_function
import torch.nn as nn
class HintLoss(nn.Module):
    """Fitnets: hints for thin deep nets, ICLR 2015 — plain MSE between
    student and teacher hint features."""
    def __init__(self):
        super(HintLoss, self).__init__()
        self.crit = nn.MSELoss()
    def forward(self, f_s, f_t):
        return self.crit(f_s, f_t)
| 332 | 21.2 | 54 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/KDSVD.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class KDSVD(nn.Module):
    """
    Self-supervised Knowledge Distillation using Singular Value Decomposition
    original Tensorflow code: https://github.com/sseung0703/SSKD_SVD
    """
    def __init__(self, k=1):
        # k: number of leading singular vectors kept from the teacher;
        # the student keeps k + 3 before alignment.
        super(KDSVD, self).__init__()
        self.k = k
    def forward(self, g_s, g_t):
        # Compare RBF affinities between consecutive layers' singular
        # vectors; the first layer only seeds v_sb / v_tb.
        v_sb = None
        v_tb = None
        losses = []
        for i, f_s, f_t in zip(range(len(g_s)), g_s, g_t):
            u_t, s_t, v_t = self.svd(f_t, self.k)
            u_s, s_s, v_s = self.svd(f_s, self.k + 3)
            v_s, v_t = self.align_rsv(v_s, v_t)
            s_t = s_t.unsqueeze(1)
            # Weight both by the teacher's singular values.
            v_t = v_t * s_t
            v_s = v_s * s_t
            if i > 0:
                s_rbf = torch.exp(-(v_s.unsqueeze(2) - v_sb.unsqueeze(1)).pow(2) / 8)
                t_rbf = torch.exp(-(v_t.unsqueeze(2) - v_tb.unsqueeze(1)).pow(2) / 8)
                l2loss = (s_rbf - t_rbf.detach()).pow(2)
                # Zero out NaN/Inf entries before summing.
                l2loss = torch.where(torch.isfinite(l2loss), l2loss, torch.zeros_like(l2loss))
                losses.append(l2loss.sum())
            v_tb = v_t
            v_sb = v_s
        bsz = g_s[0].shape[0]
        losses = [l / bsz for l in losses]
        return losses
    def svd(self, feat, n=1):
        # Truncated SVD of each sample's (HW x C) feature matrix.
        # NOTE(review): `size[2] * size[2]` assumes square maps (H == W);
        # this would mis-reshape non-square inputs — confirm upstream.
        size = feat.shape
        assert len(size) == 4
        x = feat.view(-1, size[1], size[2] * size[2]).transpose(-2, -1)
        u, s, v = torch.svd(x)
        u = self.removenan(u)
        s = self.removenan(s)
        v = self.removenan(v)
        if n > 0:
            u = F.normalize(u[:, :, :n], dim=1)
            s = F.normalize(s[:, :n], dim=1)
            v = F.normalize(v[:, :, :n], dim=1)
        return u, s, v
    @staticmethod
    def removenan(x):
        # Replace non-finite entries with zeros (SVD can emit NaNs).
        x = torch.where(torch.isfinite(x), x, torch.zeros_like(x))
        return x
    @staticmethod
    def align_rsv(a, b):
        # Flip signs of a's singular vectors so they best match b's
        # (SVD sign ambiguity).
        cosine = torch.matmul(a.transpose(-2, -1), b)
        max_abs_cosine, _ = torch.max(torch.abs(cosine), 1, keepdim=True)
        mask = torch.where(torch.eq(max_abs_cosine, torch.abs(cosine)),
                           torch.sign(cosine), torch.zeros_like(cosine))
        a = torch.matmul(a, mask)
        return a, b
| 2,275 | 28.947368 | 94 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/RKD.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class RKDLoss(nn.Module):
    """Relational Knowledge Disitllation, CVPR2019"""
    def __init__(self, w_d=25, w_a=50):
        # w_d / w_a: weights for the distance-wise and angle-wise terms.
        super(RKDLoss, self).__init__()
        self.w_d = w_d
        self.w_a = w_a
    def forward(self, f_s, f_t):
        student = f_s.view(f_s.shape[0], -1)
        teacher = f_t.view(f_t.shape[0], -1)
        # RKD distance loss: match mean-normalized pairwise distances.
        with torch.no_grad():
            t_d = self.pdist(teacher, squared=False)
            mean_td = t_d[t_d > 0].mean()
            t_d = t_d / mean_td
        d = self.pdist(student, squared=False)
        mean_d = d[d > 0].mean()
        d = d / mean_d
        loss_d = F.smooth_l1_loss(d, t_d)
        # RKD Angle loss: match cosines of angles formed by sample triplets.
        with torch.no_grad():
            td = (teacher.unsqueeze(0) - teacher.unsqueeze(1))
            norm_td = F.normalize(td, p=2, dim=2)
            t_angle = torch.bmm(norm_td, norm_td.transpose(1, 2)).view(-1)
        sd = (student.unsqueeze(0) - student.unsqueeze(1))
        norm_sd = F.normalize(sd, p=2, dim=2)
        s_angle = torch.bmm(norm_sd, norm_sd.transpose(1, 2)).view(-1)
        loss_a = F.smooth_l1_loss(s_angle, t_angle)
        loss = self.w_d * loss_d + self.w_a * loss_a
        return loss
    @staticmethod
    def pdist(e, squared=False, eps=1e-12):
        # Pairwise Euclidean distance matrix via the expansion
        # ||a-b||^2 = ||a||^2 + ||b||^2 - 2ab; eps-clamped for sqrt stability.
        e_square = e.pow(2).sum(dim=1)
        prod = e @ e.t()
        res = (e_square.unsqueeze(1) + e_square.unsqueeze(0) - 2 * prod).clamp(min=eps)
        if not squared:
            res = res.sqrt()
        res = res.clone()
        # Force exact zeros on the diagonal (self-distances).
        res[range(len(e)), range(len(e))] = 0
        return res
| 1,681 | 27.508475 | 87 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/VID.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class VIDLoss(nn.Module):
    """Variational Information Distillation for Knowledge Transfer (CVPR 2019),
    code from author: https://github.com/ssahn0215/variational-information-distillation"""
    def __init__(self,
                 num_input_channels,
                 num_mid_channel,
                 num_target_channels,
                 init_pred_var=5.0,
                 eps=1e-5):
        super(VIDLoss, self).__init__()
        def conv1x1(in_channels, out_channels, stride=1):
            return nn.Conv2d(
                in_channels, out_channels,
                kernel_size=1, padding=0,
                bias=False, stride=stride)
        # Predicts the teacher-feature mean from the student feature.
        self.regressor = nn.Sequential(
            conv1x1(num_input_channels, num_mid_channel),
            nn.ReLU(),
            conv1x1(num_mid_channel, num_mid_channel),
            nn.ReLU(),
            conv1x1(num_mid_channel, num_target_channels),
        )
        # Per-channel log-scale parameter; softplus in forward() keeps the
        # predicted variance positive. Initialized so that the softplus of
        # log_scale equals init_pred_var - eps.
        self.log_scale = torch.nn.Parameter(
            np.log(np.exp(init_pred_var-eps)-1.0) * torch.ones(num_target_channels)
            )
        self.eps = eps
    def forward(self, input, target):
        # pool for dimentsion match
        s_H, t_H = input.shape[2], target.shape[2]
        if s_H > t_H:
            input = F.adaptive_avg_pool2d(input, (t_H, t_H))
        elif s_H < t_H:
            target = F.adaptive_avg_pool2d(target, (s_H, s_H))
        else:
            pass
        pred_mean = self.regressor(input)
        # Softplus parameterization: variance = log(1 + exp(log_scale)) + eps.
        pred_var = torch.log(1.0+torch.exp(self.log_scale))+self.eps
        pred_var = pred_var.view(1, -1, 1, 1)
        # Gaussian negative log-likelihood of the teacher feature under the
        # predicted mean/variance (up to additive constants).
        neg_log_prob = 0.5*(
            (pred_mean-target)**2/pred_var+torch.log(pred_var)
            )
        loss = torch.mean(neg_log_prob)
        return loss
| 1,862 | 32.872727 | 90 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/CC.py | from __future__ import print_function
import torch
import torch.nn as nn
class Correlation(nn.Module):
    """Correlation Congruence for Knowledge Distillation, ICCV 2019.
    The authors nicely shared the code with me. I restructured their code to be
    compatible with my running framework. Credits go to the original author"""
    def __init__(self):
        super(Correlation, self).__init__()

    def forward(self, f_s, f_t):
        # Correlate |student - teacher| gaps between consecutive samples
        # in the batch.
        gap = torch.abs(f_s - f_t)
        return torch.mean((gap[:-1] * gap[1:]).sum(1))
# class Correlation(nn.Module):
# """Similarity-preserving loss. My origianl own reimplementation
# based on the paper before emailing the original authors."""
# def __init__(self):
# super(Correlation, self).__init__()
#
# def forward(self, f_s, f_t):
# return self.similarity_loss(f_s, f_t)
# # return [self.similarity_loss(f_s, f_t) for f_s, f_t in zip(g_s, g_t)]
#
# def similarity_loss(self, f_s, f_t):
# bsz = f_s.shape[0]
# f_s = f_s.view(bsz, -1)
# f_t = f_t.view(bsz, -1)
#
# G_s = torch.mm(f_s, torch.t(f_s))
# G_s = G_s / G_s.norm(2)
# G_t = torch.mm(f_t, torch.t(f_t))
# G_t = G_t / G_t.norm(2)
#
# G_diff = G_t - G_s
# loss = (G_diff * G_diff).view(-1, 1).sum(0) / (bsz * bsz)
# return loss
| 1,384 | 31.209302 | 81 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/FT.py | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
class FactorTransfer(nn.Module):
    """Paraphrasing Complex Network: Network Compression via Factor Transfer, NeurIPS 2018"""
    def __init__(self, p1=2, p2=1):
        super(FactorTransfer, self).__init__()
        self.p1 = p1
        self.p2 = p2

    def forward(self, f_s, f_t):
        return self.factor_loss(f_s, f_t)

    def factor_loss(self, f_s, f_t):
        # Pool the larger map so spatial sizes agree before comparing.
        h_s, h_t = f_s.shape[2], f_t.shape[2]
        if h_s > h_t:
            f_s = F.adaptive_avg_pool2d(f_s, (h_t, h_t))
        elif h_t > h_s:
            f_t = F.adaptive_avg_pool2d(f_t, (h_s, h_s))
        gap = self.factor(f_s) - self.factor(f_t)
        # p2 == 1 is an L1 comparison; otherwise raise to the p2-th power.
        if self.p2 == 1:
            return gap.abs().mean()
        return gap.pow(self.p2).mean()

    def factor(self, f):
        # "Factor": channel-mean of |a|^p1, flattened and L2-normalized.
        return F.normalize(f.pow(self.p1).mean(1).view(f.size(0), -1))
| 981 | 29.6875 | 93 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/KD.py | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
class DistillKL(nn.Module):
    """Distilling the Knowledge in a Neural Network (Hinton et al.).

    KL divergence between temperature-softened student and teacher logits,
    scaled by T^2 so gradient magnitudes stay comparable across temperatures,
    and averaged per sample.
    """
    def __init__(self, T):
        super(DistillKL, self).__init__()
        self.T = T  # softmax temperature
    def forward(self, y_s, y_t):
        p_s = F.log_softmax(y_s/self.T, dim=1)
        p_t = F.softmax(y_t/self.T, dim=1)
        # reduction='sum' replaces the deprecated size_average=False
        # (same behavior); dividing by batch size gives a per-sample mean.
        loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T**2) / y_s.shape[0]
        return loss
| 493 | 26.444444 | 82 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/AB.py | from __future__ import print_function
import torch
import torch.nn as nn
class ABLoss(nn.Module):
    """Knowledge Transfer via Distillation of Activation Boundaries Formed by Hidden Neurons
    code: https://github.com/bhheo/AB_distillation
    """
    def __init__(self, feat_num, margin=1.0):
        super(ABLoss, self).__init__()
        # Geometric weights: deepest feature weighted 1, halving backwards.
        self.w = [2**(i-feat_num+1) for i in range(feat_num)]
        self.margin = margin

    def forward(self, g_s, g_t):
        bsz = g_s[0].shape[0]
        scaled = []
        for weight, (s, t) in zip(self.w, zip(g_s, g_t)):
            loss = weight * self.criterion_alternative_l2(s, t)
            # Per-sample average plus the author's constant rescaling.
            scaled.append(loss / bsz / 1000 * 3)
        return scaled

    def criterion_alternative_l2(self, source, target):
        # Penalize student activations on the wrong side of the teacher's
        # activation boundary, pushed past the margin.
        pos_term = ((source + self.margin) ** 2) * ((source > -self.margin) & (target <= 0)).float()
        neg_term = ((source - self.margin) ** 2) * ((source <= self.margin) & (target > 0)).float()
        return torch.abs(pos_term + neg_term).sum()
| 1,100 | 35.7 | 97 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/FSP.py | from __future__ import print_function
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class FSP(nn.Module):
    """A Gift from Knowledge Distillation:
    Fast Optimization, Network Minimization and Transfer Learning"""
    def __init__(self, s_shapes, t_shapes):
        # Shapes are only validated here: channel counts of student and
        # teacher must match pairwise for the FSP matrices to be comparable.
        super(FSP, self).__init__()
        assert len(s_shapes) == len(t_shapes), 'unequal length of feat list'
        s_c = [s[1] for s in s_shapes]
        t_c = [t[1] for t in t_shapes]
        if np.any(np.asarray(s_c) != np.asarray(t_c)):
            raise ValueError('num of channels not equal (error in FSP)')

    def forward(self, g_s, g_t):
        fsp_s = self.compute_fsp(g_s)
        fsp_t = self.compute_fsp(g_t)
        return [self.compute_loss(s, t) for s, t in zip(fsp_s, fsp_t)]

    @staticmethod
    def compute_loss(s, t):
        return (s - t).pow(2).mean()

    @staticmethod
    def compute_fsp(g):
        # One FSP matrix per consecutive pair of feature maps: the
        # channel-by-channel inner product averaged over spatial positions.
        matrices = []
        for lower, upper in zip(g[:-1], g[1:]):
            h_lo, h_hi = lower.shape[2], upper.shape[2]
            # Pool the larger map so both share a spatial grid.
            if h_lo > h_hi:
                lower = F.adaptive_avg_pool2d(lower, (h_hi, h_hi))
            elif h_hi > h_lo:
                upper = F.adaptive_avg_pool2d(upper, (h_lo, h_lo))
            lo = lower.unsqueeze(1)
            hi = upper.unsqueeze(2)
            lo = lo.view(lo.shape[0], lo.shape[1], lo.shape[2], -1)
            hi = hi.view(hi.shape[0], hi.shape[1], hi.shape[2], -1)
            matrices.append((lo * hi).mean(-1))
        return matrices
| 1,625 | 32.183673 | 76 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/distiller_zoo/NST.py | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
class NSTLoss(nn.Module):
    """like what you like: knowledge distill via neuron selectivity transfer"""
    def __init__(self):
        super(NSTLoss, self).__init__()

    def forward(self, g_s, g_t):
        return [self.nst_loss(f_s, f_t) for f_s, f_t in zip(g_s, g_t)]

    def nst_loss(self, f_s, f_t):
        # Pool the larger map so spatial sizes agree before comparing.
        h_s, h_t = f_s.shape[2], f_t.shape[2]
        if h_s > h_t:
            f_s = F.adaptive_avg_pool2d(f_s, (h_t, h_t))
        elif h_t > h_s:
            f_t = F.adaptive_avg_pool2d(f_t, (h_s, h_s))
        # Flatten spatial dims and L2-normalize each channel's response.
        f_s = F.normalize(f_s.view(f_s.shape[0], f_s.shape[1], -1), dim=2)
        f_t = F.normalize(f_t.view(f_t.shape[0], f_t.shape[1], -1), dim=2)
        # MMD with a polynomial kernel. The teacher-teacher term is constant
        # w.r.t. the student but kept (detached) for a true MMD value;
        # set full_loss False to skip that computation.
        full_loss = True
        if full_loss:
            return (self.poly_kernel(f_t, f_t).mean().detach()
                    + self.poly_kernel(f_s, f_s).mean()
                    - 2 * self.poly_kernel(f_s, f_t).mean())
        return self.poly_kernel(f_s, f_s).mean() - 2 * self.poly_kernel(f_s, f_t).mean()

    def poly_kernel(self, a, b):
        # Degree-2 polynomial kernel between all channel pairs.
        return (a.unsqueeze(1) * b.unsqueeze(2)).sum(-1).pow(2)
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/helper/pretrain.py | from __future__ import print_function, division
import time
import sys
import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn
from .util import AverageMeter
def init(model_s, model_t, init_modules, criterion, train_loader, opt):
    """Pretrain auxiliary ('init') modules needed by some distillers
    (ABound, Factor, FSP) before the main distillation run.

    Only `init_modules` is optimized; the teacher and student networks stay
    in eval mode and are used purely as (frozen) feature extractors.

    Args:
        model_s / model_t: student / teacher networks (must support
            ``is_feat=True`` forward calls returning (features, logits)).
        init_modules: nn.ModuleList of auxiliary modules to warm up.
        criterion: stage-1 loss for the chosen distiller.
        train_loader: loader yielding (input, target, index[, contrast_idx]).
        opt: namespace with model_s, distill, learning_rate, momentum,
            weight_decay, init_epochs.
    Raises:
        NotImplementedError: if ``opt.distill`` has no init-training stage.
    """
    model_t.eval()
    model_s.eval()
    init_modules.train()

    if torch.cuda.is_available():
        model_s.cuda()
        model_t.cuda()
        init_modules.cuda()
        cudnn.benchmark = True

    # factor transfer on small student architectures uses a smaller fixed LR
    if opt.model_s in ['resnet8', 'resnet14', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110',
                       'resnet8x4', 'resnet32x4', 'wrn_16_1', 'wrn_16_2', 'wrn_40_1', 'wrn_40_2'] and \
            opt.distill == 'factor':
        lr = 0.01
    else:
        lr = opt.learning_rate
    optimizer = optim.SGD(init_modules.parameters(),
                          lr=lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    for epoch in range(1, opt.init_epochs + 1):
        batch_time.reset()
        data_time.reset()
        losses.reset()
        end = time.time()
        for idx, data in enumerate(train_loader):
            if opt.distill in ['crd']:
                input, target, index, contrast_idx = data
            else:
                input, target, index = data
            data_time.update(time.time() - end)

            input = input.float()
            if torch.cuda.is_available():
                input = input.cuda()
                target = target.cuda()
                index = index.cuda()
                if opt.distill in ['crd']:
                    contrast_idx = contrast_idx.cuda()

            # ============= forward ==============
            preact = (opt.distill == 'abound')  # ABound matches pre-activation features
            feat_s, _ = model_s(input, is_feat=True, preact=preact)
            with torch.no_grad():
                feat_t, _ = model_t(input, is_feat=True, preact=preact)
                feat_t = [f.detach() for f in feat_t]

            if opt.distill == 'abound':
                g_s = init_modules[0](feat_s[1:-1])
                g_t = feat_t[1:-1]
                loss_group = criterion(g_s, g_t)
                loss = sum(loss_group)
            elif opt.distill == 'factor':
                # train the paraphraser to reconstruct the teacher feature
                f_t = feat_t[-2]
                _, f_t_rec = init_modules[0](f_t)
                loss = criterion(f_t_rec, f_t)
            elif opt.distill == 'fsp':
                loss_group = criterion(feat_s[:-1], feat_t[:-1])
                loss = sum(loss_group)
            else:
                # BUGFIX: the original raised `NotImplemented(...)`, which is a
                # constant (not an exception class) and produced a confusing
                # TypeError instead of the intended error.
                raise NotImplementedError('Not supported in init training: {}'.format(opt.distill))

            losses.update(loss.item(), input.size(0))

            # ===================backward=====================
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

        # end of epoch
        print('Epoch: [{0}/{1}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'losses: {losses.val:.3f} ({losses.avg:.3f})'.format(
               epoch, opt.init_epochs, batch_time=batch_time, losses=losses))
        sys.stdout.flush()
| 3,305 | 34.170213 | 106 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/helper/loops.py | from __future__ import print_function, division
import sys
import time
import torch
from .util import AverageMeter, accuracy
def train_vanilla(epoch, train_loader, model, criterion, optimizer, opt):
    """vanilla training: one epoch of plain supervised training (no distillation).

    Returns (top-1 accuracy average, loss average) over the epoch.
    """
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    tic = time.time()
    for idx, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - tic)

        input = input.float()
        if torch.cuda.is_available():
            input = input.cuda()
            target = target.cuda()
        target = target.type(torch.long)

        # forward pass and loss
        output = model(input)
        loss = criterion(output, target)

        # bookkeeping
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch = input.size(0)
        losses.update(loss.item(), batch)
        top1.update(acc1[0], batch)
        top5.update(acc5[0], batch)

        # backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - tic)
        tic = time.time()

        if idx % opt.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, idx, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
            sys.stdout.flush()

    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    return top1.avg, losses.avg
def train_distill(epoch, train_loader, module_list, criterion_list, optimizer, opt):
    """One epoch distillation.

    module_list layout: [student, (optional auxiliary modules...), teacher].
    criterion_list layout: [classification loss, KL-divergence loss, KD loss].
    Total loss = opt.gamma * CE + opt.alpha * KL + opt.beta * method-specific KD.
    Returns (top-1 accuracy average, loss average) over the epoch.
    """
    # set modules as train()
    for module in module_list:
        module.train()
    # set teacher as eval()
    module_list[-1].eval()

    # these auxiliary modules were pretrained in the init stage and stay frozen
    if opt.distill == 'abound':
        module_list[1].eval()
    elif opt.distill == 'factor':
        module_list[2].eval()

    criterion_cls = criterion_list[0]   # cross-entropy on ground truth
    criterion_div = criterion_list[1]   # KL divergence to teacher logits
    criterion_kd = criterion_list[2]    # method-specific distillation loss

    model_s = module_list[0]
    model_t = module_list[-1]

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    end = time.time()
    for idx, data in enumerate(train_loader):
        # CRD loaders additionally yield the negative-sample indices
        if opt.distill in ['crd']:
            input, target, index, contrast_idx = data
        else:
            input, target, index = data
        data_time.update(time.time() - end)

        input = input.float()
        target = target.type(torch.long)
        if torch.cuda.is_available():
            input = input.cuda()
            target = target.cuda()
            index = index.cuda()
            if opt.distill in ['crd']:
                contrast_idx = contrast_idx.cuda()

        # ===================forward=====================
        preact = False
        if opt.distill in ['abound']:
            preact = True  # ABound matches pre-activation features
        feat_s, logit_s = model_s(input, is_feat=True, preact=preact)
        with torch.no_grad():
            if opt.model_t[0:8]=="densenet":
                assert opt.distill=='kd' ##other KD method does not support densenet
                logit_t = model_t(input)
            else:
                feat_t, logit_t = model_t(input, is_feat=True, preact=preact)
                feat_t = [f.detach() for f in feat_t]

        # cls + kl div
        loss_cls = criterion_cls(logit_s, target)
        loss_div = criterion_div(logit_s, logit_t)

        # other kd beyond KL divergence — each branch selects the features
        # the corresponding method compares
        if opt.distill == 'kd':
            loss_kd = 0
        elif opt.distill == 'hint':
            # FitNets: regress one intermediate student layer onto the teacher's
            f_s = module_list[1](feat_s[opt.hint_layer])
            f_t = feat_t[opt.hint_layer]
            loss_kd = criterion_kd(f_s, f_t)
        elif opt.distill == 'crd':
            f_s = feat_s[-1]
            f_t = feat_t[-1]
            loss_kd = criterion_kd(f_s, f_t, index, contrast_idx)
        elif opt.distill == 'attention':
            g_s = feat_s[1:-1]
            g_t = feat_t[1:-1]
            loss_group = criterion_kd(g_s, g_t)
            loss_kd = sum(loss_group)
        elif opt.distill == 'nst':
            g_s = feat_s[1:-1]
            g_t = feat_t[1:-1]
            loss_group = criterion_kd(g_s, g_t)
            loss_kd = sum(loss_group)
        elif opt.distill == 'similarity':
            g_s = [feat_s[-2]]
            g_t = [feat_t[-2]]
            loss_group = criterion_kd(g_s, g_t)
            loss_kd = sum(loss_group)
        elif opt.distill == 'rkd':
            f_s = feat_s[-1]
            f_t = feat_t[-1]
            loss_kd = criterion_kd(f_s, f_t)
        elif opt.distill == 'pkt':
            f_s = feat_s[-1]
            f_t = feat_t[-1]
            loss_kd = criterion_kd(f_s, f_t)
        elif opt.distill == 'kdsvd':
            g_s = feat_s[1:-1]
            g_t = feat_t[1:-1]
            loss_group = criterion_kd(g_s, g_t)
            loss_kd = sum(loss_group)
        elif opt.distill == 'correlation':
            f_s = module_list[1](feat_s[-1])
            f_t = module_list[2](feat_t[-1])
            loss_kd = criterion_kd(f_s, f_t)
        elif opt.distill == 'vid':
            # one variational criterion per intermediate layer pair
            g_s = feat_s[1:-1]
            g_t = feat_t[1:-1]
            loss_group = [c(f_s, f_t) for f_s, f_t, c in zip(g_s, g_t, criterion_kd)]
            loss_kd = sum(loss_group)
        elif opt.distill == 'abound':
            # can also add loss to this stage
            loss_kd = 0
        elif opt.distill == 'fsp':
            # can also add loss to this stage
            loss_kd = 0
        elif opt.distill == 'factor':
            factor_s = module_list[1](feat_s[-2])
            factor_t = module_list[2](feat_t[-2], is_factor=True)
            loss_kd = criterion_kd(factor_s, factor_t)
        else:
            raise NotImplementedError(opt.distill)

        loss = opt.gamma * loss_cls + opt.alpha * loss_div + opt.beta * loss_kd

        acc1, acc5 = accuracy(logit_s, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))

        # ===================backward=====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # ===================meters=====================
        batch_time.update(time.time() - end)
        end = time.time()

        # print info
        if idx % opt.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, idx, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
            sys.stdout.flush()

    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    return top1.avg, losses.avg
def validate(val_loader, model, criterion, opt):
    """validation: evaluate `model` on `val_loader`.

    Returns (top-1 accuracy average, top-5 accuracy average, loss average).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        tic = time.time()
        for idx, (input, target) in enumerate(val_loader):
            input = input.float()
            if torch.cuda.is_available():
                input = input.cuda()
                target = target.cuda()
            target = target.long()

            # forward pass and loss
            output = model(input)
            loss = criterion(output, target)

            # accuracy / loss bookkeeping
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            batch = input.size(0)
            losses.update(loss.item(), batch)
            top1.update(acc1[0], batch)
            top5.update(acc5[0], batch)

            batch_time.update(time.time() - tic)
            tic = time.time()

            if idx % opt.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       idx, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5))

    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    return top1.avg, top5.avg, losses.avg
| 9,341 | 33.345588 | 85 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/helper/util.py | from __future__ import print_function
import torch
import numpy as np
def adjust_learning_rate_new(epoch, optimizer, LUT):
    """
    new learning rate schedule according to RotNet

    LUT is a list of (boundary_epoch, lr) pairs; the LR of the first entry
    whose boundary exceeds the current epoch is applied. Past the final
    boundary, the last entry's LR is kept.
    """
    chosen = LUT[-1][1]
    for boundary, rate in LUT:
        if boundary > epoch:
            chosen = rate
            break
    for group in optimizer.param_groups:
        group['lr'] = chosen
def adjust_learning_rate(epoch, opt, optimizer):
    """Sets the learning rate to the initial LR decayed by decay rate every steep step"""
    # count how many decay milestones have already been passed
    steps = sum(1 for milestone in opt.lr_decay_epochs if epoch > milestone)
    if steps > 0:
        new_lr = opt.learning_rate * (opt.lr_decay_rate ** steps)
        for group in optimizer.param_groups:
            group['lr'] = new_lr
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all running statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record value `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # indices of the maxk highest-scoring classes, transposed to (maxk, batch)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        hits = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(n_correct.mul_(100.0 / batch_size))
        return res
if __name__ == '__main__':
    pass  # utility module only; no standalone entry point
| 1,786 | 26.921875 | 89 | py |
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/crd/memory.py | import torch
from torch import nn
import math
class ContrastMemory(nn.Module):
    """
    memory buffer that supplies large amount of negative samples.

    Holds two momentum-updated memory banks of shape (outputSize, inputSize):
    `memory_v1` for one view's embeddings and `memory_v2` for the other.
    """
    def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5):
        # inputSize: embedding dimension; outputSize: dataset size
        # K: negatives per positive; T: softmax temperature
        super(ContrastMemory, self).__init__()
        self.nLem = outputSize
        self.unigrams = torch.ones(self.nLem)
        # uniform alias-method sampler over all dataset indices
        self.multinomial = AliasMethod(self.unigrams)
        self.multinomial.cuda()
        self.K = K

        # params = [K, T, Z_v1, Z_v2, momentum]; the Z_* normalization
        # constants start at -1 ("unset") and are estimated on first forward
        self.register_buffer('params', torch.tensor([K, T, -1, -1, momentum]))
        stdv = 1. / math.sqrt(inputSize / 3)
        # banks initialized uniformly in [-stdv, stdv]
        self.register_buffer('memory_v1', torch.rand(outputSize, inputSize).mul_(2 * stdv).add_(-stdv))
        self.register_buffer('memory_v2', torch.rand(outputSize, inputSize).mul_(2 * stdv).add_(-stdv))

    def forward(self, v1, v2, y, idx=None):
        # v1, v2: (batch, inputSize) embeddings of the two views
        # y: (batch,) dataset indices of the positives
        # idx: optional (batch, K+1) sample indices; column 0 is the positive
        K = int(self.params[0].item())
        T = self.params[1].item()
        Z_v1 = self.params[2].item()
        Z_v2 = self.params[3].item()

        momentum = self.params[4].item()
        batchSize = v1.size(0)
        outputSize = self.memory_v1.size(0)
        inputSize = self.memory_v1.size(1)

        # original score computation
        if idx is None:
            # draw K negatives per sample, then force column 0 to the positive
            idx = self.multinomial.draw(batchSize * (self.K + 1)).view(batchSize, -1)
            idx.select(1, 0).copy_(y.data)
        # sample: score v2 against stored v1 embeddings -> (batch, K+1, 1)
        weight_v1 = torch.index_select(self.memory_v1, 0, idx.view(-1)).detach()
        weight_v1 = weight_v1.view(batchSize, K + 1, inputSize)
        out_v2 = torch.bmm(weight_v1, v2.view(batchSize, inputSize, 1))
        out_v2 = torch.exp(torch.div(out_v2, T))
        # sample: score v1 against stored v2 embeddings
        weight_v2 = torch.index_select(self.memory_v2, 0, idx.view(-1)).detach()
        weight_v2 = weight_v2.view(batchSize, K + 1, inputSize)
        out_v1 = torch.bmm(weight_v2, v1.view(batchSize, inputSize, 1))
        out_v1 = torch.exp(torch.div(out_v1, T))

        # set Z if haven't been set yet (one-time NCE normalization estimate)
        if Z_v1 < 0:
            self.params[2] = out_v1.mean() * outputSize
            Z_v1 = self.params[2].clone().detach().item()
            print("normalization constant Z_v1 is set to {:.1f}".format(Z_v1))
        if Z_v2 < 0:
            self.params[3] = out_v2.mean() * outputSize
            Z_v2 = self.params[3].clone().detach().item()
            print("normalization constant Z_v2 is set to {:.1f}".format(Z_v2))

        # compute out_v1, out_v2
        out_v1 = torch.div(out_v1, Z_v1).contiguous()
        out_v2 = torch.div(out_v2, Z_v2).contiguous()

        # update memory: momentum-blend new embeddings in, then re-normalize
        # the updated rows to unit L2 norm
        with torch.no_grad():
            l_pos = torch.index_select(self.memory_v1, 0, y.view(-1))
            l_pos.mul_(momentum)
            l_pos.add_(torch.mul(v1, 1 - momentum))
            l_norm = l_pos.pow(2).sum(1, keepdim=True).pow(0.5)
            updated_v1 = l_pos.div(l_norm)
            self.memory_v1.index_copy_(0, y, updated_v1)

            ab_pos = torch.index_select(self.memory_v2, 0, y.view(-1))
            ab_pos.mul_(momentum)
            ab_pos.add_(torch.mul(v2, 1 - momentum))
            ab_norm = ab_pos.pow(2).sum(1, keepdim=True).pow(0.5)
            updated_v2 = ab_pos.div(ab_norm)
            self.memory_v2.index_copy_(0, y, updated_v2)

        return out_v1, out_v2
class AliasMethod(object):
    """
    From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    """
    def __init__(self, probs):
        # normalize in place if the weights sum to more than one
        if probs.sum() > 1:
            probs.div_(probs.sum())

        K = len(probs)
        self.prob = torch.zeros(K)
        self.alias = torch.LongTensor([0] * K)

        # partition outcomes into those below / at-or-above the uniform 1/K
        under, over = [], []
        for kk, p in enumerate(probs):
            self.prob[kk] = K * p
            (under if self.prob[kk] < 1.0 else over).append(kk)

        # pair each under-full cell with an over-full one so every cell
        # becomes a binary mixture over the uniform base distribution
        while under and over:
            small = under.pop()
            large = over.pop()
            self.alias[small] = large
            self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]
            (under if self.prob[large] < 1.0 else over).append(large)

        # numerical leftovers: remaining cells are (within rounding) full
        for leftover in under + over:
            self.prob[leftover] = 1

    def cuda(self):
        self.prob = self.prob.cuda()
        self.alias = self.alias.cuda()

    def draw(self, N):
        """ Draw N samples from multinomial """
        K = self.alias.size(0)
        # pick a table column uniformly, then bernoulli-choose between that
        # column's own outcome and its alias
        kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
        picked_prob = self.prob.index_select(0, kk)
        picked_alias = self.alias.index_select(0, kk)
        coin = torch.bernoulli(picked_prob)
        own = kk.mul(coin.long())
        alt = picked_alias.mul((1 - coin).long())
        return own + alt
cGAN-KD | cGAN-KD-main/ImageNet-100/RepDistiller/crd/criterion.py | import torch
from torch import nn
from .memory import ContrastMemory
eps = 1e-7
class CRDLoss(nn.Module):
    """CRD Loss function
    includes two symmetric parts:
    (a) using teacher as anchor, choose positive and negatives over the student side
    (b) using student as anchor, choose positive and negatives over the teacher side

    Args:
        opt.s_dim: the dimension of student's feature
        opt.t_dim: the dimension of teacher's feature
        opt.feat_dim: the dimension of the projection space
        opt.nce_k: number of negatives paired with each positive
        opt.nce_t: the temperature
        opt.nce_m: the momentum for updating the memory buffer
        opt.n_data: the number of samples in the training set, therefor the memory buffer is: opt.n_data x opt.feat_dim
    """
    def __init__(self, opt):
        super(CRDLoss, self).__init__()
        # project both networks' features into a shared normalized space
        self.embed_s = Embed(opt.s_dim, opt.feat_dim)
        self.embed_t = Embed(opt.t_dim, opt.feat_dim)
        self.contrast = ContrastMemory(opt.feat_dim, opt.n_data, opt.nce_k, opt.nce_t, opt.nce_m)
        self.criterion_t = ContrastLoss(opt.n_data)
        self.criterion_s = ContrastLoss(opt.n_data)

    def forward(self, f_s, f_t, idx, contrast_idx=None):
        """
        Args:
            f_s: the feature of student network, size [batch_size, s_dim]
            f_t: the feature of teacher network, size [batch_size, t_dim]
            idx: the indices of these positive samples in the dataset, size [batch_size]
            contrast_idx: the indices of negative samples, size [batch_size, nce_k]

        Returns:
            The contrastive loss
        """
        emb_s = self.embed_s(f_s)
        emb_t = self.embed_t(f_t)
        out_s, out_t = self.contrast(emb_s, emb_t, idx, contrast_idx)
        return self.criterion_s(out_s) + self.criterion_t(out_t)
class ContrastLoss(nn.Module):
    """
    contrastive loss, corresponding to Eq (18)
    """
    def __init__(self, n_data):
        super(ContrastLoss, self).__init__()
        self.n_data = n_data

    def forward(self, x):
        batch = x.shape[0]
        m = x.size(1) - 1

        # noise (uniform) probability of any single sample
        Pn = 1 / float(self.n_data)

        # positive pair sits in column 0: log D(pos) = log(P / (P + m*Pn))
        pos = x.select(1, 0)
        log_pos = torch.div(pos, pos.add(m * Pn + eps)).log_()

        # the m negative pairs occupy the remaining columns: log(1 - D(neg))
        neg = x.narrow(1, 1, m)
        log_neg = torch.div(neg.clone().fill_(m * Pn), neg.add(m * Pn + eps)).log_()

        return - (log_pos.sum(0) + log_neg.view(-1, 1).sum(0)) / batch
class Embed(nn.Module):
    """Embedding module: flatten, linear-project, then L2-normalize."""
    def __init__(self, dim_in=1024, dim_out=128):
        super(Embed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.l2norm = Normalize(2)

    def forward(self, x):
        flat = x.view(x.shape[0], -1)
        return self.l2norm(self.linear(flat))
class Normalize(nn.Module):
    """normalization layer: scales each row of `x` to unit Lp norm"""
    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        p = self.power
        row_norm = x.pow(p).sum(1, keepdim=True).pow(1. / p)
        return x.div(row_norm)
| 3,309 | 31.135922 | 119 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/TAKD/utils.py | import numpy as np
import torch
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
import matplotlib as mpl
from torch.nn import functional as F
import sys
import PIL
from PIL import Image
# ################################################################################
# Progress Bar
class SimpleProgressBar():
    """Minimal text progress bar written in place on stdout."""
    def __init__(self, width=50):
        self.last_x = -1
        self.width = width

    def update(self, x):
        assert 0 <= x <= 100  # `x`: progress in percent ( between 0 and 100)
        # skip redraw when the integer percentage has not changed
        if self.last_x == int(x):
            return
        self.last_x = int(x)
        filled = int(self.width * (x / 100.0))
        bar = '#' * filled + '.' * (self.width - filled)
        sys.stdout.write('\r%d%% [%s]' % (int(x), bar))
        sys.stdout.flush()
        if x == 100:
            print('')
################################################################################
# torch dataset from numpy array
class IMGs_dataset(torch.utils.data.Dataset):
    """Torch dataset backed by an in-memory array of RGB images (N, C, H, W),
    with optional labels and an optional PIL-based transform."""
    def __init__(self, images, labels=None, transform=None):
        super(IMGs_dataset, self).__init__()

        self.images = images
        self.n_images = len(self.images)
        self.labels = labels
        if labels is not None and len(self.images) != len(self.labels):
            raise Exception('images (' + str(len(self.images)) +') and labels ('+str(len(self.labels))+') do not have the same length!!!')
        self.transform = transform

    def __getitem__(self, index):
        ## for RGB only
        image = self.images[index]
        if self.transform is not None:
            # C*H*W -> H*W*C, wrap as a PIL image, then apply the transform
            image = Image.fromarray(np.uint8(np.transpose(image, (1, 2, 0))), mode='RGB')
            image = self.transform(image)
        if self.labels is None:
            return image
        return image, self.labels[index]

    def __len__(self):
        return self.n_images
################################################################################
def PlotLoss(loss, filename):
    """Plot the per-epoch training-loss curve and save the figure to `filename`."""
    epochs = np.arange(start=1, stop=len(loss) + 1)
    plt.switch_backend('agg')  # headless rendering
    mpl.style.use('seaborn')
    fig = plt.figure()
    ax = plt.subplot(111)
    ax.plot(epochs, np.array(loss))
    plt.xlabel("epoch")
    plt.ylabel("training loss")
    plt.legend()
    plt.savefig(filename)
################################################################################
# Convenience function to count the number of parameters in a module
def count_parameters(module, verbose=True):
    """Return the total number of scalar parameters in `module`;
    print the count unless `verbose` is False."""
    total = sum(p.data.nelement() for p in module.parameters())
    if verbose:
        print('Number of parameters: {}'.format(total))
    return total
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.