| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
hawp | hawp-master/scripts/train.py | import torch
import random
import numpy as np
from parsing.config import cfg
from parsing.utils.comm import to_device
from parsing.dataset import build_train_dataset
from parsing.detector import WireframeDetector
from parsing.solver import make_lr_scheduler, make_optimizer
from parsing.utils.logger import setup_logger
from parsing.utils.metric_logger import MetricLogger
from parsing.utils.miscellaneous import save_config
from parsing.utils.checkpoint import DetectronCheckpointer
import os
import time
import datetime
import argparse
import logging
class LossReducer(object):
def __init__(self,cfg):
# self.loss_keys = cfg.MODEL.LOSS_WEIGHTS.keys()
self.loss_weights = dict(cfg.MODEL.LOSS_WEIGHTS)
def __call__(self, loss_dict):
total_loss = sum([self.loss_weights[k]*loss_dict[k]
for k in self.loss_weights.keys()])
return total_loss
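# Editor's note: illustrative usage of LossReducer (a sketch, not part of the original script;
# the key names below are hypothetical and must match the keys configured in cfg.MODEL.LOSS_WEIGHTS):
#   reducer = LossReducer(cfg)
#   loss_dict = {'loss_md': torch.tensor(0.3), 'loss_dis': torch.tensor(0.7)}
#   total = reducer(loss_dict)  # weighted sum over the configured loss keys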
def train(cfg):
logger = logging.getLogger("hawp.trainer")
device = cfg.MODEL.DEVICE
model = WireframeDetector(cfg)
model = model.to(device)
train_dataset = build_train_dataset(cfg)
optimizer = make_optimizer(cfg,model)
scheduler = make_lr_scheduler(cfg,optimizer)
loss_reducer = LossReducer(cfg)
arguments = {}
arguments["epoch"] = 0
max_epoch = cfg.SOLVER.MAX_EPOCH
arguments["max_epoch"] = max_epoch
checkpointer = DetectronCheckpointer(cfg,
model,
optimizer,
save_dir=cfg.OUTPUT_DIR,
save_to_disk=True,
logger=logger)
_ = checkpointer.load()
start_training_time = time.time()
end = time.time()
start_epoch = arguments['epoch']
epoch_size = len(train_dataset)
global_iteration = epoch_size*start_epoch
for epoch in range(start_epoch+1, arguments['max_epoch']+1):
meters = MetricLogger(" ")
model.train()
arguments['epoch'] = epoch
for it, (images, annotations) in enumerate(train_dataset):
data_time = time.time() - end
images = images.to(device)
annotations = to_device(annotations,device)
loss_dict, _ = model(images,annotations)
total_loss = loss_reducer(loss_dict)
with torch.no_grad():
loss_dict_reduced = {k:v.item() for k,v in loss_dict.items()}
loss_reduced = total_loss.item()
meters.update(loss=loss_reduced, **loss_dict_reduced)
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
global_iteration +=1
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
eta_batch = epoch_size*(max_epoch-epoch+1) - it +1
eta_seconds = meters.time.global_avg*eta_batch
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if it % 20 == 0 or it+1 == len(train_dataset):
logger.info(
meters.delimiter.join(
[
"eta: {eta}",
"epoch: {epoch}",
"iter: {iter}",
"{meters}",
"lr: {lr:.6f}",
"max mem: {memory:.0f}\n",
]
).format(
eta=eta_string,
epoch=epoch,
iter=it,
meters=str(meters),
lr=optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
)
)
checkpointer.save('model_{:05d}'.format(epoch))
scheduler.step()
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info(
"Total training time: {} ({:.4f} s / epoch)".format(
total_time_str, total_training_time / (max_epoch)
)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='HAWP Training')
parser.add_argument("--config-file",
metavar="FILE",
help="path to config file",
type=str,
required=True,
)
parser.add_argument("--clean",
default=False,
action='store_true')
parser.add_argument("--seed",
default=2,
type=int)
parser.add_argument("opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER
)
args = parser.parse_args()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir:
if os.path.isdir(output_dir) and args.clean:
import shutil
shutil.rmtree(output_dir)
os.makedirs(output_dir, exist_ok=True)
logger = setup_logger('hawp', output_dir, out_file='train.log')
logger.info(args)
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file,"r") as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
output_config_path = os.path.join(cfg.OUTPUT_DIR, 'config.yml')
logger.info("Saving config into: {}".format(output_config_path))
save_config(cfg, output_config_path)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
train(cfg)
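# Editor's note: example invocation (a sketch; the config path is illustrative and depends on the repo layout):
#   python scripts/train.py --config-file <path/to/config.yaml> --seed 2
# Passing --clean removes an existing OUTPUT_DIR before training; trailing KEY VALUE pairs
# override config options via cfg.merge_from_list.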
| 6,098 | 31.614973 | 83 | py |
svrhm21_RNN_explain | svrhm21_RNN_explain-main/RNN_analyse_reprs_recurrence.py | # Script to perform decoding analyses on the trained layer activations and the recurrent flow
# Requires tensorflow 1.13, python 3.7, scikit-learn, and pytorch 1.6.0
############################# IMPORTING MODULES ##################################
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import tensorflow as tf
import scipy
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.interpolation import rotate
from random import shuffle
from sklearn import svm
from scipy import ndimage
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
fmnist = input_data.read_data_sets('fMNIST_data', one_hot=True)
############################# FUNCTIONS DEFINED ##################################
# A function to scramble image chunks
def im_scram(im,parts_h): # scramble parts_h*parts_h equal parts of the given image
win_prop = parts_h
dimsh = np.shape(im)
im_new = np.zeros(dimsh)
dimsh_win = np.floor(dimsh[0]/win_prop)
n_cells = np.square(np.int(dimsh[0]/dimsh_win))
cell_c = np.int(dimsh[0]/dimsh_win)
ind_new = np.linspace(0,n_cells-1,n_cells).astype('int32')
while np.mean(ind_new == np.linspace(0,n_cells-1,n_cells).astype('int32')) == 1:
shuffle(ind_new)
for i in range(n_cells):
j = ind_new[i]
im_new[np.int(np.mod(i,cell_c)*dimsh_win):np.int(np.mod(i,cell_c)*dimsh_win+dimsh_win),
np.int(np.floor(i*1./cell_c*1.)*dimsh_win):np.int(np.floor(i*1./cell_c*1.)*dimsh_win+dimsh_win)] = im[
np.int(np.mod(j,cell_c)*dimsh_win):np.int(np.mod(j,cell_c)*dimsh_win+dimsh_win),
np.int(np.floor(j*1./cell_c*1.)*dimsh_win):np.int(np.floor(j*1./cell_c*1.)*dimsh_win+dimsh_win)]
return im_new
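# Editor's note: illustrative usage (a sketch) - scramble a 28x28 digit into a 3x3 grid of shuffled blocks:
#   digit = np.reshape(mnist.train.images[0, :], (28, 28))
#   scrambled = im_scram(digit, 3)  # same shape as the input, block positions permuted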
# A function to generate images and the respective labels for training and testing
def gen_images(n_imgs,n_set): # n_imgs: number of images to generate; n_set: split (0 train, 1 val, 2 test). Each image has 8 objects (1 intact target, 7 scrambled), with 2 possible levels of zoom, rotation and x/y position per object
imgs_h = np.zeros([n_imgs,1,100,100])
imgs_h1 = np.zeros([n_imgs,1,100,100])
labs_h = np.zeros([n_imgs,20])
pos_x_h = np.zeros([n_imgs,2])
pos_y_h = np.zeros([n_imgs,2])
size_h = np.zeros([n_imgs,2])
rot_h = np.zeros([n_imgs,2])
n_objs = 8
for n_im in np.arange(n_imgs):
inst_img = np.zeros([100,100])
inst_img1 = np.zeros([100,100])
obj_ord = np.linspace(0,n_objs-1,n_objs)
dum_obj_ind = 4+np.random.randint(n_objs/2)
dum_dat_ord = (np.random.random(8) < 0.5)*1.
for i in np.arange(n_objs):
if dum_dat_ord[i] == 0: # dataset M or F
if n_set == 0:
dathh = mnist.train
elif n_set == 1:
dathh = mnist.validation
elif n_set == 2:
dathh = mnist.test
inst_obj_ind = np.random.randint(np.shape(dathh.images)[0])
if i == dum_obj_ind:
inst_lab = np.where(dathh.labels[inst_obj_ind,:]==1)[0][0]
inst_obj = np.reshape(dathh.images[inst_obj_ind,:],(28,28))
else:
if n_set == 0:
dathh = fmnist.train
elif n_set == 1:
dathh = fmnist.validation
elif n_set == 2:
dathh = fmnist.test
inst_obj_ind = np.random.randint(np.shape(dathh.images)[0])
if i == dum_obj_ind:
inst_lab = 10 + np.where(dathh.labels[inst_obj_ind,:]==1)[0][0]
inst_obj = np.reshape(dathh.images[inst_obj_ind,:],(28,28))
dumh111 = (np.random.random(1)[0] > 0.5)*1
if dumh111 == 0: # zoom 0.9 or 1.5
inst_obj = zoom(inst_obj,0.9+(np.random.random(1)[0]-0.5)/5.) # zoom 0.8 to 1.
else:
inst_obj = zoom(inst_obj,1.5+(np.random.random(1)[0]-0.5)/5.) # zoom 1.4 to 1.6
if i == dum_obj_ind:
size_h[n_im,dumh111] = 1.
dumh111 = (np.random.random(1)[0] > 0.5)*1
if dumh111 == 0: # rotate 30 or -30
inst_obj = rotate(inst_obj,30+(np.random.random(1)[0]-0.5)*2*5,reshape=False) # rotate 25 to 35
else:
inst_obj = rotate(inst_obj,-30+(np.random.random(1)[0]-0.5)*2*5,reshape=False) # rotate -25 to -35
if i == dum_obj_ind:
rot_h[n_im,dumh111] = 1.
if i != dum_obj_ind:
inst_obj = im_scram(inst_obj,3) # scrambled if not object of interest
if np.mod(obj_ord[i],4) == 0: # x_loc up or down
x_loc = np.int(np.round(25 + (np.random.random(1)[0]-0.5)*2*2.5)) # 25 +- 2.5
y_loc = np.int(np.round(25 + (np.random.random(1)[0]-0.5)*2*2.5)) # 25 +- 2.5
if i == dum_obj_ind:
pos_y_h[n_im,0] = 1.
pos_x_h[n_im,0] = 1.
elif np.mod(obj_ord[i],4) == 1:
x_loc = np.int(np.round(75 + (np.random.random(1)[0]-0.5)*2*2.5)) # 75 +- 2.5
y_loc = np.int(np.round(25 + (np.random.random(1)[0]-0.5)*2*2.5)) # 25 +- 2.5
if i == dum_obj_ind:
pos_y_h[n_im,1] = 1.
pos_x_h[n_im,0] = 1.
elif np.mod(obj_ord[i],4) == 2:
x_loc = np.int(np.round(25 + (np.random.random(1)[0]-0.5)*2*2.5)) # 25 +- 2.5
y_loc = np.int(np.round(75 + (np.random.random(1)[0]-0.5)*2*2.5)) # 75 +- 2.5
if i == dum_obj_ind:
pos_y_h[n_im,0] = 1.
pos_x_h[n_im,1] = 1.
elif np.mod(obj_ord[i],4) == 3:
x_loc = np.int(np.round(75 + (np.random.random(1)[0]-0.5)*2*2.5)) # 75 +- 2.5
y_loc = np.int(np.round(75 + (np.random.random(1)[0]-0.5)*2*2.5)) # 75 +- 2.5
if i == dum_obj_ind:
pos_y_h[n_im,1] = 1.
pos_x_h[n_im,1] = 1.
inst_obj = (inst_obj-np.min(inst_obj))/(np.max(inst_obj)-np.min(inst_obj))
# print(np.int(np.floor(np.shape(inst_obj)[0]/2)),np.int(np.ceil(np.shape(inst_obj)[0]/2)),np.shape(inst_obj)[0])
inst_img[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
if i == dum_obj_ind:
inst_img1[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img1[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img = (inst_img-np.min(inst_img))/(np.max(inst_img)-np.min(inst_img))
inst_img1 = (inst_img1-np.min(inst_img1))/(np.max(inst_img1)-np.min(inst_img1))
if np.isnan(np.min(inst_img)) or np.isnan(np.min(inst_img1)):
print('NaN in input')
exit(1)
imgs_h[n_im,0,:,:] = inst_img
imgs_h1[n_im,0,:,:] = inst_img1
labs_h[n_im,inst_lab] = 1.
return imgs_h,imgs_h1,labs_h,pos_x_h,pos_y_h,size_h,rot_h
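# Editor's note: shapes of the returned arrays, derived from the code above:
#   imgs_h  : [n_imgs, 1, 100, 100]  full scenes (1 intact target + 7 scrambled distractors)
#   imgs_h1 : [n_imgs, 1, 100, 100]  clean scenes containing only the intact target object
#   labs_h  : [n_imgs, 20]           one-hot category (0-9 MNIST, 10-19 fashion-MNIST)
#   pos_x_h, pos_y_h, size_h, rot_h : [n_imgs, 2] one-hot target position/scale/orientation factors
# Example call (sketch): imgs, imgs_clean, labs, px, py, sz, rot = gen_images(100, 2)  # 100 test-set images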
# Defining the RNN class for extracting representations and feedback
class RNNet_all_fbr(nn.Module):
def __init__(self, n_feats=8, ker_size=5,t_steps=3,b_flag=1,g_flag=1,l_flag=1,t_flag=1):
super(RNNet_all_fbr, self).__init__()
self.conv1 = nn.Conv2d(1, n_feats, ker_size)
self.pool = nn.MaxPool2d(3, 3)
self.conv2 = nn.Conv2d(n_feats, n_feats*2, ker_size)
self.fc1 = nn.Linear(n_feats*2 * 9 * 9, n_feats*16)
self.fc2 = nn.Linear(n_feats*16*t_steps, 20)
self.dropout = nn.Dropout(0.5)
self.c1xb = nn.ConvTranspose2d(n_feats,1,7,3) # in_channel, out_channel, kernel_size, stride, padding
self.c2xb = nn.ConvTranspose2d(n_feats*2,1,20,10)
self.fc1xb = nn.Linear(n_feats*16, 100*100)
self.c1c1b = nn.Conv2d(n_feats, n_feats, ker_size, 1, 2)
self.c2c1b = nn.ConvTranspose2d(n_feats*2,n_feats,16,10)
self.fc1c1b = nn.Linear(n_feats*16, 96*96*n_feats)
self.c2c2b = nn.Conv2d(n_feats*2, n_feats*2, ker_size, 1, 2)
self.fc1c2b = nn.Linear(n_feats*16, 28*28*n_feats*2)
self.fc1fc1b = nn.Linear(n_feats*16, n_feats*16)
self.c1xg = nn.ConvTranspose2d(n_feats,1,7,3) # in_channel, out_channel, kernel_size, stride, padding
self.c2xg = nn.ConvTranspose2d(n_feats*2,1,20,10)
self.fc1xg = nn.Linear(n_feats*16, 100*100)
self.c1c1g = nn.Conv2d(n_feats, n_feats, ker_size, 1, 2)
self.c2c1g = nn.ConvTranspose2d(n_feats*2,n_feats,16,10)
self.fc1c1g = nn.Linear(n_feats*16, 96*96*n_feats)
self.c2c2g = nn.Conv2d(n_feats*2, n_feats*2, ker_size, 1, 2)
self.fc1c2g = nn.Linear(n_feats*16, 28*28*n_feats*2)
self.fc1fc1g = nn.Linear(n_feats*16, n_feats*16)
self.n_feats = n_feats
self.t_steps = t_steps
self.b_flag = b_flag
self.g_flag = g_flag
self.l_flag = l_flag
self.t_flag = t_flag
def forward(self, x):
actvs = {}
actvs[0] = {}
actvs[1] = {}
actvs[2] = {}
actvs[3] = {}
fb_acts = {}
fb_acts[0] = {}
fb_acts[1] = {}
fb_acts[2] = {}
fb_acts[3] = {}
fb_acts_comb = {}
fb_acts_comb[0] = {}
fb_acts_comb[1] = {}
fb_acts_comb[2] = {}
fb_acts_comb[3] = {}
for i in np.arange(2):
fb_acts[0][i] = {}
fb_acts[1][i] = {}
fb_acts[2][i] = {}
fb_acts[3][i] = {}
fb_acts_comb[0][i] = {}
fb_acts_comb[1][i] = {}
fb_acts_comb[2][i] = {}
fb_acts_comb[3][i] = {}
for j in np.arange(3):
fb_acts[0][i][j] = {}
fb_acts[1][i][j] = {}
if j > 0:
fb_acts[2][i][j-1] = {}
if j > 1:
fb_acts[3][i][j-2] = {}
actvs[0][0] = F.relu(x) - F.relu(x-1)
c1 = F.relu(self.conv1(actvs[0][0]))
actvs[1][0] = self.pool(c1)
c2 = F.relu(self.conv2(actvs[1][0]))
actvs[2][0] = self.pool(c2)
actvs[3][0] = F.relu(self.fc1(actvs[2][0].view(-1, self.n_feats*2 * 9 * 9)))
actvs[4] = actvs[3][0]
if self.t_steps > 0:
for t in np.arange(self.t_steps-1):
fb_acts[0][0][0][t] = self.t_flag*self.c1xb(actvs[1][t])
fb_acts[0][0][1][t] = self.t_flag*self.c2xb(actvs[2][t])
fb_acts[0][0][2][t] = self.t_flag*(self.fc1xb(actvs[3][t])).view(-1,1,100,100)
fb_acts_comb[0][0][t] = fb_acts[0][0][0][t] + fb_acts[0][0][1][t] + fb_acts[0][0][2][t]
fb_acts[0][1][0][t] = self.t_flag*self.c1xg(actvs[1][t])
fb_acts[0][1][1][t] = self.t_flag*self.c2xg(actvs[2][t])
fb_acts[0][1][2][t] = self.t_flag*(self.fc1xg(actvs[3][t])).view(-1,1,100,100)
fb_acts_comb[0][1][t] = fb_acts[0][1][0][t] + fb_acts[0][1][1][t] + fb_acts[0][1][2][t]
dumh000 = (x + self.b_flag*(self.t_flag*(self.c1xb(actvs[1][t])+self.c2xb(actvs[2][t])+(self.fc1xb(actvs[3][t])).view(-1,1,100,100)))) * (1.+self.g_flag*self.t_flag*(self.c1xg(actvs[1][t])+self.c2xg(actvs[2][t])+(self.fc1xg(actvs[3][t])).view(-1,1,100,100)))
actvs[0][t+1] = (F.relu(dumh000) - F.relu(dumh000-1))
fb_acts[1][0][0][t] = self.l_flag*self.c1c1b(c1)
fb_acts[1][0][1][t] = self.t_flag*self.c2c1b(actvs[2][t])
fb_acts[1][0][2][t] = self.t_flag*(self.fc1c1b(actvs[3][t])).view(-1,self.n_feats,96,96)
fb_acts_comb[1][0][t] = fb_acts[1][0][0][t] + fb_acts[1][0][1][t] + fb_acts[1][0][2][t]
fb_acts[1][1][0][t] = self.l_flag*self.c1c1g(c1)
fb_acts[1][1][1][t] = self.t_flag*self.c2c1g(actvs[2][t])
fb_acts[1][1][2][t] = self.t_flag*(self.fc1c1g(actvs[3][t])).view(-1,self.n_feats,96,96)
fb_acts_comb[1][1][t] = fb_acts[1][1][0][t] + fb_acts[1][1][1][t] + fb_acts[1][1][2][t]
c1 = F.relu(self.conv1(actvs[0][t+1])+self.b_flag*(self.l_flag*self.c1c1b(c1)+self.t_flag*(self.c2c1b(actvs[2][t])+(self.fc1c1b(actvs[3][t])).view(-1,self.n_feats,96,96)))) * (1.+self.g_flag*(self.l_flag*self.c1c1g(c1)+self.t_flag*(self.c2c1g(actvs[2][t])+(self.fc1c1g(actvs[3][t])).view(-1,self.n_feats,96,96))))
actvs[1][t+1] = self.pool(c1)
fb_acts[2][0][0][t] = self.l_flag*self.c2c2b(c2)
fb_acts[2][0][1][t] = self.t_flag*(self.fc1c2b(actvs[3][t])).view(-1,self.n_feats*2,28,28)
fb_acts_comb[2][0][t] = fb_acts[2][0][0][t] + fb_acts[2][0][1][t]
fb_acts[2][1][0][t] = self.l_flag*self.c2c2g(c2)
fb_acts[2][1][1][t] = self.t_flag*(self.fc1c2g(actvs[3][t])).view(-1,self.n_feats*2,28,28)
fb_acts_comb[2][1][t] = fb_acts[2][1][0][t] + fb_acts[2][1][1][t]
c2 = F.relu(self.conv2(actvs[1][t+1])+self.b_flag*(self.l_flag*self.c2c2b(c2)+self.t_flag*(self.fc1c2b(actvs[3][t])).view(-1,self.n_feats*2,28,28))) * (1.+self.g_flag*(self.l_flag*self.c2c2g(c2)+self.t_flag*(self.fc1c2g(actvs[3][t])).view(-1,self.n_feats*2,28,28)))
actvs[2][t+1] = self.pool(c2)
fb_acts[3][0][0][t] = self.l_flag*self.fc1fc1b(actvs[3][t])
fb_acts[3][1][0][t] = self.l_flag*self.fc1fc1g(actvs[3][t])
fb_acts_comb[3][0][t] = fb_acts[3][0][0][t]
fb_acts_comb[3][1][t] = fb_acts[3][1][0][t]
actvs[3][t+1] = F.relu(self.fc1(actvs[2][t+1].view(-1, self.n_feats*2 * 9 * 9))+self.b_flag*self.l_flag*self.fc1fc1b(actvs[3][t])) * (1.+self.g_flag*self.l_flag*self.fc1fc1g(actvs[3][t]))
actvs[4] = torch.cat((actvs[4],actvs[3][t+1]),1)
actvs[5] = torch.log(torch.clamp(F.softmax(self.fc2(actvs[4]),dim=1),1e-10,1.0))
return actvs, fb_acts, fb_acts_comb
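# Editor's note on the returned dictionaries, derived from forward() above:
#   actvs[k][t]          : activations of layer k (0 input, 1 pool(conv1), 2 pool(conv2), 3 fc1) at timestep t
#   actvs[4]             : fc1 activations concatenated over all timesteps (input to fc2)
#   actvs[5]             : log of the clamped softmax over the 20 classes
#   fb_acts[k][m][s][t]  : individual recurrent inputs to layer k (m: 0 additive, 1 gain; s: source layer)
#   fb_acts_comb[k][m][t]: the same inputs summed over sources, as used for the decoding analyses below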
preprocess = transforms.Compose(
[transforms.ToTensor()])
# Xavier initialisation
def weights_init(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.zeros_(m.bias)
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.zeros_(m.bias)
if __name__ == '__main__':
############################# NETWORK PARAMETERS ##################################
n_feats = 8 # in Conv layer 1
ker_size = 5 # in Conv layer 1
b_h = 0 # bias modulation flag
g_h = 1 # gain modulation flag
l_h = 1 # lateral interactions flag
t_h = 1 # top-down interactions flag
net_num = 5 # id of current network
t_steps = 4 # number of timesteps
net_save_str = 'rnn_bglt_'+str(b_h)+str(g_h)+str(l_h)+str(t_h)+'_t_'+str(t_steps)+'_num_'+str(net_num)
print(net_save_str)
net = RNNet_all_fbr(n_feats,ker_size,t_steps,b_h,g_h,l_h,t_h)
net.apply(weights_init)
net = net.float()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net.to(device)
print('Net created!')
net.load_state_dict(torch.load(net_save_str+'.pth',map_location=torch.device('cpu')))
net.eval()
device = torch.device("cpu")
net.to(device)
n_reps_h = 10
dec_accs = np.zeros([4,t_steps,6,2,n_reps_h]) # layers, timepoints, fact (cat,pos_x,pos_y,scale,orientation,domain), im (org,clean)
out_accs = np.zeros([2,n_reps_h]) # im (org,clean), repetitions
fbr_accs_all_comb = np.zeros([4,t_steps-1,2,6,n_reps_h]) # layers, timesteps, fb component (additive,gain), fact (cat,pos_x,pos_y,scale,orientation,domain), repetitions
for nrh in np.arange(n_reps_h):
############################# Image generation ##################################
n_mult = 10
inputs_t,inputs_t_c,labels_t,pos_x_t,pos_y_t,size_t,rot_t = gen_images(100*n_mult,2)
inputs_t = torch.from_numpy(inputs_t).float()
labels_t = torch.from_numpy(labels_t).float()
inputs_t_c = torch.from_numpy(inputs_t_c).float()
pos_x_t = torch.from_numpy(pos_x_t).float()
pos_y_t = torch.from_numpy(pos_y_t).float()
size_t = torch.from_numpy(size_t).float()
rot_t = torch.from_numpy(rot_t).float()
############################# Decoding variables from layers and overall performance ##################################
for j in range(2):
if j == 0:
outputs,_,out_fbr_comb = net(inputs_t.float())
else:
outputs,_,_ = net(inputs_t_c.float())
_, predicted = torch.max(outputs[5].data, 1)
total = labels_t.size(0)
correct = np.sum(predicted.numpy() == torch.max(labels_t, 1)[1].numpy())
out_accs[j][nrh] = correct / total
for k in range(4):
for l in range(t_steps):
print(j,k,l)
DataH = outputs[k][l].detach().numpy()
DataH = np.reshape(DataH,(100*n_mult,-1))
# Category - first 80% of samples train the linear SVM, last 20% test
lin_clf_pos = svm.LinearSVC()
labels_t_h = torch.max(labels_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], labels_t_h[:80*n_mult])
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:,:])
dec_accs[k][l][0][j][nrh] = np.mean(Predicted_lab==labels_t_h[80*n_mult:])
# Category domain
lin_clf_pos = svm.LinearSVC()
labels_t_h = torch.max(labels_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], (labels_t_h[:80*n_mult]>9)*1)
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:,:])
dec_accs[k][l][5][j][nrh] = np.mean(Predicted_lab==((labels_t_h[80*n_mult:]>9)*1))
# Pos_x
lin_clf_pos = svm.LinearSVC()
pos_x_t_h = torch.max(pos_x_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], pos_x_t_h[:80*n_mult])
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:100*n_mult,:])
dec_accs[k][l][1][j][nrh] = np.mean(Predicted_lab==pos_x_t_h[80*n_mult:100*n_mult])
# Pos_y
lin_clf_pos = svm.LinearSVC()
pos_y_t_h = torch.max(pos_y_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], pos_y_t_h[:80*n_mult])
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:100*n_mult,:])
dec_accs[k][l][2][j][nrh] = np.mean(Predicted_lab==pos_y_t_h[80*n_mult:100*n_mult])
# Size
lin_clf_pos = svm.LinearSVC()
size_t_h = torch.max(size_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], size_t_h[:80*n_mult])
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:100*n_mult,:])
dec_accs[k][l][3][j][nrh] = np.mean(Predicted_lab==size_t_h[80*n_mult:100*n_mult])
# Rot
lin_clf_pos = svm.LinearSVC()
rot_t_h = torch.max(rot_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], rot_t_h[:80*n_mult])
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:100*n_mult,:])
dec_accs[k][l][4][j][nrh] = np.mean(Predicted_lab==rot_t_h[80*n_mult:100*n_mult])
############################# Decoding variables from overall recurrent flow to layers ##################################
for j in np.arange(4):
for k in np.arange(2):
for l in np.arange(t_steps-1):
print(j,k,l)
DataH = out_fbr_comb[j][k][l].detach().numpy()
DataH = np.reshape(DataH,(100*n_mult,-1))
# Category - first 80% of samples train the linear SVM, last 20% test
lin_clf_pos = svm.LinearSVC()
labels_t_h = torch.max(labels_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], labels_t_h[:80*n_mult])
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:,:])
fbr_accs_all_comb[j][l][k][0][nrh] = np.mean(Predicted_lab==labels_t_h[80*n_mult:])
# Category domain
lin_clf_pos = svm.LinearSVC()
labels_t_h = torch.max(labels_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], (labels_t_h[:80*n_mult]>9)*1)
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:,:])
fbr_accs_all_comb[j][l][k][5][nrh] = np.mean(Predicted_lab==((labels_t_h[80*n_mult:]>9)*1))
# Pos_x
lin_clf_pos = svm.LinearSVC()
pos_x_t_h = torch.max(pos_x_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], pos_x_t_h[:80*n_mult])
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:100*n_mult,:])
fbr_accs_all_comb[j][l][k][1][nrh] = np.mean(Predicted_lab==pos_x_t_h[80*n_mult:100*n_mult])
# Pos_y
lin_clf_pos = svm.LinearSVC()
pos_y_t_h = torch.max(pos_y_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], pos_y_t_h[:80*n_mult])
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:100*n_mult,:])
fbr_accs_all_comb[j][l][k][2][nrh] = np.mean(Predicted_lab==pos_y_t_h[80*n_mult:100*n_mult])
# Size
lin_clf_pos = svm.LinearSVC()
size_t_h = torch.max(size_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], size_t_h[:80*n_mult])
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:100*n_mult,:])
fbr_accs_all_comb[j][l][k][3][nrh] = np.mean(Predicted_lab==size_t_h[80*n_mult:100*n_mult])
# Rot
lin_clf_pos = svm.LinearSVC()
rot_t_h = torch.max(rot_t, 1)[1].numpy()
lin_clf_pos.fit(DataH[:80*n_mult,:], rot_t_h[:80*n_mult])
Predicted_lab = lin_clf_pos.predict(DataH[80*n_mult:100*n_mult,:])
fbr_accs_all_comb[j][l][k][4][nrh] = np.mean(Predicted_lab==rot_t_h[80*n_mult:100*n_mult])
dec_accs_str = 'dec_acc'+net_save_str+'.npy'
with open(dec_accs_str, 'wb') as f:
np.save(f, dec_accs)
np.save(f, out_accs)
np.save(f, fbr_accs_all_comb)
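# Editor's note: reading the saved arrays back (a sketch). np.save was called three times on the
# same file handle, so the arrays must be loaded sequentially, in the same order:
#   with open(dec_accs_str, 'rb') as f:
#       dec_accs = np.load(f)
#       out_accs = np.load(f)
#       fbr_accs_all_comb = np.load(f)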
| 23,313 | 54.509524 | 462 | py |
svrhm21_RNN_explain | svrhm21_RNN_explain-main/RNN_perturb.py | # Script to perform perturbation analyses on the trained RNN
# Requires tensorflow 1.13, python 3.7, scikit-learn, and pytorch 1.6.0
############################# IMPORTING MODULES ##################################
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import tensorflow as tf
import scipy
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.interpolation import rotate
from random import shuffle
from sklearn import svm
from scipy import ndimage
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
fmnist = input_data.read_data_sets('fMNIST_data', one_hot=True)
############################# FUNCTIONS DEFINED ##################################
# A function to scramble image chunks
def im_scram(im,parts_h): # scramble parts_h*parts_h equal parts of the given image
win_prop = parts_h
dimsh = np.shape(im)
im_new = np.zeros(dimsh)
dimsh_win = np.floor(dimsh[0]/win_prop)
n_cells = np.square(np.int(dimsh[0]/dimsh_win))
cell_c = np.int(dimsh[0]/dimsh_win)
ind_new = np.linspace(0,n_cells-1,n_cells).astype('int32')
while np.mean(ind_new == np.linspace(0,n_cells-1,n_cells).astype('int32')) == 1:
shuffle(ind_new)
for i in range(n_cells):
j = ind_new[i]
im_new[np.int(np.mod(i,cell_c)*dimsh_win):np.int(np.mod(i,cell_c)*dimsh_win+dimsh_win),
np.int(np.floor(i*1./cell_c*1.)*dimsh_win):np.int(np.floor(i*1./cell_c*1.)*dimsh_win+dimsh_win)] = im[
np.int(np.mod(j,cell_c)*dimsh_win):np.int(np.mod(j,cell_c)*dimsh_win+dimsh_win),
np.int(np.floor(j*1./cell_c*1.)*dimsh_win):np.int(np.floor(j*1./cell_c*1.)*dimsh_win+dimsh_win)]
return im_new
# A function to generate images and the perturbed versions for analysis
def gen_images(n_imgs,n_set): # n_imgs: number of images (each returned with 6 perturbed variants); n_set: split (0 train, 1 val, 2 test). Each image has 8 objects (1 intact target, 7 scrambled), with 2 possible levels of zoom, rotation and x/y position per object
imgs_h = np.zeros([n_imgs,1,100,100])
imgs_h_xswap = np.zeros([n_imgs,1,100,100])
imgs_h_yswap = np.zeros([n_imgs,1,100,100])
imgs_h_rotswap = np.zeros([n_imgs,1,100,100])
imgs_h_sizeswap = np.zeros([n_imgs,1,100,100])
imgs_h_catswap_w = np.zeros([n_imgs,1,100,100])
imgs_h_catswap_b = np.zeros([n_imgs,1,100,100])
labs_h = np.zeros([n_imgs,20])
pos_x_h = np.zeros([n_imgs,2])
pos_y_h = np.zeros([n_imgs,2])
size_h = np.zeros([n_imgs,2])
rot_h = np.zeros([n_imgs,2])
n_objs = 8
for n_im in np.arange(n_imgs):
inst_img = np.zeros([100,100])
inst_img_xswap = np.zeros([100,100])
inst_img_yswap = np.zeros([100,100])
inst_img_rotswap = np.zeros([100,100])
inst_img_sizeswap = np.zeros([100,100])
inst_img_catswap_w = np.zeros([100,100])
inst_img_catswap_b = np.zeros([100,100])
obj_ord = np.linspace(0,n_objs-1,n_objs)
dum_obj_ind = 4+np.random.randint(n_objs/2)
dum_obj_ind_xswap = np.int(5.5 + np.sign(dum_obj_ind - 5.5)*(2 - np.abs(dum_obj_ind - 5.5)))
dum_obj_ind_yswap = np.int(2*(5-np.floor(dum_obj_ind/2))+np.mod(dum_obj_ind,2))
dum_dat_ord = (np.random.random(8) < 0.5)*1.
for i in np.arange(n_objs):
if dum_dat_ord[i] == 0: # dataset M or F
if n_set == 0:
dathh = mnist.train
elif n_set == 1:
dathh = mnist.validation
elif n_set == 2:
dathh = mnist.test
inst_obj_ind = np.random.randint(np.shape(dathh.images)[0])
if i == dum_obj_ind:
inst_lab = np.where(dathh.labels[inst_obj_ind,:]==1)[0][0]
inst_obj = np.reshape(dathh.images[inst_obj_ind,:],(28,28))
else:
if n_set == 0:
dathh = fmnist.train
elif n_set == 1:
dathh = fmnist.validation
elif n_set == 2:
dathh = fmnist.test
inst_obj_ind = np.random.randint(np.shape(dathh.images)[0])
if i == dum_obj_ind:
inst_lab = 10 + np.where(dathh.labels[inst_obj_ind,:]==1)[0][0]
inst_obj = np.reshape(dathh.images[inst_obj_ind,:],(28,28))
if i == dum_obj_ind:
if dum_dat_ord[i] == 0: # dataset M or F
if n_set == 0:
dathh = mnist.train
elif n_set == 1:
dathh = mnist.validation
elif n_set == 2:
dathh = mnist.test
inst_obj_ind_catswap_w = np.random.randint(np.shape(dathh.images)[0])
while np.where(dathh.labels[inst_obj_ind_catswap_w,:]==1)[0][0] == inst_lab:
inst_obj_ind_catswap_w = np.random.randint(np.shape(dathh.images)[0])
inst_obj_catswap_w = np.reshape(dathh.images[inst_obj_ind_catswap_w,:],(28,28))
if n_set == 0:
dathh = fmnist.train
elif n_set == 1:
dathh = fmnist.validation
elif n_set == 2:
dathh = fmnist.test
inst_obj_ind_catswap_b = np.random.randint(np.shape(dathh.images)[0])
inst_obj_catswap_b = np.reshape(dathh.images[inst_obj_ind_catswap_b,:],(28,28))
else:
if n_set == 0:
dathh = fmnist.train
elif n_set == 1:
dathh = fmnist.validation
elif n_set == 2:
dathh = fmnist.test
inst_obj_ind_catswap_w = np.random.randint(np.shape(dathh.images)[0])
while np.where(dathh.labels[inst_obj_ind_catswap_w,:]==1)[0][0] == inst_lab:
inst_obj_ind_catswap_w = np.random.randint(np.shape(dathh.images)[0])
inst_obj_catswap_w = np.reshape(dathh.images[inst_obj_ind_catswap_w,:],(28,28))
if n_set == 0:
dathh = mnist.train
elif n_set == 1:
dathh = mnist.validation
elif n_set == 2:
dathh = mnist.test
inst_obj_ind_catswap_b = np.random.randint(np.shape(dathh.images)[0])
inst_obj_catswap_b = np.reshape(dathh.images[inst_obj_ind_catswap_b,:],(28,28))
dumh111 = (np.random.random(1)[0] > 0.5)*1
if dumh111 == 0: # zoom 0.8 or 1.6
inst_obj = zoom(inst_obj,0.8)
if i == dum_obj_ind:
inst_obj_sizeswap = zoom(inst_obj,1.6)
inst_obj_catswap_w = zoom(inst_obj_catswap_w,0.8)
inst_obj_catswap_b = zoom(inst_obj_catswap_b,0.8)
else:
inst_obj = zoom(inst_obj,1.6)
if i == dum_obj_ind:
inst_obj_sizeswap = zoom(inst_obj,0.8)
inst_obj_catswap_w = zoom(inst_obj_catswap_w,1.6)
inst_obj_catswap_b = zoom(inst_obj_catswap_b,1.6)
if i == dum_obj_ind:
size_h[n_im,dumh111] = 1.
dumh111 = (np.random.random(1)[0] > 0.5)*1
if dumh111 == 0: # rotate 35 or -35
dumrot = 35
inst_obj = rotate(inst_obj,dumrot,reshape=False)
if i == dum_obj_ind:
inst_obj_rotswap = rotate(inst_obj,-35,reshape=False)
inst_obj_sizeswap = rotate(inst_obj_sizeswap,dumrot,reshape=False)
inst_obj_catswap_w = rotate(inst_obj_catswap_w,dumrot,reshape=False)
inst_obj_catswap_b = rotate(inst_obj_catswap_b,dumrot,reshape=False)
else:
dumrot = -35
inst_obj = rotate(inst_obj,dumrot,reshape=False) # rotate by -35
if i == dum_obj_ind:
inst_obj_rotswap = rotate(inst_obj,35,reshape=False)
inst_obj_sizeswap = rotate(inst_obj_sizeswap,dumrot,reshape=False)
inst_obj_catswap_w = rotate(inst_obj_catswap_w,dumrot,reshape=False)
inst_obj_catswap_b = rotate(inst_obj_catswap_b,dumrot,reshape=False)
if i == dum_obj_ind:
rot_h[n_im,dumh111] = 1.
if i != dum_obj_ind:
inst_obj = im_scram(inst_obj,3) # scrambled if not object of interest
if np.mod(obj_ord[i],4) == 0: # x_loc up or down
x_loc = np.int(np.round(25))
y_loc = np.int(np.round(25))
x_loc_xswap = np.int(np.round(75))
y_loc_yswap = np.int(np.round(75))
if i == dum_obj_ind:
pos_y_h[n_im,0] = 1.
pos_x_h[n_im,0] = 1.
elif np.mod(obj_ord[i],4) == 1:
x_loc = np.int(np.round(75)) # 75 +- 2.5
y_loc = np.int(np.round(25)) # 25 +- 2.5
x_loc_xswap = np.int(np.round(25))
y_loc_yswap = np.int(np.round(75))
if i == dum_obj_ind:
pos_y_h[n_im,1] = 1.
pos_x_h[n_im,0] = 1.
elif np.mod(obj_ord[i],4) == 2:
x_loc = np.int(np.round(25)) # 25 +- 2.5
y_loc = np.int(np.round(75)) # 75 +- 2.5
x_loc_xswap = np.int(np.round(75))
y_loc_yswap = np.int(np.round(25))
if i == dum_obj_ind:
pos_y_h[n_im,0] = 1.
pos_x_h[n_im,1] = 1.
elif np.mod(obj_ord[i],4) == 3:
x_loc = np.int(np.round(75)) # 75 +- 2.5
y_loc = np.int(np.round(75)) # 75 +- 2.5
x_loc_xswap = np.int(np.round(25))
y_loc_yswap = np.int(np.round(25))
if i == dum_obj_ind:
pos_y_h[n_im,1] = 1.
pos_x_h[n_im,1] = 1.
inst_obj = (inst_obj-np.min(inst_obj))/(np.max(inst_obj)-np.min(inst_obj))
if i == dum_obj_ind:
inst_obj_rotswap = (inst_obj_rotswap-np.min(inst_obj_rotswap))/(np.max(inst_obj_rotswap)-np.min(inst_obj_rotswap))
inst_obj_sizeswap = (inst_obj_sizeswap-np.min(inst_obj_sizeswap))/(np.max(inst_obj_sizeswap)-np.min(inst_obj_sizeswap))
inst_obj_catswap_w = (inst_obj_catswap_w-np.min(inst_obj_catswap_w))/(np.max(inst_obj_catswap_w)-np.min(inst_obj_catswap_w))
inst_obj_catswap_b = (inst_obj_catswap_b-np.min(inst_obj_catswap_b))/(np.max(inst_obj_catswap_b)-np.min(inst_obj_catswap_b))
# print(np.int(np.floor(np.shape(inst_obj)[0]/2)),np.int(np.ceil(np.shape(inst_obj)[0]/2)),np.shape(inst_obj)[0])
inst_img[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
if i != dum_obj_ind and i != dum_obj_ind_xswap and i != dum_obj_ind_yswap:
inst_img_xswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_xswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_yswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_yswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_rotswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_rotswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_sizeswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_sizeswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_catswap_w[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_catswap_w[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_catswap_b[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_catswap_b[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
if i == dum_obj_ind_xswap:
inst_img_xswap[x_loc_xswap-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc_xswap+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_xswap[x_loc_xswap-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc_xswap+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_yswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_yswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_rotswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_rotswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_sizeswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_sizeswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_catswap_w[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_catswap_w[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_catswap_b[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_catswap_b[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
if i == dum_obj_ind_yswap:
inst_img_xswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_xswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_yswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc_yswap-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc_yswap+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_yswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc_yswap-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc_yswap+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_rotswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_rotswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_sizeswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_sizeswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_catswap_w[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_catswap_w[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_catswap_b[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_catswap_b[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
if i == dum_obj_ind:
inst_img_xswap[x_loc_xswap-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc_xswap+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_xswap[x_loc_xswap-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc_xswap+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_yswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc_yswap-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc_yswap+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img_yswap[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc_yswap-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc_yswap+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img_rotswap[x_loc-np.int(np.floor(np.shape(inst_obj_rotswap)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj_rotswap)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj_rotswap)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj_rotswap)[1]/2.))] = (1-inst_obj_rotswap)*inst_img_rotswap[x_loc-np.int(np.floor(np.shape(inst_obj_rotswap)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj_rotswap)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj_rotswap)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj_rotswap)[1]/2.))] + (inst_obj_rotswap)*inst_obj_rotswap
inst_img_sizeswap[x_loc-np.int(np.floor(np.shape(inst_obj_sizeswap)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj_sizeswap)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj_sizeswap)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj_sizeswap)[1]/2.))] = (1-inst_obj_sizeswap)*inst_img_sizeswap[x_loc-np.int(np.floor(np.shape(inst_obj_sizeswap)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj_sizeswap)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj_sizeswap)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj_sizeswap)[1]/2.))] + (inst_obj_sizeswap)*inst_obj_sizeswap
inst_img_catswap_w[x_loc-np.int(np.floor(np.shape(inst_obj_catswap_w)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj_catswap_w)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj_catswap_w)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj_catswap_w)[1]/2.))] = (1-inst_obj_catswap_w)*inst_img_catswap_w[x_loc-np.int(np.floor(np.shape(inst_obj_catswap_w)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj_catswap_w)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj_catswap_w)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj_catswap_w)[1]/2.))] + (inst_obj_catswap_w)*inst_obj_catswap_w
inst_img_catswap_b[x_loc-np.int(np.floor(np.shape(inst_obj_catswap_b)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj_catswap_b)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj_catswap_b)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj_catswap_b)[1]/2.))] = (1-inst_obj_catswap_b)*inst_img_catswap_b[x_loc-np.int(np.floor(np.shape(inst_obj_catswap_b)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj_catswap_b)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj_catswap_b)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj_catswap_b)[1]/2.))] + (inst_obj_catswap_b)*inst_obj_catswap_b
inst_img = (inst_img-np.min(inst_img))/(np.max(inst_img)-np.min(inst_img))
inst_img_xswap = (inst_img_xswap-np.min(inst_img_xswap))/(np.max(inst_img_xswap)-np.min(inst_img_xswap))
inst_img_yswap = (inst_img_yswap-np.min(inst_img_yswap))/(np.max(inst_img_yswap)-np.min(inst_img_yswap))
inst_img_rotswap = (inst_img_rotswap-np.min(inst_img_rotswap))/(np.max(inst_img_rotswap)-np.min(inst_img_rotswap))
inst_img_sizeswap = (inst_img_sizeswap-np.min(inst_img_sizeswap))/(np.max(inst_img_sizeswap)-np.min(inst_img_sizeswap))
inst_img_catswap_w = (inst_img_catswap_w-np.min(inst_img_catswap_w))/(np.max(inst_img_catswap_w)-np.min(inst_img_catswap_w))
inst_img_catswap_b = (inst_img_catswap_b-np.min(inst_img_catswap_b))/(np.max(inst_img_catswap_b)-np.min(inst_img_catswap_b))
if np.isnan(np.min(inst_img)) or np.isnan(np.min(inst_img_xswap)) or np.isnan(np.min(inst_img_yswap)) or np.isnan(np.min(inst_img_rotswap)) or np.isnan(np.min(inst_img_sizeswap)) or np.isnan(np.min(inst_img_catswap_w)) or np.isnan(np.min(inst_img_catswap_b)):
print('NaN in input')
exit(1)
imgs_h[n_im,0,:,:] = inst_img
imgs_h_xswap[n_im,0,:,:] = inst_img_xswap
imgs_h_yswap[n_im,0,:,:] = inst_img_yswap
imgs_h_rotswap[n_im,0,:,:] = inst_img_rotswap
imgs_h_sizeswap[n_im,0,:,:] = inst_img_sizeswap
imgs_h_catswap_w[n_im,0,:,:] = inst_img_catswap_w
imgs_h_catswap_b[n_im,0,:,:] = inst_img_catswap_b
labs_h[n_im,inst_lab] = 1.
return imgs_h,imgs_h_xswap,imgs_h_yswap,imgs_h_rotswap,imgs_h_sizeswap,imgs_h_catswap_w,imgs_h_catswap_b,labs_h,pos_x_h,pos_y_h,size_h,rot_h
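# Editor's note: example call (a sketch) - generate 100 test images plus the six perturbed variants:
#   (imgs, imgs_xswap, imgs_yswap, imgs_rotswap, imgs_sizeswap,
#    imgs_catswap_w, imgs_catswap_b, labs, px, py, sz, rot) = gen_images(100, 2)
# Each image array has shape [n_imgs, 1, 100, 100]; only the intact target object differs
# between the original scene and each perturbed copy, while labels and one-hot factor codes
# always refer to the original (unperturbed) target.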
# Defining the RNN class for extracting representations and original recurrent flows
class RNNet_all_fbr(nn.Module):
def __init__(self, n_feats=8, ker_size=5,t_steps=3,b_flag=0,g_flag=1,l_flag=1,t_flag=1):
super(RNNet_all_fbr, self).__init__()
self.conv1 = nn.Conv2d(1, n_feats, ker_size)
self.pool = nn.MaxPool2d(3, 3)
self.conv2 = nn.Conv2d(n_feats, n_feats*2, ker_size)
self.fc1 = nn.Linear(n_feats*2 * 9 * 9, n_feats*16)
self.fc2 = nn.Linear(n_feats*16*t_steps, 20)
self.dropout = nn.Dropout(0.5)
self.c1xb = nn.ConvTranspose2d(n_feats,1,7,3) # in_channel, out_channel, kernel_size, stride, padding
self.c2xb = nn.ConvTranspose2d(n_feats*2,1,20,10)
self.fc1xb = nn.Linear(n_feats*16, 100*100)
self.c1c1b = nn.Conv2d(n_feats, n_feats, ker_size, 1, 2)
self.c2c1b = nn.ConvTranspose2d(n_feats*2,n_feats,16,10)
self.fc1c1b = nn.Linear(n_feats*16, 96*96*n_feats)
self.c2c2b = nn.Conv2d(n_feats*2, n_feats*2, ker_size, 1, 2)
self.fc1c2b = nn.Linear(n_feats*16, 28*28*n_feats*2)
self.fc1fc1b = nn.Linear(n_feats*16, n_feats*16)
self.c1xg = nn.ConvTranspose2d(n_feats,1,7,3) # in_channel, out_channel, kernel_size, stride, padding
self.c2xg = nn.ConvTranspose2d(n_feats*2,1,20,10)
self.fc1xg = nn.Linear(n_feats*16, 100*100)
self.c1c1g = nn.Conv2d(n_feats, n_feats, ker_size, 1, 2)
self.c2c1g = nn.ConvTranspose2d(n_feats*2,n_feats,16,10)
self.fc1c1g = nn.Linear(n_feats*16, 96*96*n_feats)
self.c2c2g = nn.Conv2d(n_feats*2, n_feats*2, ker_size, 1, 2)
self.fc1c2g = nn.Linear(n_feats*16, 28*28*n_feats*2)
self.fc1fc1g = nn.Linear(n_feats*16, n_feats*16)
self.n_feats = n_feats
self.t_steps = t_steps
self.b_flag = b_flag
self.g_flag = g_flag
self.l_flag = l_flag
self.t_flag = t_flag
def forward(self, x):
actvs = {}
actvs[0] = {}
actvs[1] = {}
actvs[2] = {}
actvs[3] = {}
fb_acts = {}
fb_acts[0] = {}
fb_acts[1] = {}
fb_acts[2] = {}
fb_acts[3] = {}
fb_acts_comb = {}
fb_acts_comb[0] = {}
fb_acts_comb[1] = {}
fb_acts_comb[2] = {}
fb_acts_comb[3] = {}
for i in np.arange(2):
fb_acts[0][i] = {}
fb_acts[1][i] = {}
fb_acts[2][i] = {}
fb_acts[3][i] = {}
fb_acts_comb[0][i] = {}
fb_acts_comb[1][i] = {}
fb_acts_comb[2][i] = {}
fb_acts_comb[3][i] = {}
for j in np.arange(3):
fb_acts[0][i][j] = {}
fb_acts[1][i][j] = {}
if j > 0:
fb_acts[2][i][j-1] = {}
if j > 1:
fb_acts[3][i][j-2] = {}
actvs[0][0] = F.relu(x) - F.relu(x-1)
c1 = F.relu(self.conv1(actvs[0][0]))
actvs[1][0] = self.pool(c1)
c2 = F.relu(self.conv2(actvs[1][0]))
actvs[2][0] = self.pool(c2)
actvs[3][0] = F.relu(self.fc1(actvs[2][0].view(-1, self.n_feats*2 * 9 * 9)))
actvs[4] = actvs[3][0]
if self.t_steps > 0:
for t in np.arange(self.t_steps-1):
fb_acts[0][0][0][t] = self.t_flag*self.c1xb(actvs[1][t])
fb_acts[0][0][1][t] = self.t_flag*self.c2xb(actvs[2][t])
fb_acts[0][0][2][t] = self.t_flag*(self.fc1xb(actvs[3][t])).view(-1,1,100,100)
fb_acts_comb[0][0][t] = self.b_flag*(fb_acts[0][0][0][t] + fb_acts[0][0][1][t] + fb_acts[0][0][2][t])
fb_acts[0][1][0][t] = self.t_flag*self.c1xg(actvs[1][t])
fb_acts[0][1][1][t] = self.t_flag*self.c2xg(actvs[2][t])
fb_acts[0][1][2][t] = self.t_flag*(self.fc1xg(actvs[3][t])).view(-1,1,100,100)
fb_acts_comb[0][1][t] = self.g_flag*(fb_acts[0][1][0][t] + fb_acts[0][1][1][t] + fb_acts[0][1][2][t])
dumh000 = (x + self.b_flag*(self.t_flag*(self.c1xb(actvs[1][t])+self.c2xb(actvs[2][t])+(self.fc1xb(actvs[3][t])).view(-1,1,100,100)))) * (1.+self.g_flag*self.t_flag*(self.c1xg(actvs[1][t])+self.c2xg(actvs[2][t])+(self.fc1xg(actvs[3][t])).view(-1,1,100,100)))
actvs[0][t+1] = (F.relu(dumh000) - F.relu(dumh000-1))
fb_acts[1][0][0][t] = self.l_flag*self.c1c1b(c1)
fb_acts[1][0][1][t] = self.t_flag*self.c2c1b(actvs[2][t])
fb_acts[1][0][2][t] = self.t_flag*(self.fc1c1b(actvs[3][t])).view(-1,self.n_feats,96,96)
fb_acts_comb[1][0][t] = self.b_flag*(fb_acts[1][0][0][t] + fb_acts[1][0][1][t] + fb_acts[1][0][2][t])
fb_acts[1][1][0][t] = self.l_flag*self.c1c1g(c1)
fb_acts[1][1][1][t] = self.t_flag*self.c2c1g(actvs[2][t])
fb_acts[1][1][2][t] = self.t_flag*(self.fc1c1g(actvs[3][t])).view(-1,self.n_feats,96,96)
fb_acts_comb[1][1][t] = self.g_flag*(fb_acts[1][1][0][t] + fb_acts[1][1][1][t] + fb_acts[1][1][2][t])
c1 = F.relu(self.conv1(actvs[0][t+1])+self.b_flag*(self.l_flag*self.c1c1b(c1)+self.t_flag*(self.c2c1b(actvs[2][t])+(self.fc1c1b(actvs[3][t])).view(-1,self.n_feats,96,96)))) * (1.+self.g_flag*(self.l_flag*self.c1c1g(c1)+self.t_flag*(self.c2c1g(actvs[2][t])+(self.fc1c1g(actvs[3][t])).view(-1,self.n_feats,96,96))))
actvs[1][t+1] = self.pool(c1)
fb_acts[2][0][0][t] = self.l_flag*self.c2c2b(c2)
fb_acts[2][0][1][t] = self.t_flag*(self.fc1c2b(actvs[3][t])).view(-1,self.n_feats*2,28,28)
fb_acts_comb[2][0][t] = self.b_flag*(fb_acts[2][0][0][t] + fb_acts[2][0][1][t])
fb_acts[2][1][0][t] = self.l_flag*self.c2c2g(c2)
fb_acts[2][1][1][t] = self.t_flag*(self.fc1c2g(actvs[3][t])).view(-1,self.n_feats*2,28,28)
fb_acts_comb[2][1][t] = self.g_flag*(fb_acts[2][1][0][t] + fb_acts[2][1][1][t])
c2 = F.relu(self.conv2(actvs[1][t+1])+self.b_flag*(self.l_flag*self.c2c2b(c2)+self.t_flag*(self.fc1c2b(actvs[3][t])).view(-1,self.n_feats*2,28,28))) * (1.+self.g_flag*(self.l_flag*self.c2c2g(c2)+self.t_flag*(self.fc1c2g(actvs[3][t])).view(-1,self.n_feats*2,28,28)))
actvs[2][t+1] = self.pool(c2)
fb_acts[3][0][0][t] = self.l_flag*self.fc1fc1b(actvs[3][t])
fb_acts[3][1][0][t] = self.l_flag*self.fc1fc1g(actvs[3][t])
fb_acts_comb[3][0][t] = self.b_flag*fb_acts[3][0][0][t]
fb_acts_comb[3][1][t] = self.g_flag*fb_acts[3][1][0][t]
actvs[3][t+1] = F.relu(self.fc1(actvs[2][t+1].view(-1, self.n_feats*2 * 9 * 9))+self.b_flag*self.l_flag*self.fc1fc1b(actvs[3][t])) * (1.+self.g_flag*self.l_flag*self.fc1fc1g(actvs[3][t]))
actvs[4] = torch.cat((actvs[4],actvs[3][t+1]),1)
actvs[5] = torch.log(torch.clamp(F.softmax(self.fc2(actvs[4]),dim=1),1e-10,1.0))
return fb_acts_comb
# Defining the RNN class that injects externally supplied (perturbed) feedback at a chosen layer and timestep, keeps the earlier sweeps unchanged, and lets the subsequent sweeps unfold naturally
class RNNet_1step(nn.Module):
def __init__(self, n_feats=8, ker_size=5,t_steps=3,b_flag=0,g_flag=1,l_flag=1,t_flag=1):
super(RNNet_1step, self).__init__()
self.conv1 = nn.Conv2d(1, n_feats, ker_size)
self.pool = nn.MaxPool2d(3, 3)
self.conv2 = nn.Conv2d(n_feats, n_feats*2, ker_size)
self.fc1 = nn.Linear(n_feats*2 * 9 * 9, n_feats*16)
self.fc2 = nn.Linear(n_feats*16*t_steps, 20)
self.dropout = nn.Dropout(0.5)
self.c1xb = nn.ConvTranspose2d(n_feats,1,7,3) # in_channel, out_channel, kernel_size, stride, padding
self.c2xb = nn.ConvTranspose2d(n_feats*2,1,20,10)
self.fc1xb = nn.Linear(n_feats*16, 100*100)
self.c1c1b = nn.Conv2d(n_feats, n_feats, ker_size, 1, 2)
self.c2c1b = nn.ConvTranspose2d(n_feats*2,n_feats,16,10)
self.fc1c1b = nn.Linear(n_feats*16, 96*96*n_feats)
self.c2c2b = nn.Conv2d(n_feats*2, n_feats*2, ker_size, 1, 2)
self.fc1c2b = nn.Linear(n_feats*16, 28*28*n_feats*2)
self.fc1fc1b = nn.Linear(n_feats*16, n_feats*16)
self.c1xg = nn.ConvTranspose2d(n_feats,1,7,3) # in_channel, out_channel, kernel_size, stride, padding
self.c2xg = nn.ConvTranspose2d(n_feats*2,1,20,10)
self.fc1xg = nn.Linear(n_feats*16, 100*100)
self.c1c1g = nn.Conv2d(n_feats, n_feats, ker_size, 1, 2)
self.c2c1g = nn.ConvTranspose2d(n_feats*2,n_feats,16,10)
self.fc1c1g = nn.Linear(n_feats*16, 96*96*n_feats)
self.c2c2g = nn.Conv2d(n_feats*2, n_feats*2, ker_size, 1, 2)
self.fc1c2g = nn.Linear(n_feats*16, 28*28*n_feats*2)
self.fc1fc1g = nn.Linear(n_feats*16, n_feats*16)
self.n_feats = n_feats
self.t_steps = t_steps
self.b_flag = b_flag
self.g_flag = g_flag
self.l_flag = l_flag
self.t_flag = t_flag
def forward(self, x, fb_acts_comb_org, fb_acts_comb_pert, pert_layer, pert_type, pert_time):
fb_b = {}
fb_g = {}
for lay in np.arange(4):
if pert_layer == lay:
if pert_type == 0: # feed original feedback at selected layer
fb_b[lay] = fb_acts_comb_org[lay][0][pert_time]
fb_g[lay] = fb_acts_comb_org[lay][1][pert_time]
elif pert_type == 1: # feed perturbed feedback at selected layer
fb_b[lay] = fb_acts_comb_pert[lay][0][pert_time]
fb_g[lay] = fb_acts_comb_pert[lay][1][pert_time]
elif pert_type == 2: # feed control feedback at selected layer
fb_random_b = fb_acts_comb_pert[lay][0][pert_time].detach().clone()
fb_random_g = fb_acts_comb_pert[lay][1][pert_time].detach().clone()
for imgh in np.arange(list(fb_random_b.size())[0]):
fb_random_b_diff = fb_acts_comb_pert[lay][0][pert_time][imgh] - fb_acts_comb_org[lay][0][pert_time][imgh]
idx = torch.randperm(fb_random_b_diff.nelement())
fb_random_b[imgh] = fb_random_b_diff.view(-1)[idx].view(fb_random_b_diff.size()) + fb_acts_comb_org[lay][0][pert_time][imgh]
fb_random_diff = fb_acts_comb_pert[lay][1][pert_time][imgh] - fb_acts_comb_org[lay][1][pert_time][imgh]
idx = torch.randperm(fb_random_diff.nelement())
fb_random_g[imgh] = fb_random_diff.view(-1)[idx].view(fb_random_diff.size()) + fb_acts_comb_org[lay][1][pert_time][imgh]
fb_b[lay] = fb_random_b
fb_g[lay] = fb_random_g
else: # feed original feedback at all other layers
fb_b[lay] = fb_acts_comb_org[lay][0][pert_time]
fb_g[lay] = fb_acts_comb_org[lay][1][pert_time]
actvs = {}
actvs[0] = {}
actvs[1] = {}
actvs[2] = {}
actvs[3] = {}
actvs[0][0] = F.relu(x) - F.relu(x-1)
c1 = F.relu(self.conv1(actvs[0][0]))
actvs[1][0] = self.pool(c1)
c2 = F.relu(self.conv2(actvs[1][0]))
actvs[2][0] = self.pool(c2)
actvs[3][0] = F.relu(self.fc1(actvs[2][0].view(-1, self.n_feats*2 * 9 * 9)))
actvs[4] = actvs[3][0]
if self.t_steps > 0:
for t in np.arange(self.t_steps-1):
if t == pert_time:
dumh000 = (x + fb_b[0]) * (1.+fb_g[0])
else:
dumh000 = (x + self.b_flag*(self.t_flag*(self.c1xb(actvs[1][t])+self.c2xb(actvs[2][t])+(self.fc1xb(actvs[3][t])).view(-1,1,100,100)))) * (1.+self.g_flag*self.t_flag*(self.c1xg(actvs[1][t])+self.c2xg(actvs[2][t])+(self.fc1xg(actvs[3][t])).view(-1,1,100,100)))
actvs[0][t+1] = (F.relu(dumh000) - F.relu(dumh000-1))
if t == pert_time:
c1 = F.relu(self.conv1(actvs[0][t+1])+fb_b[1]) * (1.+fb_g[1])
else:
c1 = F.relu(self.conv1(actvs[0][t+1])+self.b_flag*(self.l_flag*self.c1c1b(c1)+self.t_flag*(self.c2c1b(actvs[2][t])+(self.fc1c1b(actvs[3][t])).view(-1,self.n_feats,96,96)))) * (1.+self.g_flag*(self.l_flag*self.c1c1g(c1)+self.t_flag*(self.c2c1g(actvs[2][t])+(self.fc1c1g(actvs[3][t])).view(-1,self.n_feats,96,96))))
actvs[1][t+1] = self.pool(c1)
if t == pert_time:
c2 = F.relu(self.conv2(actvs[1][t+1])+ fb_b[2]) * (1.+ fb_g[2])
else:
c2 = F.relu(self.conv2(actvs[1][t+1])+self.b_flag*(self.l_flag*self.c2c2b(c2)+self.t_flag*(self.fc1c2b(actvs[3][t])).view(-1,self.n_feats*2,28,28))) * (1.+self.g_flag*(self.l_flag*self.c2c2g(c2)+self.t_flag*(self.fc1c2g(actvs[3][t])).view(-1,self.n_feats*2,28,28)))
actvs[2][t+1] = self.pool(c2)
if t == pert_time:
actvs[3][t+1] = F.relu(self.fc1(actvs[2][t+1].view(-1, self.n_feats*2 * 9 * 9))+ fb_b[3]) * (1.+ fb_g[3])
else:
actvs[3][t+1] = F.relu(self.fc1(actvs[2][t+1].view(-1, self.n_feats*2 * 9 * 9))+self.b_flag*self.l_flag*self.fc1fc1b(actvs[3][t])) * (1.+self.g_flag*self.l_flag*self.fc1fc1g(actvs[3][t]))
actvs[4] = torch.cat((actvs[4],actvs[3][t+1]),1)
actvs[5] = torch.log(torch.clamp(F.softmax(self.fc2(actvs[4]),dim=1),1e-10,1.0))
return actvs[5]
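# Editor's note: illustrative call sequence (a sketch, mirroring the analysis loop below):
#   fb_org  = net_all(x_org)    # recurrent inputs computed on the original images
#   fb_pert = net_all(x_pert)   # recurrent inputs computed on the perturbed images
#   out = net_fin(x_org, fb_org, fb_pert, pert_layer=1, pert_type=1, pert_time=0)
# pert_type selects what is injected at pert_layer/pert_time: 0 original feedback,
# 1 perturbed feedback, 2 shuffled-difference control; all other layers receive the original
# feedback at pert_time, and every other timestep uses the naturally computed recurrence.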
if __name__ == '__main__':
############################# NETWORK PARAMETERS ##################################
n_feats = 8 # in Conv layer 1
ker_size = 5 # in Conv layer 1
b_h = 0 # bias/additive modulation flag
g_h = 1 # gain/multiplicative modulation flag
l_h = 1 # lateral interactions flag
t_h = 1 # top-down interactions flag
net_num = 5
t_steps = 4 # number of timesteps
net_save_str = 'rnn_bglt_'+str(b_h)+str(g_h)+str(l_h)+str(t_h)+'_t_'+str(t_steps)+'_num_'+str(net_num)
print(net_save_str)
n_ex = 1000
n_rep = 5
net_all = RNNet_all_fbr(n_feats,ker_size,4,b_h,g_h,l_h,t_h)
net_all = net_all.float()
net_all.load_state_dict(torch.load(net_save_str+'.pth',map_location=torch.device('cpu')))
net_all.eval()
net_fin = RNNet_1step(n_feats,ker_size,4,b_h,g_h,l_h,t_h)
net_fin = net_fin.float()
net_fin.load_state_dict(torch.load(net_save_str+'.pth',map_location=torch.device('cpu')))
net_fin.eval()
############################# Perturbation analysis ##################################
    perturbed_accuracies = np.zeros([6,2,4,t_steps-1,n_rep]) # perturbation (pos_x/pos_y/orientation/scale/cat-within/cat-between), pert/control, layers, timesteps, repetitions
original_accuracy = np.zeros([n_rep,1])
for repr in np.arange(n_rep):
imgs_h,imgs_h_xswap,imgs_h_yswap,imgs_h_rotswap,imgs_h_sizeswap,imgs_h_catswap_w,imgs_h_catswap_b,labs_h,pos_x_h,pos_y_h,size_h,rot_h = gen_images(n_ex,2)
input_img_org = torch.from_numpy(imgs_h).float()
input_img_xswap = torch.from_numpy(imgs_h_xswap).float()
input_img_yswap = torch.from_numpy(imgs_h_yswap).float()
input_img_rotswap = torch.from_numpy(imgs_h_rotswap).float()
input_img_sizeswap = torch.from_numpy(imgs_h_sizeswap).float()
input_img_catswap_w = torch.from_numpy(imgs_h_catswap_w).float()
input_img_catswap_b = torch.from_numpy(imgs_h_catswap_b).float()
labels_img = torch.from_numpy(labs_h).float()
fb_org = net_all(input_img_org)
outputs = net_fin(input_img_org,fb_org,fb_org,0,0,2)
_, predicted = torch.max(outputs.data, 1)
total = labels_img.size(0)
correct = np.sum(predicted.cpu().numpy() == torch.max(labels_img, 1)[1].cpu().numpy())
original_accuracy[repr] = correct/total
for th in np.arange(t_steps-1):
for lay in np.arange(4):
for pert in np.arange(6):
if pert == 0:
input_img_pert = input_img_xswap
elif pert == 1:
input_img_pert = input_img_yswap
elif pert == 2:
input_img_pert = input_img_rotswap
elif pert == 3:
input_img_pert = input_img_sizeswap
elif pert == 4:
input_img_pert = input_img_catswap_w
elif pert == 5:
input_img_pert = input_img_catswap_b
fb_pert = net_all(input_img_pert)
for contr in np.arange(2):
outputs = net_fin(input_img_org,fb_org,fb_pert,lay,contr+1,th)
_, predicted = torch.max(outputs.data, 1)
total = labels_img.size(0)
correct = np.sum(predicted.cpu().numpy() == torch.max(labels_img, 1)[1].cpu().numpy())
perturbed_accuracies[pert,contr,lay,th,repr] = perturbed_accuracies[pert,contr,lay,th,repr] + correct/total
print(repr,lay,pert,contr,th)
out_str = 'fb_perturb-'+'rnn_bglt_'+str(b_h)+str(g_h)+str(l_h)+str(t_h)+'_t_'+str(t_steps)+'_num_'+str(net_num)+'.npy'
with open(out_str, 'wb') as f:
np.save(f, original_accuracy)
np.save(f, perturbed_accuracies)
| 42,801 | 75.160142 | 590 | py |
svrhm21_RNN_explain | svrhm21_RNN_explain-main/RNN_gen.py | # Script to define the RNN and dataset and to train the RNN
# Requires tensorflow 1.13, python 3.7, scikit-learn, and pytorch 1.6.0
############################# IMPORTING MODULES ##################################
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import tensorflow as tf
import scipy
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.interpolation import rotate
from random import shuffle
from sklearn import svm
from scipy import ndimage
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
fmnist = input_data.read_data_sets('fMNIST_data', one_hot=True)
############################# FUNCTIONS DEFINED ##################################
# A function to scramble image chunks
def im_scram(im,parts_h): # scramble parts_h*parts_h equal parts of the given image
win_prop = parts_h
dimsh = np.shape(im)
im_new = np.zeros(dimsh)
dimsh_win = np.floor(dimsh[0]/win_prop)
n_cells = np.square(np.int(dimsh[0]/dimsh_win))
cell_c = np.int(dimsh[0]/dimsh_win)
ind_new = np.linspace(0,n_cells-1,n_cells).astype('int32')
while np.mean(ind_new == np.linspace(0,n_cells-1,n_cells).astype('int32')) == 1:
shuffle(ind_new)
for i in range(n_cells):
j = ind_new[i]
im_new[np.int(np.mod(i,cell_c)*dimsh_win):np.int(np.mod(i,cell_c)*dimsh_win+dimsh_win),
np.int(np.floor(i*1./cell_c*1.)*dimsh_win):np.int(np.floor(i*1./cell_c*1.)*dimsh_win+dimsh_win)] = im[
np.int(np.mod(j,cell_c)*dimsh_win):np.int(np.mod(j,cell_c)*dimsh_win+dimsh_win),
np.int(np.floor(j*1./cell_c*1.)*dimsh_win):np.int(np.floor(j*1./cell_c*1.)*dimsh_win+dimsh_win)]
return im_new
# A function to generate images and the respective labels for training and testing
def gen_images(n_imgs,n_set): # n_imgs required, set used (0 train, 1 val, 2 test) 8 objects in image (1 is intact), 2 levels of zoom, rotation and x/y pos for each object
imgs_h = np.zeros([n_imgs,1,100,100])
imgs_h1 = np.zeros([n_imgs,1,100,100])
labs_h = np.zeros([n_imgs,20])
pos_x_h = np.zeros([n_imgs,2])
pos_y_h = np.zeros([n_imgs,2])
size_h = np.zeros([n_imgs,2])
rot_h = np.zeros([n_imgs,2])
n_objs = 8
for n_im in np.arange(n_imgs):
inst_img = np.zeros([100,100])
inst_img1 = np.zeros([100,100])
obj_ord = np.linspace(0,n_objs-1,n_objs)
dum_obj_ind = 4+np.random.randint(n_objs/2)
dum_dat_ord = (np.random.random(8) < 0.5)*1.
for i in np.arange(n_objs):
if dum_dat_ord[i] == 0: # dataset M or F
if n_set == 0:
dathh = mnist.train
elif n_set == 1:
dathh = mnist.validation
elif n_set == 2:
dathh = mnist.test
inst_obj_ind = np.random.randint(np.shape(dathh.images)[0])
if i == dum_obj_ind:
inst_lab = np.where(dathh.labels[inst_obj_ind,:]==1)[0][0]
inst_obj = np.reshape(dathh.images[inst_obj_ind,:],(28,28))
else:
if n_set == 0:
dathh = fmnist.train
elif n_set == 1:
dathh = fmnist.validation
elif n_set == 2:
dathh = fmnist.test
inst_obj_ind = np.random.randint(np.shape(dathh.images)[0])
if i == dum_obj_ind:
inst_lab = 10 + np.where(dathh.labels[inst_obj_ind,:]==1)[0][0]
inst_obj = np.reshape(dathh.images[inst_obj_ind,:],(28,28))
dumh111 = (np.random.random(1)[0] > 0.5)*1
if dumh111 == 0: # zoom 0.9 or 1.5
inst_obj = zoom(inst_obj,0.9+(np.random.random(1)[0]-0.5)/5.) # zoom 0.8 to 1.
else:
inst_obj = zoom(inst_obj,1.5+(np.random.random(1)[0]-0.5)/5.) # zoom 1.4 to 1.6
if i == dum_obj_ind:
size_h[n_im,dumh111] = 1.
dumh111 = (np.random.random(1)[0] > 0.5)*1
if dumh111 == 0: # rotate 30 or -30
inst_obj = rotate(inst_obj,30+(np.random.random(1)[0]-0.5)*2*5,reshape=False) # rotate 25 to 35
else:
inst_obj = rotate(inst_obj,-30+(np.random.random(1)[0]-0.5)*2*5,reshape=False) # rotate -25 to -35
if i == dum_obj_ind:
rot_h[n_im,dumh111] = 1.
if i != dum_obj_ind:
inst_obj = im_scram(inst_obj,3) # scrambled if not object of interest
if np.mod(obj_ord[i],4) == 0: # x_loc up or down
x_loc = np.int(np.round(25 + (np.random.random(1)[0]-0.5)*2*2.5)) # 25 +- 2.5
y_loc = np.int(np.round(25 + (np.random.random(1)[0]-0.5)*2*2.5)) # 25 +- 2.5
if i == dum_obj_ind:
pos_y_h[n_im,0] = 1.
pos_x_h[n_im,0] = 1.
elif np.mod(obj_ord[i],4) == 1:
x_loc = np.int(np.round(75 + (np.random.random(1)[0]-0.5)*2*2.5)) # 75 +- 2.5
y_loc = np.int(np.round(25 + (np.random.random(1)[0]-0.5)*2*2.5)) # 25 +- 2.5
if i == dum_obj_ind:
pos_y_h[n_im,1] = 1.
pos_x_h[n_im,0] = 1.
elif np.mod(obj_ord[i],4) == 2:
x_loc = np.int(np.round(25 + (np.random.random(1)[0]-0.5)*2*2.5)) # 25 +- 2.5
y_loc = np.int(np.round(75 + (np.random.random(1)[0]-0.5)*2*2.5)) # 75 +- 2.5
if i == dum_obj_ind:
pos_y_h[n_im,0] = 1.
pos_x_h[n_im,1] = 1.
elif np.mod(obj_ord[i],4) == 3:
x_loc = np.int(np.round(75 + (np.random.random(1)[0]-0.5)*2*2.5)) # 75 +- 2.5
y_loc = np.int(np.round(75 + (np.random.random(1)[0]-0.5)*2*2.5)) # 75 +- 2.5
if i == dum_obj_ind:
pos_y_h[n_im,1] = 1.
pos_x_h[n_im,1] = 1.
inst_obj = (inst_obj-np.min(inst_obj))/(np.max(inst_obj)-np.min(inst_obj))
# print(np.int(np.floor(np.shape(inst_obj)[0]/2)),np.int(np.ceil(np.shape(inst_obj)[0]/2)),np.shape(inst_obj)[0])
inst_img[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
if i == dum_obj_ind:
inst_img1[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] = (1-inst_obj)*inst_img1[x_loc-np.int(np.floor(np.shape(inst_obj)[0]/2.)):x_loc+np.int(np.ceil(np.shape(inst_obj)[0]/2.)),y_loc-np.int(np.floor(np.shape(inst_obj)[1]/2.)):y_loc+np.int(np.ceil(np.shape(inst_obj)[1]/2.))] + (inst_obj)*inst_obj
inst_img = (inst_img-np.min(inst_img))/(np.max(inst_img)-np.min(inst_img))
inst_img1 = (inst_img1-np.min(inst_img1))/(np.max(inst_img1)-np.min(inst_img1))
if np.isnan(np.min(inst_img)) or np.isnan(np.min(inst_img1)):
print('NaN in input')
exit(1)
imgs_h[n_im,0,:,:] = inst_img
imgs_h1[n_im,0,:,:] = inst_img1
labs_h[n_im,inst_lab] = 1.
return imgs_h,imgs_h1,labs_h,pos_x_h,pos_y_h,size_h,rot_h
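# gen_images (above) returns (imgs, imgs_target_only, labels, pos_x, pos_y, size, rot):
# imgs are (n_imgs, 1, 100, 100) composite scenes containing one intact target and
# seven scrambled distractors, imgs_target_only contain only the intact target,
# labels are 20-way one-hot (10 MNIST digits + 10 fashion-MNIST classes), and the
# remaining arrays are 2-way one-hot codes for the target's x/y quadrant, zoom
# level and rotation direction.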
# Defining the RNN class
class RNNet_all(nn.Module):
def __init__(self, n_feats=8, ker_size=5,t_steps=3,b_flag=1,g_flag=1,l_flag=1,t_flag=1):
super(RNNet_all, self).__init__()
self.conv1 = nn.Conv2d(1, n_feats, ker_size)
self.pool = nn.MaxPool2d(3, 3)
self.conv2 = nn.Conv2d(n_feats, n_feats*2, ker_size)
self.fc1 = nn.Linear(n_feats*2 * 9 * 9, n_feats*16)
self.fc2 = nn.Linear(n_feats*16*t_steps, 20)
self.dropout = nn.Dropout(0.5)
self.c1xb = nn.ConvTranspose2d(n_feats,1,7,3) # in_channel, out_channel, kernel_size, stride, padding
self.c2xb = nn.ConvTranspose2d(n_feats*2,1,20,10)
self.fc1xb = nn.Linear(n_feats*16, 100*100)
self.c1c1b = nn.Conv2d(n_feats, n_feats, ker_size, 1, 2)
self.c2c1b = nn.ConvTranspose2d(n_feats*2,n_feats,16,10)
self.fc1c1b = nn.Linear(n_feats*16, 96*96*n_feats)
self.c2c2b = nn.Conv2d(n_feats*2, n_feats*2, ker_size, 1, 2)
self.fc1c2b = nn.Linear(n_feats*16, 28*28*n_feats*2)
self.fc1fc1b = nn.Linear(n_feats*16, n_feats*16)
self.c1xg = nn.ConvTranspose2d(n_feats,1,7,3) # in_channel, out_channel, kernel_size, stride, padding
self.c2xg = nn.ConvTranspose2d(n_feats*2,1,20,10)
self.fc1xg = nn.Linear(n_feats*16, 100*100)
self.c1c1g = nn.Conv2d(n_feats, n_feats, ker_size, 1, 2)
self.c2c1g = nn.ConvTranspose2d(n_feats*2,n_feats,16,10)
self.fc1c1g = nn.Linear(n_feats*16, 96*96*n_feats)
self.c2c2g = nn.Conv2d(n_feats*2, n_feats*2, ker_size, 1, 2)
self.fc1c2g = nn.Linear(n_feats*16, 28*28*n_feats*2)
self.fc1fc1g = nn.Linear(n_feats*16, n_feats*16)
self.n_feats = n_feats
self.t_steps = t_steps
self.b_flag = b_flag
self.g_flag = g_flag
self.l_flag = l_flag
self.t_flag = t_flag
def forward(self, x):
actvs = {}
actvsx = {}
actvsc1 = {}
actvsc2 = {}
actvsfc = {}
actvs[0] = actvsx
actvs[1] = actvsc1
actvs[2] = actvsc2
actvs[3] = actvsfc
actvs[0][0] = F.relu(x) - F.relu(x-1) # input activation clipping implemented this way to make it differentiable
c1 = F.relu(self.conv1(actvs[0][0]))
actvs[1][0] = self.pool(c1)
c2 = F.relu(self.conv2(actvs[1][0]))
actvs[2][0] = self.pool(c2)
actvs[3][0] = F.relu(self.fc1(actvs[2][0].view(-1, self.n_feats*2 * 9 * 9)))
actvs[4] = actvs[3][0]
if self.t_steps > 0:
for t in np.arange(self.t_steps-1):
dumh000 = (x + self.b_flag*self.t_flag*(self.c1xb(actvs[1][t])+self.c2xb(actvs[2][t])+(self.fc1xb(actvs[3][t])).view(-1,1,100,100))) * (1.+self.g_flag*self.t_flag*(self.c1xg(actvs[1][t])+self.c2xg(actvs[2][t])+(self.fc1xg(actvs[3][t])).view(-1,1,100,100)))
actvs[0][t+1] = (F.relu(dumh000) - F.relu(dumh000-1))
c1 = F.relu(self.conv1(actvs[0][t+1])+self.b_flag*(self.l_flag*self.c1c1b(c1)+self.t_flag*(self.c2c1b(actvs[2][t])+(self.fc1c1b(actvs[3][t])).view(-1,self.n_feats,96,96)))) * (1.+self.g_flag*(self.l_flag*self.c1c1g(c1)+self.t_flag*(self.c2c1g(actvs[2][t])+(self.fc1c1g(actvs[3][t])).view(-1,self.n_feats,96,96))))
actvs[1][t+1] = self.pool(c1)
c2 = F.relu(self.conv2(actvs[1][t+1])+self.b_flag*(self.l_flag*self.c2c2b(c2)+self.t_flag*((self.fc1c2b(actvs[3][t])).view(-1,self.n_feats*2,28,28)))) * (1.+self.g_flag*(self.l_flag*self.c2c2g(c2)+self.t_flag*((self.fc1c2g(actvs[3][t])).view(-1,self.n_feats*2,28,28))))
actvs[2][t+1] = self.pool(c2)
actvs[3][t+1] = F.relu(self.fc1(actvs[2][t+1].view(-1, self.n_feats*2 * 9 * 9))+self.b_flag*self.l_flag*self.fc1fc1b(actvs[3][t])) * (1.+self.g_flag*self.l_flag*self.fc1fc1g(actvs[3][t]))
actvs[4] = torch.cat((actvs[4],actvs[3][t+1]),1)
actvs[5] = torch.log(torch.clamp(F.softmax(self.fc2(actvs[4]),dim=1),1e-10,1.0))
return actvs
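# Sketch (not part of the original code): the input clipping used in forward(),
# F.relu(x) - F.relu(x - 1), equals clamp(x, 0, 1) while keeping unit gradients
# inside the clipping range. The helper below is added only as an illustration
# and is never called by the training code.
def _check_differentiable_clip():
    x = torch.linspace(-2., 3., steps=11, requires_grad=True)
    clipped = F.relu(x) - F.relu(x - 1)
    assert torch.allclose(clipped, torch.clamp(x, 0., 1.))
    # the backward pass yields gradient 1 inside the clipping range and 0 outside
    clipped.sum().backward()
    return x.grad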
preprocess = transforms.Compose(
[transforms.ToTensor()])
# Xavier initialisation
def weights_init(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.zeros_(m.bias)
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.zeros_(m.bias)
if __name__ == '__main__':
# torch.autograd.detect_anomaly()
############################# NETWORK PARAMETERS ##################################
n_feats = 8 # in Conv layer 1
ker_size = 5 # in Conv layer 1
b_h = 0 # bias modulation flag
g_h = 1 # gain modulation flag
l_h = 1 # lateral interactions flag
t_h = 1 # top-down interactions flag
net_num = 5 # to train multiple networks - id of current network
batch_size = 32
n_iter = 300000
lrh = 0.0001
t_steps = 4 # number of timesteps
net_save_str = 'rnn_bglt_'+str(b_h)+str(g_h)+str(l_h)+str(t_h)+'_t_'+str(t_steps)+'_num_'+str(net_num)
print(net_save_str)
############################# TRAINING ##################################
train_loss = np.zeros([np.int(n_iter/100),1])
val_loss = np.zeros([np.int(n_iter/1000),1])
net = RNNet_all(n_feats,ker_size,t_steps,b_h,g_h,l_h,t_h)
net.apply(weights_init)
net = net.float()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net.to(device)
    # Gradient clipping at every parameter by registering hooks - the hook is called every time a gradient is computed (https://pytorch.org/docs/stable/autograd.html)
    # To avoid exploding gradients
# for p in net.parameters():
# p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
print('Net created!')
criterion = nn.NLLLoss()
optimizer = optim.Adam(net.parameters(), lr=lrh)
inputs_v,_,labels_v,_,_,_,_ = gen_images(5000,1)
inputs_v = torch.from_numpy(inputs_v).float()
inputs_v = inputs_v.to(device)
labels_v = torch.from_numpy(labels_v).float()
labels_v = labels_v.to(device)
running_loss = 0.0
for i in range(n_iter):
inputs,_,labels,_,_,_,_ = gen_images(batch_size,0)
inputs = torch.from_numpy(inputs).float()
inputs = inputs.to(device)
labels = torch.from_numpy(labels).float()
labels = labels.to(device)
optimizer.zero_grad()
outputs1 = net(inputs.float())
loss = criterion(outputs1[5], torch.max(labels, 1)[1])
loss.backward()
# torch.nn.utils.clip_grad_value_(net.parameters(), clip_value)
optimizer.step()
running_loss += loss.item()
if i == 0:
print('%5d - loss: %.3f' % (i, running_loss))
if i % 100 == 99:
print('%5d - loss: %.3f' % (i, running_loss/100))
train_loss[int((i+1)/100)-1,0] = running_loss/100
running_loss = 0.0
if i % 1000 == 999:
net.eval()
with torch.no_grad():
outputs1 = net(inputs_v.float())
outputs = outputs1[5]
_, predicted = torch.max(outputs.data, 1)
total = labels_v.size(0)
correct = np.sum(predicted.cpu().numpy() == torch.max(labels_v, 1)[1].cpu().numpy())
loss = criterion(outputs, torch.max(labels_v, 1)[1])
val_loss[int((i+1)/1000)-1,0] = loss.item()
print('%5d - Validation accuracy: %d %%' % (i, 100 * correct / total))
print('%5d - Validation loss: %.3f' % (i, loss.item()))
net.train()
if i % 1000 == 999:
np.savez('loss_'+net_save_str+'.npz', train_loss=train_loss, val_loss=val_loss)
if i % 10000 == 9999:
torch.save(net.state_dict(), net_save_str+'.pth')
print('Done training!')
net.eval()
torch.save(net.state_dict(), net_save_str+'.pth')
np.savez('loss_'+net_save_str+'.npz', train_loss=train_loss, val_loss=val_loss)
| 15,812 | 47.210366 | 462 | py |
MetaXLR | MetaXLR-main/mlt.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
try:
import apex
except ImportError:
pass
BERT_DIM = 768
BERT_LAYERS = 13 # (emb + 12 hidden from transformers)
IGNORED_INDEX = -100
'''
def trim_input(bert_ids, bert_mask, bert_labels=None):
max_length = (bert_mask !=0).max(0)[0].nonzero().numel()
if max_length < bert_ids.shape[1]:
bert_ids = bert_ids[:, :max_length]
bert_mask = bert_mask[:, :max_length]
if bert_labels is not None:
bert_labels = bert_labels[:, :max_length]
if bert_labels is not None:
return bert_ids, bert_mask, bert_labels
else:
return bert_ids, bert_mask
'''
def permute_aug(data, mask, labels, ncopy):
# permute token order in seqs
# bs, seqlen, dim
bs, max_seqlen = data.size()
    # make sure [CLS] and [SEP] are not modified?
seqlens = (mask!=0).sum(-1)-2
auged_data = [data]
auged_mask = [mask]
auged_labels = [labels]
for _ in range(ncopy):
for i in range(bs):
seqlen = int(seqlens[i].cpu().item())
perm = np.random.permutation(seqlen) + 1
new_idx = [0] + list(perm) + [seqlen+1] + list(range(seqlen+2, max_seqlen))
auged_data.append(data[i, new_idx].unsqueeze(0))
auged_mask.append(mask[i].unsqueeze(0))
auged_labels.append(labels[i, new_idx].unsqueeze(0))
return torch.cat(auged_data, 0), torch.cat(auged_mask, 0), torch.cat(auged_labels, 0)
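# permute_aug (above) keeps the original batch and appends ncopy extra copies of
# every sequence in which the tokens between [CLS] and [SEP] are randomly
# permuted (labels are permuted identically, masks are unchanged), so the
# returned batch dimension is bs * (1 + ncopy).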
def _dot(grad_a, grad_b):
return sum([torch.dot(gv[0].view(-1), gv[1].view(-1)) for gv in zip(grad_a, grad_b) if gv[0] is not None and gv[1] is not None])
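# _dot (above) sums the per-parameter inner products <a_i, b_i> over two lists of
# gradients, skipping entries that are None (which autograd.grad produces with
# allow_unused=True); the step_* functions below use it to form the proxy
# hypergradient objective.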
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
def sync_backward(loss, opt, args, retain_graph=False): # DDP and AMP compatible backward
if args.amp == -1: # no amp
loss.backward(retain_graph=retain_graph)
else:
with apex.amp.scale_loss(loss, opt) as scaled_loss:
scaled_loss.backward(retain_graph=retain_graph)
def sync_autograd(loss, net, opt, args, retain_graph=False): # DDP and AMP compatible autograd
if args.local_rank == -1: # single GPU
grads = torch.autograd.grad(loss, net.parameters(), allow_unused=True)
else:
# distributed, with AMP optionally
net.zero_grad()
if args.amp == -1: # PyTorch DDP
loss.backward(retain_graph=retain_graph)
else: # Apex DDP
with apex.amp.scale_loss(loss, opt) as scaled_loss:
scaled_loss.backward(retain_graph=retain_graph)
        # this assumes the loss scale is 1; when the loss is scaled, p.grad may not hold valid gradient values!
grads = [p.grad.clone() for p in net.parameters()]
return grads
def modify_parameters(net, deltas, eps):
for param, delta in zip(net.parameters(), deltas):
if delta is not None:
param.data.add_(eps, delta)
# for i, param in enumerate(net.parameters()):
#param.data += eps * grads[i]
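# modify_parameters(net, d, eps) shifts every parameter by eps * d in place. The
# finite-difference step functions further below call it with +eps and then
# -2*eps to evaluate the source loss at w + eps*d and w - eps*d, which gives the
# central-difference estimate
#   <d, dL_s/dw> ~= (L_s(w + eps*d) - L_s(w - eps*d)) / (2 * eps),
# and a final +eps call restores the original weights.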
# logit is a 3d tensor and labels is 2d
def masked_cross_entropy_longvector(logit, labels):
bs, seqlen, _ = logit.size()
loss_vector = F.cross_entropy(logit.view(-1, logit.size(-1)),
labels.reshape(-1),
ignore_index=IGNORED_INDEX,
reduction='none')
# filter out IGNORED_INDEX
loss_vector = loss_vector[labels.reshape(-1)!=IGNORED_INDEX]
return loss_vector.unsqueeze(-1)
# logit is a 3d tensor and labels is 2d
def masked_cross_entropy_matrix(logit, labels):
bs, seqlen, _ = logit.size()
loss_vector = F.cross_entropy(logit.view(-1, logit.size(-1)),
labels.reshape(-1),
ignore_index=IGNORED_INDEX,
reduction='none')
loss_matrix = loss_vector.view(bs, seqlen).sum(-1) / ((labels!=IGNORED_INDEX).sum(-1))
return loss_matrix.unsqueeze(-1)
def masked_cross_entropy(logit, labels, weights=None):
# loss_sum = F.cross_entropy(logit.view(-1, logit.size(-1)),
# labels.reshape(-1),
# ignore_index=IGNORED_INDEX,
# reduction='sum')
loss = F.cross_entropy(logit.view(-1, logit.size(-1)),
labels.reshape(-1),
ignore_index=IGNORED_INDEX,
reduction='none')
if weights is not None:
# print(loss.shape)
# print(weights.shape)
loss_sum = torch.sum(loss * weights)
# print(loss_sum)
else:
loss_sum = torch.sum(loss)
loss = loss_sum / (labels!=IGNORED_INDEX).sum()
return loss
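# Shapes of the three masked cross-entropy helpers above, for logit of shape
# (bs, seqlen, C) and labels of shape (bs, seqlen) padded with IGNORED_INDEX:
#   masked_cross_entropy_longvector -> (n_valid_tokens, 1), one loss per valid token
#   masked_cross_entropy_matrix     -> (bs, 1), mean loss per sequence
#   masked_cross_entropy            -> scalar, (optionally weighted) mean over valid tokens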
# this only applies meta_net to the transformer layer given by layerid
def forward_last(model, raptor, data, mask, ext_mask, layerid): # only insert meta_net to the last layer of transformer
_, h = model(data, attention_mask=mask)
new_h = raptor(h[layerid]) # h from last transformer
logit = model.forward_tail(layerid+1, new_h, attention_mask=ext_mask)
return logit
def step_mlt_multi(main_net, main_opt, meta_net, meta_opt,
data_s, mask_s, target_s,
data_t, mask_t, target_t,
eta, args):
# META NET START
# given current meta net, get transformed features
_, h_s = main_net(data_s, attention_mask=mask_s)
ext_mask_s = main_net.get_ext_mask(mask_s)
alpha = meta_net.get_alpha()
loss_s = 0
for i in range(BERT_LAYERS):
new_h = meta_net(h_s[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s += alpha[i] * masked_cross_entropy(logit_s, target_s)
# retain graph as for DDP it uses backward to get the gradients, is not set when using single GPU
#f_param_grads = sync_autograd(loss_s, main_net, main_opt, args, retain_graph=True)
    f_param_grads = torch.autograd.grad(loss_s, main_net.parameters(), allow_unused=True, create_graph=True)
# /////////// NEW WAY ////////////
# or just use SGD as in Algorithm 1, this works best for now
f_param = [param.data.clone() for param in main_net.parameters()]
for i, param in enumerate(main_net.parameters()):
if f_param_grads[i] is not None:
#f_param.append(param.data.clone())
param.data.sub_(args.magic*eta*f_param_grads[i]) # SGD update
# 3. compute d_w' L_{D}(w')
logit_t, _ = main_net(data_t, attention_mask=mask_t)
loss_t = masked_cross_entropy(logit_t, target_t)
    f_param_grads_prime = torch.autograd.grad(loss_t, main_net.parameters(), allow_unused=True)
#f_param_grads_prime = sync_autograd(loss_t, main_net, main_opt, args)
# 4. revert from w' to w for main net
for i, param in enumerate(main_net.parameters()):
param.data = f_param[i]
proxy_g = -args.magic * eta * _dot(f_param_grads, f_param_grads_prime)
# back prop on alphas
meta_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_g
sync_backward(proxy_g, meta_opt, args)
meta_opt.step()
# META NET END
# MAIN NET START
#main_net.train()
# loss on data_s
_, h_s = main_net(data_s, attention_mask=mask_s)
alpha = meta_net.get_alpha().detach()
loss_s = 0
for i in range(BERT_LAYERS):
        new_h = meta_net(h_s[i]).detach()
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s += alpha[i] * masked_cross_entropy(logit_s, target_s)
# update classifier weights
main_opt.zero_grad()
# backward on loss_s
sync_backward(loss_s, main_opt, args)
main_opt.step()
# MAIN NET END
return loss_t, loss_s
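# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original repository): the bi-level pattern
# shared by the step_* functions in this file, reduced to a toy classifier with
# per-example meta weights. All names below (toy_bilevel_step, meta_param, ...)
# are hypothetical; lr plays the role of args.magic * eta above. The function is
# never called by the training code.
def toy_bilevel_step(main_net, meta_param, main_opt, meta_opt,
                     x_s, y_s, x_g, y_g, lr):
    params = list(main_net.parameters())
    # 1. meta-weighted source loss; create_graph=True keeps the gradients
    #    differentiable w.r.t. meta_param
    loss_s = (torch.sigmoid(meta_param) *
              F.cross_entropy(main_net(x_s), y_s, reduction='none')).mean()
    grads = torch.autograd.grad(loss_s, params, create_graph=True)
    # 2. one-step SGD lookahead: w' = w - lr * dL_s/dw
    backup = [p.data.clone() for p in params]
    for p, g in zip(params, grads):
        p.data.sub_(lr * g.detach())
    # 3. validation loss at w' and its gradient d_w' L_g(w')
    loss_g = F.cross_entropy(main_net(x_g), y_g)
    grads_prime = torch.autograd.grad(loss_g, params)
    # 4. restore w, then build a proxy whose gradient w.r.t. meta_param matches
    #    d L_g(w') / d meta_param to first order, since w' = w - lr * d_w L_s(meta_param)
    for p, b in zip(params, backup):
        p.data = b
    proxy = -lr * sum((g * gp).sum() for g, gp in zip(grads, grads_prime))
    meta_opt.zero_grad()
    proxy.backward()
    meta_opt.step()
    # 5. finally update the main net on the re-weighted source loss
    loss_s = (torch.sigmoid(meta_param).detach() *
              F.cross_entropy(main_net(x_s), y_s, reduction='none')).mean()
    main_opt.zero_grad()
    loss_s.backward()
    main_opt.step()
    return loss_g.item(), loss_s.item()
# ---------------------------------------------------------------------------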
# ============== mlt step procedure debug ===================
# NOTE: main_net is a BERT-like model
# meta_net is implemented as nn.Module as usual
def step_metaw_mix(main_net, main_opt, meta_net, meta_opt,
data_s, mask_s, target_s, # data from other languages
data_t, mask_t, target_t, # train data for target lang
data_g, mask_g, target_g, # validation data for target lang
eta, args):
# META NET START
# given current meta net, get transformed features
logit_s, h_s = main_net(data_s, attention_mask=mask_s)
logit_t, _ = main_net(data_t, attention_mask=mask_t)
loss_t = masked_cross_entropy(logit_t, target_t)
loss_s = masked_cross_entropy_matrix(logit_s, target_s)
w = meta_net(loss_s.detach()) # (bs, 1)
loss_s_w = w * loss_s
bs_t = (target_t!=IGNORED_INDEX).sum()
bs_s = (target_s!=IGNORED_INDEX).sum()
loss_train = (loss_t * bs_t + loss_s_w.sum()) / (bs_t + bs_s) # bs_s or w.sum()????
# retain graph as for DDP it uses backward to get the gradients, is not set when using single GPU
#f_param_grads = sync_autograd(loss_train, main_net, main_opt, args, retain_graph=True)
f_param_grads = torch.autograd.grad(loss_train, main_net.parameters(), allow_unused=True, create_graph=True)
# /////////// NEW WAY ////////////
# or just use SGD as in Algorithm 1, this works best for now
f_param = [param.data.clone() for param in main_net.parameters()]
for i, param in enumerate(main_net.parameters()):
if f_param_grads[i] is not None:
param.data.sub_(args.magic*eta*f_param_grads[i]) # SGD update
# 3. compute d_w' L_{D}(w')
logit_g, _ = main_net(data_g, attention_mask=mask_g)
loss_eval = masked_cross_entropy(logit_g, target_g)
f_param_grads_prime = torch.autograd.grad(loss_eval, main_net.parameters(), allow_unused=True)
#f_param_grads_prime = sync_autograd(loss_eval, main_net, main_opt, args)
# 4. revert from w' to w for main net
for i, param in enumerate(main_net.parameters()):
param.data = f_param[i]
proxy_g = -args.magic * eta * _dot(f_param_grads, f_param_grads_prime)
# back prop on alphas
meta_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_eval
sync_backward(proxy_g, meta_opt, args)#, retain_graph=True)
meta_opt.step()
# META NET END
# MAIN NET START
# loss on data_s
logit_t, _ = main_net(data_t, attention_mask=mask_t)
logit_s, h_s = main_net(data_s, attention_mask=mask_s)
loss_t = masked_cross_entropy(logit_t, target_t)
loss_s = masked_cross_entropy_matrix(logit_s, target_s)
w = meta_net(loss_s.detach()).detach() # note the detach here
loss_s_w = w * loss_s
loss_train = (loss_t * bs_t + loss_s_w.sum()) / (bs_t + bs_s) # bs_s or w.sum()????
# update classifier weights
main_opt.zero_grad()
# backward on loss_train
sync_backward(loss_train, main_opt, args)
main_opt.step()
# MAIN NET END
return loss_eval, loss_train
# ============== mlt step procedure debug ===================
# NOTE: main_net is a BERT-like model
# meta_net is implemented as nn.Module as usual
def step_metawt_mix(main_net, main_opt, meta_net, meta_opt,
data_s, mask_s, target_s, # data from other languages
data_t, mask_t, target_t, # train data for target lang
data_g, mask_g, target_g, # validation data for target lang
eta, args):
# META NET START
# given current meta net, get transformed features
logit_s, h_s = main_net(data_s, attention_mask=mask_s)
logit_t, _ = main_net(data_t, attention_mask=mask_t)
loss_t = masked_cross_entropy(logit_t, target_t)
loss_s = masked_cross_entropy_longvector(logit_s, target_s)
w = meta_net(loss_s.detach())
print(w[0])
loss_s_w = w * loss_s
bs_t = (target_t!=IGNORED_INDEX).sum()
bs_s = (target_s!=IGNORED_INDEX).sum()
loss_train = (loss_t * bs_t + loss_s_w.sum()) / (bs_t + bs_s) # bs_s or w.sum()????
# retain graph as for DDP it uses backward to get the gradients, is not set when using single GPU
#f_param_grads = sync_autograd(loss_train, main_net, main_opt, args, retain_graph=True)
f_param_grads = torch.autograd.grad(loss_train, main_net.parameters(), allow_unused=True, create_graph=True)
# /////////// NEW WAY ////////////
# or just use SGD as in Algorithm 1, this works best for now
f_param = [param.data.clone() for param in main_net.parameters()]
for i, param in enumerate(main_net.parameters()):
if f_param_grads[i] is not None:
param.data.sub_(args.magic*eta*f_param_grads[i]) # SGD update
# 3. compute d_w' L_{D}(w')
logit_g, _ = main_net(data_g, attention_mask=mask_g)
loss_eval = masked_cross_entropy(logit_g, target_g)
f_param_grads_prime = torch.autograd.grad(loss_eval, main_net.parameters(), allow_unused=True)
#f_param_grads_prime = sync_autograd(loss_eval, main_net, main_opt, args)
# 4. revert from w' to w for main net
for i, param in enumerate(main_net.parameters()):
param.data = f_param[i]
proxy_g = -args.magic * eta * _dot(f_param_grads, f_param_grads_prime)
# back prop on alphas
meta_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_eval
sync_backward(proxy_g, meta_opt, args)#, retain_graph=True)
meta_opt.step()
# META NET END
# MAIN NET START
# loss on data_s
logit_t, _ = main_net(data_t, attention_mask=mask_t)
logit_s, h_s = main_net(data_s, attention_mask=mask_s)
loss_t = masked_cross_entropy(logit_t, target_t)
loss_s = masked_cross_entropy_longvector(logit_s, target_s)
w = meta_net(loss_s.detach()).detach() # note the detach here
loss_s_w = w * loss_s
loss_train = (loss_t * bs_t + loss_s_w.sum()) / (bs_t + bs_s) # bs_s or w.sum()????
# update classifier weights
main_opt.zero_grad()
# backward on loss_train
sync_backward(loss_train, main_opt, args)
main_opt.step()
# MAIN NET END
return loss_eval, loss_train
# ============== mlt step procedure debug ===================
# NOTE: main_net is a BERT-like model
# meta_net is implemented as nn.Module as usual
def step_metawt_multi_mix(main_net, main_opt, meta_net, meta_opt,
data_s, mask_s, target_s, # data from other languages
data_t, mask_t, target_t, # train data for target lang
data_g, mask_g, target_g, # validation data for target lang
eta, args, idx):# idx is id for lang
# META NET START
# given current meta net, get transformed features
logit_s, h_s = main_net(data_s, attention_mask=mask_s)
logit_t, _ = main_net(data_t, attention_mask=mask_t)
loss_t = masked_cross_entropy(logit_t, target_t)
loss_s = masked_cross_entropy_longvector(logit_s, target_s)
w = meta_net(idx, loss_s.detach())
loss_s_w = w * loss_s
bs_t = (target_t!=IGNORED_INDEX).sum()
bs_s = (target_s!=IGNORED_INDEX).sum()
loss_train = (loss_t * bs_t + loss_s_w.sum()) / (bs_t + bs_s) # bs_s or w.sum()????
# retain graph as for DDP it uses backward to get the gradients, is not set when using single GPU
#f_param_grads = sync_autograd(loss_train, main_net, main_opt, args, retain_graph=True)
f_param_grads = torch.autograd.grad(loss_train, main_net.parameters(), allow_unused=True, create_graph=True)
# /////////// NEW WAY ////////////
# or just use SGD as in Algorithm 1, this works best for now
f_param = [param.data.clone() for param in main_net.parameters()]
for i, param in enumerate(main_net.parameters()):
if f_param_grads[i] is not None:
param.data.sub_(args.magic*eta*f_param_grads[i]) # SGD update
# 3. compute d_w' L_{D}(w')
logit_g, _ = main_net(data_g, attention_mask=mask_g)
loss_eval = masked_cross_entropy(logit_g, target_g)
f_param_grads_prime = torch.autograd.grad(loss_eval, main_net.parameters(), allow_unused=True)
#f_param_grads_prime = sync_autograd(loss_eval, main_net, main_opt, args)
# 4. revert from w' to w for main net
for i, param in enumerate(main_net.parameters()):
param.data = f_param[i]
proxy_g = -args.magic * eta * _dot(f_param_grads, f_param_grads_prime)
# back prop on alphas
meta_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_eval
sync_backward(proxy_g, meta_opt, args)#, retain_graph=True)
meta_opt.step()
# META NET END
# MAIN NET START
# loss on data_s
logit_t, _ = main_net(data_t, attention_mask=mask_t)
logit_s, h_s = main_net(data_s, attention_mask=mask_s)
loss_t = masked_cross_entropy(logit_t, target_t)
loss_s = masked_cross_entropy_longvector(logit_s, target_s)
w = meta_net(idx, loss_s.detach()).detach() # note the detach here
loss_s_w = w * loss_s
loss_train = (loss_t * bs_t + loss_s_w.sum()) / (bs_t + bs_s) # bs_s or w.sum()????
# update classifier weights
main_opt.zero_grad()
# backward on loss_train
sync_backward(loss_train, main_opt, args)
main_opt.step()
# MAIN NET END
return loss_eval, loss_train
# ============== mlt step procedure debug ===================
# NOTE: main_net is a BERT-like model
# meta_net is implemented as nn.Module as usual
def step_mlt_multi_mix(main_net, main_opt, meta_net, meta_opt,
data_s, mask_s, target_s, # data from other languages
data_t, mask_t, target_t, # train data for target lang
data_g, mask_g, target_g, # validation data for target lang
eta, args):
# META NET START
# given current meta net, get transformed features
_, h_s = main_net(data_s, attention_mask=mask_s)
logit_t, _ = main_net(data_t, attention_mask=mask_t)
ext_mask_s = main_net.get_ext_mask(mask_s)
alpha = meta_net.get_alpha()
loss_train = masked_cross_entropy(logit_t, target_t)
loss_train2 = 0
for i in range(BERT_LAYERS):
new_h = meta_net(i, h_s[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_train2 += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_train = (loss_train + loss_train2) / 2
# retain graph as for DDP it uses backward to get the gradients, is not set when using single GPU
#f_param_grads = sync_autograd(loss_train, main_net, main_opt, args, retain_graph=True)
f_param_grads = torch.autograd.grad(loss_train, main_net.parameters(), allow_unused=True, create_graph=True)
# /////////// NEW WAY ////////////
# or just use SGD as in Algorithm 1, this works best for now
f_param = [param.data.clone() for param in main_net.parameters()]
for i, param in enumerate(main_net.parameters()):
if f_param_grads[i] is not None:
#f_param.append(param.data.clone())
param.data.sub_(args.magic*eta*f_param_grads[i]) # SGD update
# 3. compute d_w' L_{D}(w')
logit_g, _ = main_net(data_g, attention_mask=mask_g)
loss_eval = masked_cross_entropy(logit_g, target_g)
f_param_grads_prime = torch.autograd.grad(loss_eval, main_net.parameters(), allow_unused=True)
#f_param_grads_prime = sync_autograd(loss_eval, main_net, main_opt, args)
# 4. revert from w' to w for main net
for i, param in enumerate(main_net.parameters()):
param.data = f_param[i]
proxy_g = -args.magic * eta * _dot(f_param_grads, f_param_grads_prime)
# back prop on alphas
meta_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_eval
sync_backward(proxy_g, meta_opt, args, retain_graph=True)
meta_opt.step()
# META NET END
# MAIN NET START
# loss on data_s
#_, h_s = main_net(data_s, attention_mask=mask_s)
#logit_t, _ = main_net(data_t, attention_mask=mask_t)
alpha = meta_net.get_alpha().detach()
loss_train = masked_cross_entropy(logit_t, target_t)
loss_train2 = 0
for i in range(BERT_LAYERS):
new_h = meta_net(i, h_s[i]).detach()
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_train2 += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_train = (loss_train + loss_train2) / 2
# update classifier weights
main_opt.zero_grad()
# backward on loss_train
sync_backward(loss_train, main_opt, args)
main_opt.step()
# MAIN NET END
return loss_eval, loss_train
# ============== mlt step procedure debug ===================
# NOTE: main_net is a BERT-like model
# meta_net is implemented as nn.Module as usual
def step_mlt_multi_fd(main_net, main_opt, meta_net, meta_opt,
data_s, mask_s, target_s, # data from other languages
data_t, mask_t, target_t, # train data for target lang
data_g, mask_g, target_g, # validation data for target lang
eta, args):
# META NET START
# given current meta net, get transformed features
_, h_s = main_net(data_s, attention_mask=mask_s)
logit_t, _ = main_net(data_t, attention_mask=mask_t)
ext_mask_s = main_net.get_ext_mask(mask_s)
alpha = meta_net.get_alpha()
loss_train = masked_cross_entropy(logit_t, target_t)
loss_train2 = 0
for i in range(BERT_LAYERS):
new_h = meta_net(i, h_s[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_train2 += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_train = (loss_train + loss_train2) / 2
# retain graph as for DDP it uses backward to get the gradients, is not set when using single GPU
f_param_grads = sync_autograd(loss_train, main_net, main_opt, args, retain_graph=True)
# /////////// NEW WAY ////////////
# or just use SGD as in Algorithm 1, this works best for now
f_param = [param.data.clone() for param in main_net.parameters()]
for i, param in enumerate(main_net.parameters()):
if f_param_grads[i] is not None:
#f_param.append(param.data.clone())
param.data.sub_(args.magic*eta*f_param_grads[i]) # SGD update
# 3. compute d_w' L_{D}(w')
logit_g = main_net(data_g, attention_mask=mask_g)[0]
loss_eval = masked_cross_entropy(logit_g, target_g)
f_param_grads_prime = sync_autograd(loss_eval, main_net, main_opt, args)
# 4. revert from w' to w for main net
for i, param in enumerate(main_net.parameters()):
param.data = f_param[i]
# change main_net parameter
eps = 1e-3 # #1e-2 / _concat(f_param_grads_prime).norm()# eta 1e-6 before
# modify w to w+
modify_parameters(main_net, f_param_grads_prime, eps)
_, h_s_p = main_net(data_s, attention_mask=mask_s)
logit_t_p, _ = main_net(data_t, attention_mask=mask_t)
loss_train_p = masked_cross_entropy(logit_t_p, target_t)
loss_train_p2 = 0
for i in range(BERT_LAYERS):
new_h = meta_net(i, h_s_p[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_train_p2 += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_train_p = (loss_train_p + loss_train_p2) / 2
# modify w to w- (w is w+ now)
modify_parameters(main_net, f_param_grads_prime, -2*eps)
_, h_s_n = main_net(data_s, attention_mask=mask_s)
logit_t_n, _ = main_net(data_t, attention_mask=mask_t)
loss_train_n = masked_cross_entropy(logit_t_n, target_t)
loss_train_n2 = 0
for i in range(BERT_LAYERS):
new_h = meta_net(i, h_s_n[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_train_n2 += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_train_n = (loss_train_n + loss_train_n2) / 2
proxy_g = -args.magic * eta * (loss_train_p - loss_train_n) / (2.*eps)
# modify to original w
modify_parameters(main_net, f_param_grads_prime, eps)
# back prop on alphas
meta_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_eval
sync_backward(proxy_g, meta_opt, args)
meta_opt.step()
# META NET END
# MAIN NET START
# loss on data_s
_, h_s = main_net(data_s, attention_mask=mask_s)
logit_t, _ = main_net(data_t, attention_mask=mask_t)
alpha = meta_net.get_alpha().detach()
loss_train = masked_cross_entropy(logit_t, target_t)
loss_train2 = 0
for i in range(BERT_LAYERS):
new_h = meta_net(i, h_s[i]).detach()
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_train2 += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_train = (loss_train + loss_train2) / 2
# update classifier weights
main_opt.zero_grad()
# backward on loss_train
sync_backward(loss_train, main_opt, args)
main_opt.step()
# MAIN NET END
return loss_eval, loss_train
# ============== mlt step procedure debug ===================
# NOTE: main_net is a BERT-like model
# meta_net is implemented as nn.Module as usual
def step_mlt(main_net, main_opt, meta_net, meta_opt,
data_s, mask_s, target_s,
data_t, mask_t, target_t,
eta, args):
# META NET START
# given current meta net, get transformed features
_, h_s = main_net(data_s, attention_mask=mask_s)
ext_mask_s = main_net.get_ext_mask(mask_s)
alpha = meta_net.get_alpha()
loss_s = 0
for i in range(BERT_LAYERS):
new_h = meta_net(h_s[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s += alpha[i] * masked_cross_entropy(logit_s, target_s)
# retain graph as for DDP it uses backward to get the gradients, is not set when using single GPU
f_param_grads = sync_autograd(loss_s, main_net, main_opt, args, retain_graph=True)
# /////////// NEW WAY ////////////
# or just use SGD as in Algorithm 1, this works best for now
f_param = [param.data.clone() for param in main_net.parameters()]
for i, param in enumerate(main_net.parameters()):
if f_param_grads[i] is not None:
#f_param.append(param.data.clone())
param.data.sub_(args.magic*eta*f_param_grads[i]) # SGD update
# 3. compute d_w' L_{D}(w')
logit_t = main_net(data_t, attention_mask=mask_t)[0]
loss_t = masked_cross_entropy(logit_t, target_t)
f_param_grads_prime = sync_autograd(loss_t, main_net, main_opt, args)
# 4. revert from w' to w for main net
for i, param in enumerate(main_net.parameters()):
param.data = f_param[i]
# change main_net parameter
eps = 1e-6 # 1e-3 / _concat(f_param_grads_prime).norm()# eta 1e-6 before
# modify w to w+
modify_parameters(main_net, f_param_grads_prime, eps)
_, h_s_p = main_net(data_s, attention_mask=mask_s)
loss_s_p = 0
for i in range(BERT_LAYERS):
new_h = meta_net(h_s_p[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s_p += alpha[i] * masked_cross_entropy(logit_s, target_s)
# modify w to w- (w is w+ now)
modify_parameters(main_net, f_param_grads_prime, -2*eps)
_, h_s_n = main_net(data_s, attention_mask=mask_s)
loss_s_n = 0
for i in range(BERT_LAYERS):
new_h = meta_net(h_s_n[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s_n += alpha[i] * masked_cross_entropy(logit_s, target_s)
proxy_g = -args.magic * eta * (loss_s_p - loss_s_n) / (2.*eps)
# modify to original w
modify_parameters(main_net, f_param_grads_prime, eps)
# back prop on alphas
meta_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_g
sync_backward(proxy_g, meta_opt, args)
meta_opt.step()
# META NET END
# MAIN NET START
#main_net.train()
# loss on data_s
_, h_s = main_net(data_s, attention_mask=mask_s)
alpha = meta_net.get_alpha().detach()
loss_s = 0
for i in range(BERT_LAYERS):
new_h = meta_net(h_s_p[i]).detach()
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s += alpha[i] * masked_cross_entropy(logit_s, target_s)
# update classifier weights
main_opt.zero_grad()
# backward on loss_s
sync_backward(loss_s, main_opt, args)
main_opt.step()
# MAIN NET END
return loss_t, loss_s
# ============== mlt step procedure debug ===================
# NOTE: main_net is a BERT-like model
# meta_net is implemented as nn.Module as usual
def step_mlt_mix(main_net, main_opt, meta_net, meta_opt,
data_s, mask_s, target_s,
data_g, mask_g, target_g,
data_t, mask_t, target_t,
eta, args):
# META NET START
# given current meta net, get transformed features
_, h_s = main_net(data_s, attention_mask=mask_s)
logit_g, _ = main_net(data_g, attention_mask=mask_g)
ext_mask_s = main_net.get_ext_mask(mask_s)
alpha = meta_net.get_alpha()
loss_s = masked_cross_entropy(logit_g, target_g)
for i in range(BERT_LAYERS):
new_h = meta_net(h_s[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_s /= 2
# retain graph as for DDP it uses backward to get the gradients, is not set when using single GPU
f_param_grads = sync_autograd(loss_s, main_net, main_opt, args, retain_graph=True)
# /////////// NEW WAY ////////////
# or just use SGD as in Algorithm 1, this works best for now
f_param = [param.data.clone() for param in main_net.parameters()]
for i, param in enumerate(main_net.parameters()):
if f_param_grads[i] is not None:
#f_param.append(param.data.clone())
param.data.sub_(args.magic*eta*f_param_grads[i]) # SGD update
# 3. compute d_w' L_{D}(w')
logit_t = main_net(data_t, attention_mask=mask_t)[0]
loss_t = masked_cross_entropy(logit_t, target_t)
f_param_grads_prime = sync_autograd(loss_t, main_net, main_opt, args)
# 4. revert from w' to w for main net
for i, param in enumerate(main_net.parameters()):
param.data = f_param[i]
# change main_net parameter
eps = 1e-6 # 1e-3 / _concat(f_param_grads_prime).norm()# eta 1e-6 before
# modify w to w+
modify_parameters(main_net, f_param_grads_prime, eps)
_, h_s_p = main_net(data_s, attention_mask=mask_s)
logit_g_p, _ = main_net(data_g, attention_mask=mask_g)
loss_s_p = masked_cross_entropy(logit_g_p, target_g)
for i in range(BERT_LAYERS):
new_h = meta_net(h_s_p[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s_p += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_s_p /= 2
# modify w to w- (w is w+ now)
modify_parameters(main_net, f_param_grads_prime, -2*eps)
_, h_s_n = main_net(data_s, attention_mask=mask_s)
logit_g_n, _ = main_net(data_g, attention_mask=mask_g)
loss_s_n = masked_cross_entropy(logit_g_n, target_g)
for i in range(BERT_LAYERS):
new_h = meta_net(h_s_n[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s_n += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_s_n /= 2
proxy_g = -args.magic * eta * (loss_s_p - loss_s_n) / (2.*eps)
# modify to original w
modify_parameters(main_net, f_param_grads_prime, eps)
# back prop on alphas
meta_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_g
sync_backward(proxy_g, meta_opt, args)
meta_opt.step()
# META NET END
# MAIN NET START
# loss on data_s
_, h_s = main_net(data_s, attention_mask=mask_s)
logit_g, _ = main_net(data_g, attention_mask=mask_g)
alpha = meta_net.get_alpha().detach()
loss_s = masked_cross_entropy(logit_g, target_g)
for i in range(BERT_LAYERS):
new_h = meta_net(h_s_p[i]).detach()
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_s /=2
# update classifier weights
main_opt.zero_grad()
# backward on loss_s
sync_backward(loss_s, main_opt, args)
main_opt.step()
# MAIN NET END
return loss_t, loss_s
# ============== gold only (supervised method) ===================
# NOTE: main_net is a BERT-like model
def step_gold_only(main_net, main_opt,
data_g, mask_g, target_g,
args):
# MAIN NET START
logit_g, _ = main_net(data_g, attention_mask=mask_g, for_classification=(args.task_name=="sent"))
loss_s = masked_cross_entropy(logit_g, target_g)
# update classifier weights
main_opt.zero_grad()
# backward on loss_s
sync_backward(loss_s, main_opt, args)
main_opt.step()
# MAIN NET END
loss_t = torch.tensor(-1).type_as(loss_s)
return loss_t, loss_s
# ============== gold mix (supervised method) ===================
# NOTE: main_net is a BERT-like model
def step_gold_mix(main_net, main_opt,
data_s, mask_s, target_s,
data_g, mask_g, target_g,
args):
# MAIN NET START
logit_g, _ = main_net(data_g, attention_mask=mask_g, for_classification= (args.task_name=="sent"))
loss_s = masked_cross_entropy(logit_g, target_g)
logit_s, _ = main_net(data_s, attention_mask=mask_s, for_classification= (args.task_name=="sent"))
loss_s += masked_cross_entropy(logit_s, target_s)
loss_s /= 2
# update classifier weights
main_opt.zero_grad()
# backward on loss_s
sync_backward(loss_s, main_opt, args)
main_opt.step()
# MAIN NET END
loss_t = torch.tensor(-1).type_as(loss_s)
return loss_t, loss_s
# ============== mlt zero resource transfer debug ===================
# NOTE: main_net is a BERT-like model
# meta_net is implemented as nn.Module as usual
# target_g shouldn't be used
def step_zero_mix(main_net, main_opt, meta_net, meta_opt,
data_s, mask_s, target_s,
data_g, mask_g, target_g,
data_t, mask_t, target_t,
eta, args):
# META NET START
# given current meta net, get transformed features
_, h_s = main_net(data_s, attention_mask=mask_s)
logit_g, _ = main_net(data_g, attention_mask=mask_g)
ext_mask_s = main_net.get_ext_mask(mask_s)
alpha = meta_net.get_alpha()
loss_s = masked_cross_entropy(logit_g, target_g)
for i in range(BERT_LAYERS):
new_h = meta_net(h_s[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_s /= 2
# retain graph as for DDP it uses backward to get the gradients, is not set when using single GPU
f_param_grads = sync_autograd(loss_s, main_net, main_opt, args, retain_graph=True)
# /////////// NEW WAY ////////////
# or just use SGD as in Algorithm 1, this works best for now
f_param = [param.data.clone() for param in main_net.parameters()]
for i, param in enumerate(main_net.parameters()):
if f_param_grads[i] is not None:
#f_param.append(param.data.clone())
param.data.sub_(args.magic*eta*f_param_grads[i]) # SGD update
# 3. compute d_w' L_{D}(w')
logit_t = main_net(data_t, attention_mask=mask_t)[0]
loss_t = masked_cross_entropy(logit_t, target_t)
f_param_grads_prime = sync_autograd(loss_t, main_net, main_opt, args)
# 4. revert from w' to w for main net
for i, param in enumerate(main_net.parameters()):
param.data = f_param[i]
# change main_net parameter
eps = 1e-6 # 1e-3 / _concat(f_param_grads_prime).norm()# eta 1e-6 before
# modify w to w+
modify_parameters(main_net, f_param_grads_prime, eps)
_, h_s_p = main_net(data_s, attention_mask=mask_s)
logit_g_p, _ = main_net(data_g, attention_mask=mask_g)
loss_s_p = masked_cross_entropy(logit_g_p, target_g)
for i in range(BERT_LAYERS):
new_h = meta_net(h_s_p[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s_p += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_s_p /= 2
# modify w to w- (w is w+ now)
modify_parameters(main_net, f_param_grads_prime, -2*eps)
_, h_s_n = main_net(data_s, attention_mask=mask_s)
logit_g_n, _ = main_net(data_g, attention_mask=mask_g)
loss_s_n = masked_cross_entropy(logit_g_n, target_g)
for i in range(BERT_LAYERS):
new_h = meta_net(h_s_n[i])
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s_n += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_s_n /= 2
proxy_g = -args.magic * eta * (loss_s_p - loss_s_n) / (2.*eps)
# modify to original w
modify_parameters(main_net, f_param_grads_prime, eps)
# back prop on alphas
meta_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_g
sync_backward(proxy_g, meta_opt, args)
meta_opt.step()
# META NET END
# MAIN NET START
# loss on data_s
_, h_s = main_net(data_s, attention_mask=mask_s)
logit_g, _ = main_net(data_g, attention_mask=mask_g)
alpha = meta_net.get_alpha().detach()
loss_s = masked_cross_entropy(logit_g, target_g)
for i in range(BERT_LAYERS):
new_h = meta_net(h_s_p[i]).detach()
logit_s = main_net.forward_tail(i+1, new_h, attention_mask=ext_mask_s)
loss_s += alpha[i] * masked_cross_entropy(logit_s, target_s)
loss_s /=2
# update classifier weights
main_opt.zero_grad()
# backward on loss_s
sync_backward(loss_s, main_opt, args)
main_opt.step()
# MAIN NET END
return loss_t, loss_s
def get_mask(mask):
src_lengths = torch.sum(mask, dim=1)
max_length = mask.shape[1]
permutation_mask = torch.stack([F.pad(torch.ones(src_length - 1, src_length - 1),
(0, max_length - src_length, 0, max_length - src_length))
for src_length in src_lengths])
permutation_minimal = torch.ones_like(permutation_mask) * (-100000)
permutation_mask = permutation_mask.to(mask.device)
permutation_minimal = permutation_minimal.to(mask.device)
return permutation_mask, permutation_minimal
def detached(x, detach):
if detach:
return x.detach()
else:
return x
# ============== metaxl dynamic language and layer debug ===================
def step_metaxl(main_net, main_opt,
meta_net, meta_opt,
reweighting_model, reweighting_opt,
data_s, mask_s, target_s,
data_g, mask_g, target_g,
data_t, mask_t, target_t,
source_language_id, transfer_layers, eta, args):
print(type(main_net))
bs_s = (target_s != IGNORED_INDEX).sum()
bs_g = (target_g != IGNORED_INDEX).sum()
logits_s, h_s = main_net(data_s, attention_mask=mask_s, for_classification=(args.task_name=="sent"))
logits_g, _ = main_net(data_g, mask_g, for_classification=(args.task_name=="sent"))
loss_g = masked_cross_entropy(logits_g, target_g)
ext_mask_s = main_net.get_ext_mask(mask_s)
alpha = meta_net.get_alpha(i = source_language_id)
loss_s = 0
for j, layer_id in enumerate(transfer_layers):
new_h = meta_net(source_language_id, j, h_s[layer_id].detach()) + h_s[layer_id]
sequence_output = main_net.forward_tail(layer_id + 1, new_h, attention_mask=ext_mask_s)
logits_s = main_net.forward_classifier(sequence_output, for_classification=(args.task_name=="sent"))
if args.add_instance_weights:
if args.weights_from == "features":
weights = reweighting_model(sequence_output.detach()) # batch * token * 1
weights = weights.squeeze(-1).view(-1)
# weights = reweighting_model(sequence_output[:, 0].detach()) # batch * 1 * 1
# weights = weights.repeat(1, sequence_output.shape[1]).view(-1)
loss_s += alpha[j] * masked_cross_entropy(logits_s, target_s, weights)
elif args.weights_from == "loss":
loss_ = masked_cross_entropy_longvector(logits_s, target_s)
weights = reweighting_model(loss_.detach())
loss_s += alpha[j] * (loss_ * weights).sum()
else:
loss_s += alpha[j] * masked_cross_entropy(logits_s, target_s)
if len(transfer_layers) == 0:
if args.add_instance_weights:
sequence_output = h_s[-1]
if args.weights_from == "features":
weights = reweighting_model(sequence_output.detach()) # batch * token * 1
weights = weights.squeeze(-1).view(-1)
loss_s += masked_cross_entropy(logits_s, target_s, weights)
elif args.weights_from == "loss":
loss_ = masked_cross_entropy_longvector(logits_s, target_s)
weights = reweighting_model(loss_.detach())
loss_s += (loss_ * weights).sum()
else:
loss_s += masked_cross_entropy(logits_s, target_s)
if args.add_instance_weights and args.weights_from == "loss":
loss_train = (loss_s + loss_g * bs_g) / (bs_s + bs_g)
else:
loss_train = loss_s + loss_g
# retain graph as for DDP it uses backward to get the gradients, is not set when using single GPU
# 1. calculate gradient
f_param_grads = torch.autograd.grad(loss_train, main_net.parameters(), allow_unused=True, create_graph=True)
# 2. update model parameters with the gradient
f_param = [param.data.clone() for param in main_net.parameters()]
for i, param in enumerate(main_net.parameters()):
if f_param_grads[i] is not None:
param.data.sub_(args.magic * eta * f_param_grads[i]) # SGD update
# 3. compute d_w' L_{D}(w')
logits_t, h_t = main_net(data_t, mask_t, for_classification=(args.task_name=="sent"))
loss_t = masked_cross_entropy(logits_t, target_t)
f_param_grads_prime = torch.autograd.grad(loss_t, main_net.parameters(), allow_unused=True)
# 4. revert from w' to w for main net
for i, param in enumerate(main_net.parameters()):
param.data = f_param[i]
proxy_g = -args.magic * eta * _dot(f_param_grads, f_param_grads_prime)
# back prop on alphas and extra structures
if meta_opt is not None:
meta_opt.zero_grad()
# print("before permute", permutate_net.permute_net[2].weight)
if args.add_instance_weights:
reweighting_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_g
# sync_backward(proxy_g, meta_opt, args)
# sync_backward(proxy_g, sinkhorn_opt, args)
proxy_g.backward(retain_graph=False)
# torch.nn.utils.clip_grad_norm_(meta_net.parameters(), 10)
if reweighting_model is not None:
torch.nn.utils.clip_grad_norm_(reweighting_model.parameters(), 10)
if meta_opt is not None:
meta_opt.step()
if args.add_instance_weights:
reweighting_opt.step()
# loss on data_s
logits_s, h_s = main_net(data_s, attention_mask=mask_s, for_classification=(args.task_name=="sent"))
logits_g, _ = main_net(data_g, attention_mask=mask_g, for_classification=(args.task_name=="sent"))
loss_g = masked_cross_entropy(logits_g, target_g)
ext_mask_s = main_net.get_ext_mask(mask_s)
alpha = meta_net.get_alpha(i=source_language_id)
loss_s = 0
for j, layer_id in enumerate(transfer_layers):
new_h = meta_net(source_language_id, j, h_s[layer_id].detach()) + h_s[layer_id]
sequence_output = main_net.forward_tail(layer_id + 1, new_h, attention_mask=ext_mask_s)
logits_s = main_net.forward_classifier(sequence_output, for_classification=(args.task_name=="sent"))
if args.add_instance_weights:
if args.weights_from == "features":
weights = reweighting_model(sequence_output.detach()) # batch * token * 1
weights = weights.squeeze(-1).view(-1)
loss_s += alpha[j] * masked_cross_entropy(logits_s, target_s, weights)
elif args.weights_from == "loss":
loss_ = masked_cross_entropy_longvector(logits_s, target_s)
weights = reweighting_model(loss_.detach())
print(weights[0])
loss_s += alpha[j] * (loss_ * weights).sum()
else:
loss_s += alpha[j] * masked_cross_entropy(logits_s, target_s)
if len(transfer_layers) == 0:
if args.add_instance_weights:
sequence_output = h_s[-1]
if args.weights_from == "features":
weights = reweighting_model(sequence_output.detach()) # batch * token * 1
weights = weights.squeeze(-1).view(-1)
loss_s += masked_cross_entropy(logits_s, target_s, weights)
elif args.weights_from == "loss":
loss_ = masked_cross_entropy_longvector(logits_s, target_s)
weights = reweighting_model(loss_.detach())
loss_s += (loss_ * weights).sum()
else:
loss_s += masked_cross_entropy(logits_s, target_s)
if args.add_instance_weights and args.weights_from == "loss":
loss_train = (loss_s + loss_g * bs_g) / (bs_s + bs_g)
else:
loss_train = loss_s + loss_g
# update classifier weights
main_opt.zero_grad()
# backward on loss_s
sync_backward(loss_train, main_opt, args)
torch.nn.utils.clip_grad_norm_(main_net.parameters(), 10)
main_opt.step()
# MAIN NET END
return loss_t, loss_train
def step_jt_metaxl(main_net, main_opt,
meta_net, meta_opt,
reweighting_model, reweighting_opt,
data_s, mask_s, target_s,
data_g, mask_g, target_g,
source_language_id, transfer_layers, eta, args):
bs_s = (target_s != IGNORED_INDEX).sum()
bs_g = (target_g != IGNORED_INDEX).sum()
logits_s, h_s = main_net(data_s, attention_mask=mask_s, for_classification=(args.task_name=="sent"))
logits_g, _ = main_net(data_g, mask_g, for_classification=(args.task_name=="sent"))
loss_g = masked_cross_entropy(logits_g, target_g)
ext_mask_s = main_net.get_ext_mask(mask_s)
alpha = meta_net.get_alpha(i = source_language_id)
loss_s = 0
for j, layer_id in enumerate(transfer_layers):
new_h = meta_net(source_language_id, j, h_s[layer_id])
sequence_output = main_net.forward_tail(layer_id + 1, new_h, attention_mask=ext_mask_s)
logits_s = main_net.forward_classifier(sequence_output, for_classification=(args.task_name=="sent"))
if args.add_instance_weights:
if args.weights_from == "features":
weights = reweighting_model(sequence_output[:, 0].detach()) # batch * 1 * 1
weights = weights.repeat(1, sequence_output.shape[1]).view(-1)
loss_s += alpha[j] * masked_cross_entropy(logits_s, target_s, weights)
elif args.weights_from == "loss":
loss_ = masked_cross_entropy_longvector(logits_s, target_s)
weights = reweighting_model(loss_.detach())
loss_s += alpha[j] * (loss_ * weights).sum()
else:
loss_s += alpha[j] * masked_cross_entropy(logits_s, target_s)
if len(transfer_layers) == 0:
if args.add_instance_weights:
sequence_output = h_s[-1]
if args.weights_from == "features":
weights = reweighting_model(sequence_output.detach()) # batch * token * 1
weights = weights.squeeze(-1).view(-1)
loss_s += masked_cross_entropy(logits_s, target_s, weights)
elif args.weights_from == "loss":
loss_ = masked_cross_entropy_longvector(logits_s, target_s)
weights = reweighting_model(loss_.detach())
loss_s += (loss_ * weights).sum()
else:
loss_s += masked_cross_entropy(logits_s, target_s)
if args.add_instance_weights and args.weights_from == "loss":
loss_train = (loss_s + loss_g * bs_g) / (bs_s + bs_g)
else:
loss_train = loss_s + loss_g
    # retain_graph would be needed under DDP, which relies on backward() to sync gradients; it is not set here since training runs on a single GPU
# update classifier weights
main_opt.zero_grad()
# back prop on alphas and extra structures
meta_opt.zero_grad()
if args.add_instance_weights:
reweighting_opt.zero_grad()
# backward on proxy_g as proxy_g shares the same gradient as loss_g
# sync_backward(proxy_g, meta_opt, args)
# sync_backward(proxy_g, sinkhorn_opt, args)
loss_train.backward(retain_graph=False)
# torch.nn.utils.clip_grad_norm_(meta_net.parameters(), 10)
if reweighting_model is not None:
torch.nn.utils.clip_grad_norm_(reweighting_model.parameters(), 10)
main_opt.step()
meta_opt.step()
if args.add_instance_weights:
reweighting_opt.step()
return torch.tensor(-1).type_as(loss_train), loss_train
# ============== metaxl finetune dynamic language and layer debug ===================
def step_metaxl_finetune(main_net, main_opt, meta_net,
data_s, mask_s, target_s,
data_t, mask_t, target_t,
source_language_id, transfer_layers, args):
    _, h_s = main_net(data_s, attention_mask=mask_s, for_classification=args.for_classification)
    logits_t, h_t = main_net(data_t, attention_mask=mask_t, for_classification=args.for_classification)
loss_t = masked_cross_entropy(logits_t, target_t)
ext_mask_s = main_net.get_ext_mask(mask_s)
alpha = meta_net.get_alpha(i = source_language_id)
loss_s = 0
    for j, layer_id in enumerate(transfer_layers):
        new_h = meta_net(source_language_id, j, h_s[layer_id])
        # forward_tail returns encoder hidden states; map them to label logits with the
        # classifier head (as in the other step functions) before computing the loss
        sequence_output = main_net.forward_tail(layer_id + 1, new_h, attention_mask=ext_mask_s)
        logit_s = main_net.forward_classifier(sequence_output, for_classification=args.for_classification)
        loss_s += alpha[j] * masked_cross_entropy(logit_s, target_s)
# update classifier weights
main_opt.zero_grad()
# backward on loss_s
sync_backward(loss_s + loss_t, main_opt, args)
main_opt.step()
# MAIN NET END
return loss_t, loss_s
| 50,680 | 38.075559 | 132 | py |
MetaXLR | MetaXLR-main/data_utils.py | # this class wraps a torch.utils.data.DataLoader into an iterator for batch by batch fetching
import torch
class DataIterator(object):
def __init__(self, dataloader, nonstop=True):
assert isinstance(dataloader, torch.utils.data.DataLoader), 'Wrong loader type'
self.loader = dataloader
self.iterator = iter(self.loader)
self.nonstop = nonstop
def __next__(self):
try:
tup = next(self.iterator)
except StopIteration:
if not self.nonstop:
raise StopIteration()
self.iterator = iter(self.loader)
tup = next(self.iterator)
return tup
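# Minimal usage sketch (illustrative only; the toy dataset and batch size below are
# hypothetical, not taken from the training pipeline). With nonstop=True the iterator
# silently restarts once the underlying DataLoader is exhausted, so it can be driven
# for an arbitrary number of training steps with next().
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset
    toy_data = TensorDataset(torch.arange(10).unsqueeze(1))
    loader = DataIterator(DataLoader(toy_data, batch_size=4, shuffle=True))
    for _ in range(5):                      # 5 > ceil(10 / 4), so the loader restarts
        (batch,) = next(loader)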
| 674 | 29.681818 | 93 | py |
MetaXLR | MetaXLR-main/models.py | import numpy as np
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertTokenizer, BertForTokenClassification, BertPreTrainedModel, XLMRobertaTokenizer, XLMRobertaForTokenClassification
from transformers.modeling_bert import BertLayer, BertModel, BertEmbeddings, BertEncoder, BertPooler
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
from tokenizers import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
from torch.nn import CrossEntropyLoss, MSELoss
IGNORED_INDEX = -100
class BERTSequenceTokenizer():
def __init__(self, bert_name, max_len=512, cache_dir='cache', tokenizer_dir=None):
#from pytorch_transformers import BertTokenizer
self.CLS = '[CLS]'
self.SEP = '[SEP]'
self.max_len = max_len
tok = XLMRobertaTokenizer if bert_name.startswith("xlm") else BertTokenizer
if tokenizer_dir is None or tokenizer_dir == "None":
self.tokenizer = tok.from_pretrained(bert_name, cache_dir=cache_dir)
else:
self.tokenizer = tok.from_pretrained(os.path.join(tokenizer_dir, 'vocab-vocab.txt'))
# self.tokenizer = ByteLevelBPETokenizer(
# vocab_file=os.path.join(tokenizer_dir, "vocab.json"),
# merges_file=os.path.join(tokenizer_dir, "merges.txt"))
# self.cls_id = self.tokenizer.token_to_id(self.CLS)
# self.sep_id = self.tokenizer.token_to_id(self.SEP)
self.cls_id = self.tokenizer.convert_tokens_to_ids(self.CLS)
self.sep_id = self.tokenizer.convert_tokens_to_ids(self.SEP)
def encode(self, token_list, label_list=None):
if type(label_list) == list:
assert len(token_list) == len(label_list), 'Mismatch text and label length!'
n_tokens = len(token_list)
ids = [self.cls_id]
labels = [IGNORED_INDEX]
for i, token in enumerate(token_list):
subword_ids = self.tokenizer.encode(token, add_special_tokens=False) # add_special_tokens has to be FALSE here
                if len(subword_ids) == 0: # some instances in WikiAnn are empty strings but still carry NER tags
                    subword_ids = [self.tokenizer.convert_tokens_to_ids('[OOV]')]
                    print('Empty subwords for |%s|, token tag: %s, replaced with [OOV]' % (token, label_list[i]))
ids = ids + subword_ids
labels.append(label_list[i])
# for further subwords append IGNORED_INDEX
labels = labels + [IGNORED_INDEX] * (len(subword_ids) -1)
ids.append(self.sep_id)
labels.append(IGNORED_INDEX)
else:
ids = [self.cls_id] + self.tokenizer.encode(token_list, add_special_tokens=False) + [self.sep_id]
labels = label_list
'''
print ('========================================== ')
print ('TOK:', token_list, len(token_list))
print ('LAB:', label_list, len(label_list))
print ('SUB:', self.tokenizer.convert_ids_to_tokens(ids), len(ids))
print ('IDS:', ids, len(ids))
print ('TAG:', labels, len(labels))
print ()
'''
if type(label_list) == list:
assert len(ids) == len(labels), 'Wrong subword tokenization!'
x_len = len(ids)
if x_len > self.max_len:
ids = ids[:self.max_len]
mask = [1] * self.max_len
if type(label_list) == list:
labels = labels[:self.max_len]
print ('Excessively long sequence, trimmed down!')
else:
ids = ids + [0] * (self.max_len - x_len)
mask = [1] * x_len + [0] * (self.max_len - x_len)
if type(label_list) == list:
labels = labels + [IGNORED_INDEX] * (self.max_len - x_len)
return ids, mask, labels
def encode2(self, text, label_list):
tokens = self.tokenizer.tokenize(text)
tokens = [self.CLS] + tokens + [self.SEP] # add special tokens
ids = self.tokenizer.convert_tokens_to_ids(tokens)
x_len = len(ids)
print (x_len)
print (text)
print (tokens)
print (label_list)
ids = ids + [0] * (self.max_len - x_len)
mask = [1] * x_len + [0] * (self.max_len - x_len)
labels = [IGNORED_INDEX] * self.max_len # ignored for all and then set labels for actual tokens
idx = 0
for i, token in enumerate(tokens):
if token not in [self.CLS, self.SEP] and not token.startswith('##'): # actual token (or prefix)
labels[i] = label_list[idx]
idx += 1
return ids, mask, labels
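# Hedged usage sketch of BERTSequenceTokenizer (the model name, tokens and tag ids are
# illustrative; running it requires the pretrained vocabulary to be cached or downloadable):
def _sequence_tokenizer_example():
    tok = BERTSequenceTokenizer('bert-base-multilingual-cased', max_len=16)
    tokens = ['John', 'lives', 'in', 'Paris']
    tags = [3, 0, 0, 7]                                  # one label id per whitespace token
    ids, mask, labels = tok.encode(tokens, tags)
    # ids/mask/labels are padded to max_len; only the first subword of each token keeps
    # its tag, while trailing subwords (and [CLS]/[SEP]/padding) are set to IGNORED_INDEX.
    return ids, mask, labels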
class BERTSequenceTagger(BertForTokenClassification):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = SplitBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward_embedding_head(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,):
input_embeds, extended_attention_mask, head_mask, encoder_hidden_states, encoder_extended_attention_mask \
= self.bert.forwardbertembeddings(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds)
return input_embeds, extended_attention_mask, head_mask, encoder_hidden_states, encoder_extended_attention_mask
def forward_embedding_tail(self, input_embeds,
extended_attention_mask,
head_mask,
encoder_hidden_states,
encoder_extended_attention_mask,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
labels=None,):
outputs = self.bert.forwardberttail(input_embeds, extended_attention_mask, head_mask, encoder_hidden_states, encoder_extended_attention_mask, input_ids, position_ids, token_type_ids)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
for_classification = False
):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.forward_classifier(sequence_output, for_classification)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
def get_ext_mask(self, attention_mask):
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
# note attention_mask here should be the extended attention mask constructed from forward_head
def forward_tail(self, k, x, attention_mask=None):
assert k>0 and k<= 1+self.config.num_hidden_layers, 'Wrong layer index!'
hidden_states = x
for i, layer_module in enumerate(self.bert.encoder.layer[k-1:]):
layer_outputs = layer_module(hidden_states, attention_mask)
hidden_states = layer_outputs[0]
sequence_output = hidden_states
return sequence_output
def forward_pooler(self, sequence_output):
return self.bert.pooler(sequence_output)
def forward_classifier(self, sequence_output, for_classification=False):
pooler_output = self.dropout(self.forward_pooler(sequence_output))
sequence_output = self.dropout(sequence_output)
if for_classification:
logits = self.classifier(pooler_output)
else:
logits = self.classifier(sequence_output)
return logits
class XLMRSequenceTagger(XLMRobertaForTokenClassification):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = SplitRoberta(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward_embedding_head(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None, ):
input_embeds, extended_attention_mask, head_mask, encoder_hidden_states, encoder_extended_attention_mask \
= self.roberta.forwardbertembeddings(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds)
return input_embeds, extended_attention_mask, head_mask, encoder_hidden_states, encoder_extended_attention_mask
def forward_embedding_tail(self, input_embeds,
extended_attention_mask,
head_mask,
encoder_hidden_states,
encoder_extended_attention_mask,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
labels=None, ):
outputs = self.roberta.forwardberttail(input_embeds, extended_attention_mask, head_mask, encoder_hidden_states,
encoder_extended_attention_mask, input_ids, position_ids, token_type_ids)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
for_classification=False
):
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.forward_classifier(sequence_output, for_classification)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
def get_ext_mask(self, attention_mask):
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
# note attention_mask here should be the extended attention mask constructed from forward_head
def forward_tail(self, k, x, attention_mask=None):
assert k > 0 and k <= 1 + self.config.num_hidden_layers, 'Wrong layer index!'
hidden_states = x
for i, layer_module in enumerate(self.roberta.encoder.layer[k - 1:]):
layer_outputs = layer_module(hidden_states, attention_mask)
hidden_states = layer_outputs[0]
sequence_output = hidden_states
return sequence_output
def forward_pooler(self, sequence_output):
return self.roberta.pooler(sequence_output)
def forward_classifier(self, sequence_output, for_classification=False):
pooler_output = self.dropout(self.forward_pooler(sequence_output))
sequence_output = self.dropout(sequence_output)
if for_classification:
logits = self.classifier(pooler_output)
else:
logits = self.classifier(sequence_output)
return logits
class Raptors(nn.Module):
def __init__(self, config, num_layers=1, num_langs=1, struct="transformer", add_weights=False, tied=True, bottle_size=768):
super().__init__()
self.nets = []
self.num_layers = num_layers
self.num_langs = num_langs
self.struct = struct
self.add_weights = add_weights
self.tied = tied
for i in range(num_langs):
for j in range(num_layers):
if struct == "transformer":
self.nets.append(BertLayer(config))
elif struct == "perceptron":
hidden_size = config.hidden_size
if add_weights:
if tied:
self.nets.append(nn.Sequential(
nn.Linear(hidden_size, bottle_size),
nn.ReLU(),
nn.Linear(bottle_size, hidden_size + 1)))
else:
self.nets.append(nn.Sequential(
nn.Linear(hidden_size, bottle_size),
nn.ReLU(),
nn.Linear(bottle_size, hidden_size)))
self.weight_net = nn.Sequential(
nn.Linear(hidden_size, bottle_size),
nn.ReLU(),
nn.Linear(bottle_size, 1)
)
else:
self.nets.append(nn.Sequential(
nn.Linear(hidden_size, hidden_size // 4),
nn.ReLU(),
nn.Linear(hidden_size // 4, hidden_size)))
else:
print("The specified structure is not implemented.")
sys.exit(0)
self.nets = nn.ModuleList(self.nets)
self.alpha = nn.Parameter(torch.zeros(num_langs, num_layers))
if struct == "perceptron":
self.init_weights()
def init_weights(self):
for i in range(len(self.nets)):
nn.init.xavier_normal_(self.nets[i][0].weight)
self.nets[i][0].bias.data.zero_()
nn.init.xavier_normal_(self.nets[i][2].weight)
self.nets[i][2].bias.data.zero_()
# nn.init.xavier_normal_(self.nets[i][4].weight)
# self.nets[i][4].bias.data.zero_()
# i: lang id j: layer id
def forward(self, i, j, x):
ind = i * self.num_layers + j
if self.struct == "transformer":
return self.nets[ind](x)[0]
elif self.struct == "perceptron":
out = self.nets[ind](x)
if self.add_weights:
if self.tied:
rep = out[:, :, :-1]
weight = F.sigmoid(out[:, :, -1]).unsqueeze(-1)
else:
rep = out
weight = F.sigmoid(self.weight_net(x))
print(weight)
out = weight * rep
return out
def get_alpha(self, i):
return F.softmax(self.alpha[i], -1)
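# Hedged usage sketch of the perceptron variant of Raptors (only config.hidden_size is
# read when struct="perceptron"; the shapes and values below are illustrative):
def _raptors_perceptron_example():
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=768)
    meta = Raptors(cfg, num_layers=2, num_langs=3, struct="perceptron")
    h = torch.randn(4, 16, cfg.hidden_size)     # (batch, tokens, hidden) from some layer
    transformed = meta(1, 0, h)                 # transform for language 1, layer slot 0
    alpha = meta.get_alpha(1)                   # softmax-normalized per-layer mixing weights
    return transformed, alpha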
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
featurized_sentences = []
for idx, example in enumerate(examples):
features = {}
        features['bert_ids'], features['bert_mask'], features['bert_token_starts'] = tokenizer.subword_tokenize_to_ids(example.text_a)
        features['label'] = example.label
        featurized_sentences.append(features)
    return featurized_sentences
def trim_input(bert_ids, bert_mask, bert_labels=None, train_max=None):
max_length = (bert_mask !=0).max(0)[0].nonzero().numel()
if train_max is not None:
max_length = min(max_length, train_max)
if max_length < bert_ids.shape[1]:
bert_ids = bert_ids[:, :max_length]
bert_mask = bert_mask[:, :max_length]
if bert_labels is not None and bert_labels.ndim == 2:
bert_labels = bert_labels[:, :max_length]
if bert_labels is not None:
return bert_ids, bert_mask, bert_labels
else:
return bert_ids, bert_mask
def masked_cross_entropy(logit, labels, K):
loss_sum = F.cross_entropy(logit.view(-1, K),
labels.view(-1),
ignore_index=IGNORED_INDEX,
reduction='sum')
loss = loss_sum / (labels!=IGNORED_INDEX).sum()
return loss
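# Illustrative mini-batch for the two helpers above (all values made up): trim the
# all-padding columns, then compute the masked loss over K label classes.
def _trim_and_masked_loss_example():
    K = 9
    ids = torch.tensor([[5, 6, 0, 0], [7, 8, 0, 0]])
    mask = torch.tensor([[1, 1, 0, 0], [1, 1, 0, 0]])
    labels = torch.tensor([[1, 2, IGNORED_INDEX, IGNORED_INDEX],
                           [3, 4, IGNORED_INDEX, IGNORED_INDEX]])
    ids, mask, labels = trim_input(ids, mask, labels)        # width shrinks from 4 to 2
    logits = torch.randn(ids.shape[0], ids.shape[1], K)      # e.g. tagger outputs
    return masked_cross_entropy(logits, labels, K)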
class WNets(nn.Module):
def __init__(self, h_dim, n_lang):
super().__init__()
nets = []
for _ in range(n_lang):
nets.append(nn.Sequential(
nn.Linear(1, h_dim),
nn.ReLU(inplace=True),
nn.Linear(h_dim, 1)
))
self.nets = nn.ModuleList(nets)
self.init_weights()
def init_weights(self):
for i in range(len(self.nets)):
nn.init.xavier_normal_(self.nets[i][0].weight)
self.nets[i][0].bias.data.zero_()
nn.init.xavier_normal_(self.nets[i][2].weight)
self.nets[i][2].bias.data.zero_()
def forward(self, i, x):
return torch.sigmoid(self.nets[i](x))
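# Hedged sketch of WNets (one small MLP per source language, mapping a per-token loss
# value to an instance weight in (0, 1)); the sizes and inputs below are illustrative.
def _wnets_example():
    wnet = WNets(h_dim=32, n_lang=3)
    per_token_loss = torch.rand(8, 1)     # column vector of token-level losses
    weights = wnet(1, per_token_loss)     # weights produced by language 1's net
    return weights                        # shape (8, 1), each entry in (0, 1)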
class VNet(nn.Module):
def __init__(self, in_dim, h_dim, out_dim):
super(VNet, self).__init__()
self.net = nn.Sequential(
nn.Linear(in_dim, h_dim),
#nn.ReLU(inplace=True),
nn.Tanh(),
nn.Linear(h_dim, h_dim),
#nn.ReLU(inplace=True),
nn.Tanh(),
nn.Linear(h_dim, out_dim)
)
def forward(self, x):
return torch.sigmoid(self.net(x))
class SplitBertModel(BertModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = SplitBertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def forwardbertembeddings(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
causal_mask = causal_mask.to(
attention_mask.dtype
) # causal and attention masks must have same type with pytorch version < 1.3
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
elif encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})".format(
encoder_hidden_shape, encoder_attention_mask.shape
)
)
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
inputs_embeds = self.embeddings.forward_head(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
embedding_output = self.embeddings.forward_tail(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
return embedding_output, extended_attention_mask, head_mask, encoder_hidden_states, encoder_extended_attention_mask
def forwardberttail(self, embedding_output, extended_attention_mask, head_mask, encoder_hidden_states, encoder_extended_attention_mask,
input_ids, position_ids, token_type_ids):
# embedding_output = self.embeddings.forward_tail(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
class SplitRoberta(SplitBertModel):
def __init__(self, config):
super().__init__(config)
class SplitBertEmbeddings(BertEmbeddings):
def forward_head(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
return inputs_embeds
def forward_tail(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
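# Hedged sketch of the split embedding path defined above: forward_head returns the raw
# word embeddings, and forward_tail adds position/segment embeddings plus LayerNorm and
# dropout, so the word-embedding tensor can be inspected or edited in between.
# Config sizes and inputs below are illustrative.
def _split_embeddings_example():
    from transformers import BertConfig
    cfg = BertConfig(vocab_size=100, hidden_size=32, max_position_embeddings=64)
    emb = SplitBertEmbeddings(cfg)
    input_ids = torch.randint(0, 100, (2, 8))
    word_embeds = emb.forward_head(input_ids=input_ids)
    full_embeds = emb.forward_tail(input_ids=input_ids, inputs_embeds=word_embeds)
    return full_embeds.shape              # torch.Size([2, 8, 32])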
| 32,576 | 42.668901 | 190 | py |
MetaXLR | MetaXLR-main/mtrain.py | import argparse
import json
import random
import conllu
from glob import glob
import math
import pandas as pd
import numpy as np
from numpy.random import choice
from models import *
from mlt import *
from utils import *
from data_utils import DataIterator
from transformers import ( BertConfig,
XLMRobertaConfig,
get_linear_schedule_with_warmup)
from torch import nn
from torch.utils.data import (DataLoader, TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from seqeval.metrics import f1_score as seq_f1_score
from seqeval.metrics import accuracy_score as seq_accuracy_score
from seqeval.metrics import precision_score as seq_precision_score
from seqeval.metrics import recall_score as seq_recall_score
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
import matplotlib.pyplot as plt
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def normalize_loss(loss):
min_loss = 0
max_loss = 2.5
if(loss > max_loss):
logger.warning(f"Exp3 loss is bigger than max: {loss}")
return 1
return (loss - min_loss) / (max_loss - min_loss)
# draw: [float] -> int
# pick an index from the given list of floats proportionally
# to the size of the entry (i.e. normalize to a probability
# distribution and draw according to the probabilities).
def draw(weights):
choice = random.uniform(0, sum(weights))
choiceIndex = 0
for weight in weights:
choice -= weight
if choice <= 0:
return choiceIndex
choiceIndex += 1
# distr: [float] -> (float)
# Normalize a list of floats to a probability distribution. Gamma is an
# egalitarianism factor, which tempers the distribtuion toward being uniform as
# it grows from zero to one.
def distr(weights, gamma=0.0):
theSum = float(sum(weights))
return tuple((1.0 - gamma) * (w / theSum) + (gamma / len(weights)) for w in weights)
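# Hedged sketch of how the helpers above can form an Exp3-style source-language
# selector: sample a language from the tempered weight distribution, observe a loss,
# and boost the chosen arm's weight. The reward definition, gamma and the update rule
# below are illustrative, not the exact training-loop code.
def _exp3_language_selection_sketch(num_langs=3, steps=5, gamma=0.1):
    weights = [1.0] * num_langs
    for _ in range(steps):
        probs = distr(weights, gamma)
        k = draw(probs)                               # index of the sampled language
        observed_loss = random.uniform(0.0, 2.5)      # stand-in for a measured dev loss
        reward = 1.0 - normalize_loss(observed_loss)  # lower loss -> higher reward
        weights[k] *= math.exp(gamma * reward / (probs[k] * num_langs))
    return weights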
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.label_ids = label_ids
def readfile(filename, lang=None):
    '''
    Read a CoNLL-style file: each non-empty line is a tab-separated record whose
    first field is the token and last field is the label; blank lines (and -DOCSTART
    lines) separate sentences. If lang is given, a leading "<lang>:" prefix is
    stripped from each token. Returns a list of (tokens, labels) tuples.
    '''
f = open(filename)
data = []
sentence = []
label= []
for line in f:
line = line.strip()
if len(line)==0 or line.startswith('-DOCSTART') or line[0]=="\n":
if len(sentence) > 0:
data.append((sentence,label))
sentence = []
label = []
continue
#splits = line.split(' ')
splits = line.strip().split('\t')
token = splits[0]
if lang is not None and token.startswith('%s:' % lang):
token = token.split('%s:' % lang)[-1]
sentence.append(token)
label.append(splits[-1])
if len(sentence) >0:
data.append((sentence,label))
sentence = []
label = []
return data
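# Illustrative round trip for readfile (the temp path and file contents are made up):
# sentences are blank-line separated, with one "token<TAB>label" record per line.
def _readfile_example(tmp_path="/tmp/readfile_example.conll"):
    with open(tmp_path, "w") as f:
        f.write("John\tB-PER\nSmith\tI-PER\n\nParis\tB-LOC\n")
    # expected: [(['John', 'Smith'], ['B-PER', 'I-PER']), (['Paris'], ['B-LOC'])]
    return readfile(tmp_path)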
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None, lang=None):
"""Reads a tab separated value file."""
return readfile(input_file, lang=lang)
class NerProcessor(DataProcessor):
def __init__(self):
super().__init__()
self.labels = ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]#, "[CLS]", "[SEP]"]
self.label_map = dict(zip(self.labels, range(len(self.labels))))
"""Processor for the CoNLL-2003 data set."""
def get_train_examples(self, data_dir, lang):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "%s.train" % lang)), "train")
def get_dev_examples(self, data_dir, lang):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "%s.dev" % lang)), "dev")
def get_test_examples(self, data_dir, lang):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "%s.test" % lang)), "test")
def get_labels(self):
return self.labels
def _create_examples(self,lines,set_type):
examples = []
for i,(sentence,label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = sentence
text_b = None
label = [self.label_map[l] for l in label]
examples.append(InputExample(guid=guid,text_a=text_a,text_b=text_b,label=label))
return examples
class POSProcessor(DataProcessor):
def __init__(self):
super().__init__()
self.labels = ["DET", "VERB", "SYM", "SCONJ", "CCONJ", "PUNCT", "NUM", "ADP", "NOUN", "_", "PRON",
"ADJ", "PART", "ADV", "PROPN", "INTJ", "X", "AUX"]
self.label_map = dict(zip(self.labels, range(len(self.labels))))
"""Processor for the POS data set."""
def read_conllu_file(self, file):
data = conllu.parse(open(file, "r").read())
sents = []
for sentence in data:
sent = []
label = []
for token in sentence:
sent.append(token["form"])
label.append(token["upostag"])
sents.append((sent, label))
return sents
def get_train_examples(self, data_dir, lang):
"""See base class."""
file = os.path.join(data_dir, f"UD_{lang}", "*train.conllu")
file = glob(file)[0]
conllu_data = self.read_conllu_file(file)
return self._create_examples(conllu_data, "train")
def get_dev_examples(self, data_dir, lang):
"""See base class."""
file = os.path.join(data_dir, f"UD_{lang}", "*dev.conllu")
file = glob(file)[0]
conllu_data = self.read_conllu_file(file)
return self._create_examples(conllu_data, "dev")
def get_test_examples(self, data_dir, lang):
"""See base class."""
file = os.path.join(data_dir, f"UD_{lang}", "*test.conllu")
file = glob(file)[0]
conllu_data = self.read_conllu_file(file)
return self._create_examples(conllu_data, "test")
def get_labels(self):
return self.labels
def _create_examples(self, lines, set_type):
examples = []
for i, (sentence, label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = sentence
text_b = None
label = [self.label_map[l] for l in label]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class SentClassProcessor(DataProcessor):
def __init__(self):
super().__init__()
self.labels = ["negative", "positive"]
self.label_map = dict(zip(self.labels, range(len(self.labels))))
"""Processor for the POS data set."""
def read_json_file(self, file):
data = json.load(open(file, "r"))
sents = []
for ex in data:
sent = ex["review_body"]
if "label" in ex:
label = ex["label"]
else:
stars = int(ex["stars"])
if stars == 3:
continue
elif stars > 3:
label = "positive"
else:
label = "negative"
sents.append((sent, label))
return sents
    def get_train_examples(self, data_dir, lang):
"""See base class."""
file = os.path.join(data_dir, f"{lang}", "*.train.json")
file = glob(file)[0]
sents = self.read_json_file(file)
return self._create_examples(sents, "train")
def get_dev_examples(self, data_dir, lang):
"""See base class."""
file = os.path.join(data_dir, f"{lang}", "*.dev.json")
file = glob(file)[0]
sents = self.read_json_file(file)
return self._create_examples(sents, "dev")
def get_test_examples(self, data_dir, lang):
"""See base class."""
file = os.path.join(data_dir, f"{lang}", "*.test.json")
file = glob(file)[0]
sents = self.read_json_file(file)
return self._create_examples(sents, "test")
def get_labels(self):
return self.labels
def _create_examples(self, lines, set_type):
examples = []
for i, (sentence, label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = sentence
text_b = None
label = self.label_map[label]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class PANXNerProcessor(DataProcessor):
def __init__(self):
super().__init__()
self.labels = ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]#, "[CLS]", "[SEP]"]
self.label_map = dict(zip(self.labels, range(len(self.labels))))
"""Processor for the PANX data set."""
def get_train_examples(self, data_dir, lang):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, lang, "train"), lang=lang), "train")
def get_dev_examples(self, data_dir, lang):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, lang, "dev"), lang=lang), "dev")
def get_test_examples(self, data_dir, lang):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, lang, "test"), lang=lang), "test")
def get_labels(self):
return self.labels
def _create_examples(self,lines,set_type):
examples = []
for i,(sentence,label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = sentence
text_b = None
label = [self.label_map[l] for l in label]
examples.append(InputExample(guid=guid,text_a=text_a,text_b=text_b,label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
features = []
for idx, example in enumerate(examples):
input_ids, input_mask, label_ids = tokenizer.encode(example.text_a, example.label)
features.append(
InputFeatures(input_ids=input_ids[:max_seq_length],
input_mask=input_mask[:max_seq_length],
label_ids=label_ids[:max_seq_length] if type(label_ids) is list else label_ids))
return features
# def eval(model, test_dataloader, processor):
# all_y_true = []
# all_y_pred = []
# for idx, batch_test in enumerate(test_dataloader):
# batch_test = tuple(t.cuda() for t in batch_test)
# test_ids, test_mask, test_labels = batch_test
# test_ids, test_mask, test_labels = trim_input(test_ids, test_mask, test_labels)
#
# with torch.no_grad():
# test_logit = model(test_ids, attention_mask=test_mask)[0]
#
# pred_labels = test_logit.max(-1)[1]
#
# y_true = [y[y!=IGNORED_INDEX].cpu().numpy().tolist() for y in test_labels]
# y_tags_true = [[processor.labels[y] for y in y_group] for y_group in y_true]
#
# y_pred = [pred[y!=IGNORED_INDEX].cpu().numpy().tolist() for (pred, y) in zip(pred_labels, test_labels)]
# y_tags_pred = [[processor.labels[y] for y in y_group] for y_group in y_pred]
# all_y_true.extend(y_tags_true)
# all_y_pred.extend(y_tags_pred)
#
# f1 = f1_score(all_y_true, all_y_pred)
# acc = accuracy_score(all_y_true, all_y_pred)
# precision = precision_score(all_y_true, all_y_pred)
# recall = recall_score(all_y_true, all_y_pred)
#
# return f1, acc, precision, recall
def eval(model, test_dataloader, processor, for_classification=False):
all_y_true = []
all_y_pred = []
for idx, batch_test in enumerate(test_dataloader):
batch_test = tuple(t.cuda() for t in batch_test)
test_ids, test_mask, test_labels = batch_test
test_ids, test_mask, test_labels = trim_input(test_ids, test_mask, test_labels)
with torch.no_grad():
test_logit = model(test_ids, attention_mask=test_mask, for_classification=for_classification)[0] # batch * sequence lens * labels
pred_labels = test_logit.max(-1)[1]
if for_classification:
all_y_true.extend(list(torch.unsqueeze(test_labels, 1).cpu().numpy()))
all_y_pred.extend(list(torch.unsqueeze(pred_labels, 1).cpu().numpy()))
else:
y_true = [y[y != IGNORED_INDEX].cpu().numpy().tolist() for y in test_labels]
y_tags_true = [[processor.labels[y] for y in y_group] for y_group in y_true]
y_pred = [pred[y != IGNORED_INDEX].cpu().numpy().tolist() for (pred, y) in zip(pred_labels, test_labels)]
y_tags_pred = [[processor.labels[y] for y in y_group] for y_group in y_pred]
all_y_true.extend(y_tags_true)
all_y_pred.extend(y_tags_pred)
if for_classification:
f1 = f1_score(all_y_true, all_y_pred)
acc = accuracy_score(all_y_true, all_y_pred)
precision = precision_score(all_y_true, all_y_pred)
recall = recall_score(all_y_true, all_y_pred)
else:
f1 = seq_f1_score(all_y_true, all_y_pred)
acc = seq_accuracy_score(all_y_true, all_y_pred)
precision = seq_precision_score(all_y_true, all_y_pred)
recall = seq_recall_score(all_y_true, all_y_pred)
return f1, acc, precision, recall
def read_data(data_dir, processor, tokenizer, lang, split, max_seq_length, model_name, bert_model_type="ori", train_size=-1, seed=42):
pt_name = '%s/%s/%s_%s_%d' % (data_dir, lang, split, model_name, max_seq_length)
if bert_model_type != "ori":
pt_name += f"_{bert_model_type}"
pt_name += ".pt"
if os.path.isfile(pt_name):
with open(pt_name, 'rb') as f:
data = torch.load(f)
logger.info("***** Loading CACHED data for %s *****" % lang)
else:
label_list = processor.get_labels()
if split == 'train':
examples = processor.get_train_examples(data_dir, lang)
elif split == 'dev':
examples = processor.get_dev_examples(data_dir, lang)
elif split == 'test':
examples = processor.get_test_examples(data_dir, lang)
else:
raise Exception('Wrong split %s!' % split)
features = convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer)
logger.info("***** Loading data for %s *****" % lang)
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
data = TensorDataset(input_ids, input_mask, label_ids)
if not os.path.exists(os.path.dirname(pt_name)):
os.makedirs(os.path.dirname(pt_name))
with open(pt_name, 'wb') as f:
torch.save(data, f)
    # subsample the training set if a size cap was requested
    if train_size > 0:
N = len(data)
        # reseed again to guarantee reproducibility
np.random.seed(seed)
if train_size < N:
sampled_indices = np.random.choice(np.arange(0, N), train_size, replace=False)
else:
sampled_indices = np.arange(0, N)
data_subset = TensorDataset(data.tensors[0][sampled_indices],
data.tensors[1][sampled_indices],
data.tensors[2][sampled_indices])
data = data_subset
logger.info(" Num %s examples = %d", split, len(data))
return data
# create one merged dataset from multiple languages
def merge_data(data_dir, processor, tokenizer, langs, split, max_seq_length, bert_model, bert_model_type, train_size=-1, seed=1, rest_all=False, tgt_lang=None):
if rest_all:
assert tgt_lang is not None, 'Need to specify tgt_lang when rest_all is True!'
data_list = []
for lang in langs:
if rest_all:
if lang == tgt_lang:
data = read_data(data_dir, processor, tokenizer, lang, split, max_seq_length, bert_model, bert_model_type, train_size, seed)
else:
data = read_data(data_dir, processor, tokenizer, lang, split, max_seq_length, bert_model, bert_model_type, -1, seed) # take all for src_langs
else:
data = read_data(data_dir, processor, tokenizer, lang, split, max_seq_length, bert_model, bert_model_type, train_size, seed)
data_list.append(data)
merged_data = TensorDataset(torch.cat([x.tensors[0] for x in data_list], dim=0), # input_ids
torch.cat([x.tensors[1] for x in data_list], dim=0), # input_mask
torch.cat([x.tensors[2] for x in data_list], dim=0)) # label_ids
return merged_data
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default='data/panx_dataset',
type=str,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default='bert-base-multilingual-cased',
type=str,
#required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default='panx',
type=str,
#required=True,
help="The name of the task to train.")
parser.add_argument('--tgt_lang',
default='en',
type=str,
required=True,
help='Target language (default: en)')
parser.add_argument("--output_dir",
default='out',
type=str,
#required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
# set a max seq length for training to save GPU-ram in training (testing not affected)
parser.add_argument("--train_max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval or not.")
parser.add_argument("--do_finetune",
action='store_true',
help="Whether to run eval or not.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--train_size',
default=-1,
type=int,
help='Training instances used for training. (-1 for use all)')
parser.add_argument('--target_train_size',
default=-1,
type=int,
help="Training instances of the target language for training. (-1 for use all)")
parser.add_argument("--batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--augcopy",
default=0,
type=int,
help='Number of permuted augmented copies for training')
parser.add_argument("--method",
default='mlt_multi',
choices=['mlt', 'mlt_mix', 'gold_only', 'gold_all', 'gold_mix', 'mlt_multi', 'mlt_multi_mix', 'metaw', 'metawt', 'metawt_multi', 'metaxl', 'joint_training', 'jt-metaxl'],
type=str,
help="Method for meta learning.")
parser.add_argument("--rest_all",
default=False,
action='store_true',
help='Use all train data for source langs (default: False).')
parser.add_argument("--main_lr",
default=1e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--meta_lr",
default=1e-4,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--sinkhorn_lr",
default=1e-4,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--reweighting_lr",
default=1e-4,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--epochs",
default=10.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--weight_decay", default=5e-4, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--data_seed',
type=int,
default=42,
help="random seed for data initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--amp', type=int, default=-1,
help="For fp16: Apex AMP optimization level selected in [0, 1, 2, and 3]."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--layers', type=str, default=None,
help="Layer numbers concatenated with ',', e.g., 1,2,3")
parser.add_argument('--meta_per_lang', action="store_true", default=False,
help="Whether to construct a meta network per language.")
parser.add_argument('--struct', type=str, default="transformer",
help="The stacked structure of transfer component.")
parser.add_argument('--tokenizer_dir', type=str, default=None,
help="The directory of tokenizer for unseen bert languages.")
parser.add_argument('--bert_model_type', type=str, default="ori",
choices=["ori", "empty", "reinitialize_vocab"])
parser.add_argument('--add_permutation', action="store_true", default=False,
help="Whether to add sinkhorn network for token level permutation.")
parser.add_argument('--permutation_hidden_size', type=int, default=768,
help="The hidden size of the permutation network.")
parser.add_argument('--no_skip_connection', action="store_true", default=False,
help="add skip connection or not")
parser.add_argument('--temp', type=float, default=0.1,
help="The temperature of the permutation network.")
parser.add_argument('--num_source_langs', type=int, default=1,
help='The number of source languages used.')
parser.add_argument('--source_language_strategy', type=str, default="random", choices=["random", "language_family", "specified", "random2"],
help='The strategy to select source languages.')
parser.add_argument('--portion', type=int, default=2,
help="1/n used for training")
parser.add_argument('--source_languages', type=str,
help='Source languages that delimited by ,')
parser.add_argument('--add_instance_weights', action="store_true",
help='Whether to reweight instances or not.')
parser.add_argument('--weights_from', type=str, default="features",
help="Where does the feature come from?")
parser.add_argument('--tied', action="store_true",
help="whether the weights are tied or not with the feature network.")
parser.add_argument('--transfer_component_add_weights', action="store_true",
help="add weights for perceptron")
parser.add_argument('--bottle_size', type=int, default=768)
#parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
#parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
args.magic = 1.0
args.every = 1
os.environ['PYTHONHASHSEED'] = str(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
'''
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
'''
processors = {'conll': NerProcessor,
'panx': PANXNerProcessor,
'panx_100': PANXNerProcessor,
'pos': POSProcessor,
'sent': SentClassProcessor
}
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
#torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, APEX training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.amp))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.batch_size = args.batch_size // args.gradient_accumulation_steps
if not args.do_train and not args.do_eval and not args.do_finetune:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
'''
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
'''
if not args.do_finetune:
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
else:
args.output_dir = os.path.dirname(args.output_dir)
# print arguments
for arg in vars(args):
logger.info(f"{arg} = {getattr(args, arg)}")
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
num_labels = len(label_list)
logger.info(f"There are {num_labels} labels. {label_list}")
tokenizer = BERTSequenceTokenizer(args.bert_model, max_len=args.max_seq_length, cache_dir='cache', tokenizer_dir=args.tokenizer_dir)
if task_name == 'panx':
# langs = ['af', 'ar']
langs = ['af', 'ar', 'bg', 'bn', 'bs', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'fa',
'fi', 'fr', 'he', 'hi', 'hr', 'hu', 'id', 'it', 'lt', 'lv', 'mk', 'ms', 'nl', 'no',
'pl', 'pt', 'ro', 'ru', 'sk', 'sl', 'sq', 'sv', 'ta', 'tl', 'tr', 'uk', 'vi']
if args.num_source_langs < 41:
if args.source_language_strategy == 'random':
langs = "vi da ar hi fr fi de cs ca no".split() # random.sample(langs, args.num_source_langs)
elif args.source_language_strategy == 'language_family':
langs = ['he', 'it', 'bn', 'ms', 'vi', 'et', 'ta', 'fi', 'pl', 'tr']
elif args.source_language_strategy == 'specified':
langs = args.source_languages.split(",")
elif task_name == 'panx_100':
langs = ['ace', 'als', 'am', 'ang', 'arc', 'arz', 'as', 'ay', 'ba', 'bar', 'bat-smg', 'bh', 'bo', 'cbk-zam', 'cdo', 'ce', 'ceb', 'co', 'crh', 'csb', 'cv', 'diq', 'dv', 'eml', 'ext', 'fiu-vro', 'fo', 'frr', 'fur', 'gan', 'gd', 'gn', 'gu', 'hak', 'hsb', 'ia', 'ig', 'ilo', 'io', 'jbo', 'jv', 'km', 'kn', 'ksh', 'ku', 'ky', 'li', 'lij', 'lmo', 'ln', 'map-bms', 'mg', 'mhr', 'mi', 'min', 'mn', 'mt', 'mwl', 'my', 'mzn', 'nap', 'nds', 'ne', 'nov', 'oc', 'or', 'os', 'pa', 'pdc', 'pms', 'pnb', 'ps', 'qu', 'rm', 'rw', 'sa', 'sah', 'scn', 'sco', 'sd', 'si', 'so', 'su', 'szl', 'tg', 'tk', 'ug', 'vec', 'vep', 'vls', 'vo', 'wa', 'war', 'wuu', 'xmf', 'yi', 'yo', 'zea', 'zh-classical', 'zh-min-nan']
elif task_name == 'conll':
langs = ['eng', 'esp', 'ned', 'deu']
elif task_name == 'pos':
if args.source_language_strategy == "random":
langs = ['Vietnamese-VTB', 'Basque-BDT', 'Estonian-EDT', 'Arabic-PADT', 'Japanese-BCCWJ', 'Tamil-TTB', 'Korean-GSD', 'Turkish-IMST', 'German-GSD', 'Chinese-GSDSimp']
else:
langs = ['Irish-IDT', 'Latin-Perseus', 'Latvian-LVTB', 'Galician-CTG', 'Japanese-GSD', 'Finnish-FTB', 'Latin-ITTB', 'Afrikaans-AfriBooms', 'Japanese-BCCWJ', 'Spanish-GSD']
elif task_name == 'sent':
if args.source_language_strategy == 'specified':
langs = args.source_languages.split(",")
else:
langs = ["zh", "es", "en", "de", "ja", "fr"]
else:
raise Exception('invalid task name %s!' % task_name)
lang2id = {k:v for v, k in enumerate(langs)}
logging.info("source languages: " + " ".join(langs))
tgt_lang = args.tgt_lang # target languages
src_langs = [x for x in langs if x != tgt_lang]
# load all data
if args.do_train or args.do_finetune:
# note for train, we may sample the data specified by args.train_size
if args.method == 'gold_all':
train_t_data = merge_data(args.data_dir, processor, tokenizer, langs, 'train', args.max_seq_length, args.bert_model, args.bert_model_type, args.target_train_size, seed=args.data_seed, rest_all=args.rest_all, tgt_lang=tgt_lang)
# dev all also needs to subsample
dev_data = merge_data(args.data_dir, processor, tokenizer, langs, 'dev', args.max_seq_length, args.bert_model, args.bert_model_type, -1, seed=args.data_seed)
# not used by gold_all
train_s_data = train_t_data
#read_data(args.data_dir, processor, tokenizer, tgt_lang, 'train', args.max_seq_length, args.train_size, seed=args.seed)
elif args.method == 'metawt_multi' or args.method == "metaxl":
train_s_data = []
train_t_data = read_data(args.data_dir, processor, tokenizer, tgt_lang, 'train', args.max_seq_length, args.bert_model, args.bert_model_type, args.target_train_size, seed=args.data_seed)
# dev all also needs to subsample
for lang in src_langs:
train_s_data.append(read_data(args.data_dir, processor, tokenizer, lang, 'train', args.max_seq_length, args.bert_model, args.bert_model_type, args.train_size, seed=args.data_seed))
# same subsample size for dev, as using a tiny train + a full dev doesn't seem to make sense
dev_data = read_data(args.data_dir, processor, tokenizer, tgt_lang, 'dev', args.max_seq_length, args.bert_model, args.bert_model_type, -1, seed=args.data_seed)
else: # for method == gold_only, mlt_multi
train_t_data = read_data(args.data_dir, processor, tokenizer, tgt_lang, 'train', args.max_seq_length, args.bert_model, args.bert_model_type, args.target_train_size, seed=args.data_seed)
# train_s will be much larger than train_t as it contains multiple languages
# train_s not used by gold_only
if args.method != "gold_only":
train_s_data = merge_data(args.data_dir, processor, tokenizer, src_langs, 'train', args.max_seq_length, args.bert_model, args.bert_model_type, -1 if args.rest_all else args.train_size, seed=args.data_seed)
# same subsample size for dev, as using a tiny train + a full dev doesn't seem to make sense
dev_data = read_data(args.data_dir, processor, tokenizer, tgt_lang, 'dev', args.max_seq_length, args.bert_model, args.bert_model_type, -1, seed=args.data_seed)
# no subsample for dev and test
test_data = read_data(args.data_dir, processor, tokenizer, tgt_lang, 'test', args.max_seq_length, args.bert_model, args.bert_model_type, )
logger.info(f"First example: {train_t_data[0][0][:10]}")
if args.local_rank == -1:
train_t_sampler = None
train_s_sampler = None
dev_sampler = None
test_sampler = None
batch_size = args.batch_size
else:
train_t_sampler = DistributedRandomSampler(train_t_data)
train_s_sampler = DistributedRandomSampler(train_s_data)
dev_sampler = DistributedSampler(dev_data)
test_sampler = DistributedSampler(test_data)
batch_size = int(args.batch_size / int(os.environ['NGPU']))
train_t_loader = DataIterator(DataLoader(train_t_data, sampler=train_t_sampler, batch_size=batch_size, shuffle=(train_t_sampler is None)))
if args.method == 'metawt_multi' or args.method == "metaxl": # this only supports single GPU mode
train_s_loaders = [DataIterator(DataLoader(train_s_data[i], sampler=None, batch_size=batch_size, shuffle=True)) for i in range(len(src_langs))]
elif args.method == "metawt" or args.method == "metaxl" or args.method == "jt-metaxl" or args.method == "joint_training":
train_s_loaders = [DataLoader(train_s_data, sampler=train_s_sampler, batch_size=batch_size, shuffle=(train_s_sampler is None))]
dev_loader = DataIterator(DataLoader(dev_data, sampler=dev_sampler, batch_size=batch_size, shuffle=(dev_sampler is None)))
test_loader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size, shuffle=(test_sampler is None))
elif args.do_eval:
dev_data = read_data(args.data_dir, processor, tokenizer, tgt_lang, 'dev', args.max_seq_length, args.bert_model, args.bert_model_type, -1, seed=args.data_seed)
# no subsample for dev and test
test_data = read_data(args.data_dir, processor, tokenizer, tgt_lang, 'test', args.max_seq_length, args.bert_model, args.bert_model_type)
dev_loader = DataIterator(
DataLoader(dev_data, sampler=None, batch_size=args.batch_size, shuffle=False))
test_loader = DataLoader(test_data, sampler=None, batch_size=args.batch_size, shuffle=False)
# Prepare model
is_xlmr = args.bert_model.startswith("xlm")
ConfigClass = XLMRobertaConfig if is_xlmr else BertConfig
SequenceTagger = XLMRSequenceTagger if is_xlmr else BERTSequenceTagger
if not args.do_train and (args.do_finetune or args.do_eval):
config = ConfigClass.from_json_file(os.path.join(args.output_dir, "config.json"))
model = SequenceTagger(config)
logger.info(f"Loading an empty bert model with a vocab size {config.vocab_size}")
elif args.do_train:
config = ConfigClass.from_pretrained(args.bert_model, num_labels=num_labels, finetuning_task=args.task_name,
output_hidden_states=True, cache_dir='cache')
if args.bert_model_type == "empty":
config.vocab_size = tokenizer.tokenizer.vocab_size
model = SequenceTagger(config)
logger.info(f"Loading an empty bert model with a vocab size {config.vocab_size}")
else:
model = SequenceTagger.from_pretrained(args.bert_model, config=config, cache_dir='cache')
embeddings = model.roberta.embeddings if is_xlmr else model.bert.embeddings
if args.bert_model_type == "reinitialize_vocab":
config.vocab_size = tokenizer.tokenizer.vocab_size
pretrained_embeddings = embeddings.word_embeddings.weight.data.clone()
embeddings.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
original_tokenizer = BERTSequenceTokenizer(args.bert_model, max_len=args.max_seq_length, cache_dir='cache')
for i, word in enumerate(tokenizer.tokenizer.vocab):
if word in original_tokenizer.tokenizer.vocab:
index = original_tokenizer.tokenizer.convert_tokens_to_ids(word)
embeddings.word_embeddings.weight[i].data.copy_(pretrained_embeddings[index])
logger.info(f"Reloaded bert embeddings with a vocab size {config.vocab_size}")
if args.layers is not None:
layers = args.layers.split(",")
else:
layers = []
if args.method in ['metaw', 'metawt']:
raptors = VNet(1, 512, 1)
elif args.method in ['metaw_multi', 'metawt_multi']:
raptors = WNets(512, len(src_langs))
elif args.method == "metaxl" or args.method == "jt-metaxl":
raptors = Raptors(config, len(layers), len(src_langs) if args.meta_per_lang else 1, struct=args.struct, add_weights=args.transfer_component_add_weights, tied=args.tied, bottle_size=args.bottle_size)
else:
raptors = None # Raptors vs Raptor
# permutate_network = None
# if args.add_permutation:
# permutate_network = Permutation(config=config, in_dim=config.hidden_size, h_dim=args.permutation_hidden_size,
# out_dim=config.max_position_embeddings, temp=args.temp, no_skip_connection=args.no_skip_connection)
reweighting_module = None
if args.add_instance_weights:
if args.weights_from == "features":
reweighting_module = VNet(config.hidden_size, args.bottle_size, 1)
elif args.weights_from == "loss":
reweighting_module = VNet(1, args.bottle_size, 1)
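# Note: the instance-reweighting module is a VNet (defined elsewhere in this repo), presumably a small
# (input_dim, hidden_dim=bottle_size, output_dim=1) network; it maps either the encoder features
# or the scalar per-example loss to a single weight per instance, depending on args.weights_from.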
if not args.do_train and (args.do_finetune or args.do_eval):
model.load_state_dict(torch.load(os.path.join(args.output_dir, "best.pt")))
if raptors is not None:
raptors.load_state_dict(torch.load(os.path.join(args.output_dir, "best_meta.pt")))
logging.info(f"Reloaded model and raptors from best.pt, best_meta.pt.")
# if permutate_network is not None:
# permutate_network.load_state_dict(torch.load(os.path.join(args.output_dir, "best_permutation.pt")))
# logging.info(f"Reloaded permutate network from best_permutation.pt.")
if reweighting_module is not None:
reweighting_module.load_state_dict(torch.load(os.path.join(args.output_dir, "best_weights.pt")))
num_model_parameters = calculate_parameters(model)
num_meta_network_parameters = 0
num_permutate_network = 0
num_reweighting_network = 0
if raptors is not None:
num_meta_network_parameters = calculate_parameters(raptors)
# if permutate_network is not None:
# num_permutate_network = calculate_parameters(permutate_network)
if reweighting_module is not None:
num_reweighting_network = calculate_parameters(reweighting_module)
total_parameters = num_model_parameters + num_meta_network_parameters + num_permutate_network + num_reweighting_network
logging.info(f"Model parameters: {num_model_parameters}")
logging.info(f"Meta network parameters: {num_meta_network_parameters}")
logging.info(f"Permutation network parameters: {num_permutate_network}")
logging.info(f"Reweighting network parameters: {num_reweighting_network}")
logging.info(f"Total parameters: {total_parameters}")
# if args.local_rank == 0:
# torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(device)
if raptors is not None:
raptors.to(device)
# if permutate_network is not None:
# permutate_network.to(device)
if reweighting_module is not None:
reweighting_module.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.main_lr, eps=args.adam_epsilon, weight_decay=args.weight_decay)
meta_opt = None
if raptors is not None:
meta_opt = torch.optim.Adam(raptors.parameters(), lr=args.meta_lr, eps=args.adam_epsilon, weight_decay=args.weight_decay)
# if permutate_network is not None:
# sinkhorn_opt = torch.optim.Adam(permutate_network.parameters(), lr=args.sinkhorn_lr, eps=args.adam_epsilon, weight_decay=args.weight_decay)
# logging.info("Initialized sinkhorn optimizer.")
reweighting_opt = None
if reweighting_module is not None:
reweighting_opt = torch.optim.Adam(reweighting_module.parameters(), lr=args.reweighting_lr, eps=args.adam_epsilon, weight_decay=args.weight_decay)
logging.info("Initialized reweighting optimizer.")
# change this for multiple train_s loader settings
if args.do_train or args.do_finetune:
if args.method != "gold_only":
if type(train_s_data) is list:
num_train_optimization_steps = sum([len(x) for x in train_s_data]) * args.epochs / batch_size
else:
num_train_optimization_steps = len(train_s_data) * args.epochs / batch_size # note the steps is counted based on train_s, which is 40/41 langs
else:
num_train_optimization_steps = len(train_t_data) * args.epochs / batch_size # for gold_only the step count is based on train_t
if args.do_train:
warmup_steps = int(args.warmup_proportion * num_train_optimization_steps)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps)
if raptors is not None:
meta_scheduler = get_linear_schedule_with_warmup(meta_opt, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps)
# if args.add_permutation:
# permutation_scheduler = get_linear_schedule_with_warmup(sinkhorn_opt, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps)
if args.add_instance_weights:
reweighting_scheduler = get_linear_schedule_with_warmup(reweighting_opt, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps)
if args.amp > -1:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
if args.do_train:
# naive logging file
_logw=open(os.path.join(args.output_dir, 'logging.txt'), 'w')
best_val_metric = float('inf')
model.train()
#for epoch in trange(int(args.epochs), desc="Epoch"):
if args.method == "gold_only":
for step, batch_train_t in enumerate(tqdm(train_t_loader.loader, desc="Iteration")):
batch_train_t = tuple(t.to(device) for t in batch_train_t) # tgt train
train_t_ids, train_t_mask, train_t_labels = batch_train_t
train_t_ids, train_t_mask, train_t_labels = trim_input(train_t_ids, train_t_mask, train_t_labels, args.train_max_seq_length)
if len(train_t_ids) == 1:
continue
if args.augcopy > 0:
train_t_ids, train_t_mask, train_t_labels = permute_aug(train_t_ids, train_t_mask, train_t_labels, args.augcopy)
loss_meta, loss_train = step_gold_only(model, optimizer,
train_t_ids, train_t_mask, train_t_labels,
args)
if step % args.every == 0 and args.local_rank <= 0: # only do eval on first GPU
tqdm.write('Step:%d\tLoss_Meta:%.6f\tLoss_Train:%.6f' % (step, (loss_meta).item(), (loss_train).item()))
scheduler.step()
else:
# *** 1. initialize uniform lang weights
source_langs_num = len(src_langs)
logger.info(f"Languages number = {source_langs_num}")
weights = [1.0] * source_langs_num
gamma = 0.01
src_data_size = args.train_size
max_step_size = args.epochs * src_data_size
eval_every = 250
loss_meta_before = float("inf")
# *** 2. initialize batch queues
# Maybe nothing to do
# *** 3. while top-k or top-p choose language by lang distribution - remember to clear empty queues and update probs
# *** update : we currently chose to use max_step_size
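# A minimal sketch of the EXP3 helpers assumed below (distr() and draw() are defined elsewhere in
# this repo; this only shows the standard EXP3 form that the weight update at the end of each step
# is consistent with):
#   def distr(weights, gamma):
#       total = sum(weights)
#       return [(1.0 - gamma) * (w / total) + gamma / len(weights) for w in weights]
#   def draw(probs):
#       return int(np.random.choice(len(probs), p=probs))
# The chosen language's weight is then updated with an importance-weighted loss estimate,
# weights[i] *= exp(gamma * (loss / probs[i]) / K), i.e. the usual EXP3 exponential update.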
for step in (tqdm(np.arange(1, max_step_size + 1))):
logger.info(f"Step = {step}")
#for j, train_s_loader in enumerate(train_s_loaders): # note here
# *** Choosing a language
probability_distribution = distr(weights, gamma)
lang_index = draw(probability_distribution)
logger.info(f"Chosen language index = {lang_index}")
logger.info(f"Updating on language {src_langs[lang_index]} with probability: {probability_distribution[lang_index]}")
train_s_loader = train_s_loaders[lang_index]
batch_train_s = next(train_s_loader) # check if we need an iterator for that
logger.info(f"Updating on language {src_langs[lang_index]} with probability: {probability_distribution[lang_index]}")
logger.info(f"Training batch size = {batch_size}")
# for step, batch_train_s in enumerate(tqdm(train_s_loader, desc="Iteration")): # count epoch based on merged src langs loader
batch_train_t = next(train_t_loader)
batch_train_s = tuple(t.to(device) for t in batch_train_s) # src train
batch_train_t = tuple(t.to(device) for t in batch_train_t) # tgt train
train_s_ids, train_s_mask, train_s_labels = batch_train_s
train_s_ids, train_s_mask, train_s_labels = trim_input(train_s_ids, train_s_mask, train_s_labels, args.train_max_seq_length)
# print(train_s_labels)
train_t_ids, train_t_mask, train_t_labels = batch_train_t
train_t_ids, train_t_mask, train_t_labels = trim_input(train_t_ids, train_t_mask, train_t_labels, args.train_max_seq_length)
eta = scheduler.get_last_lr()[0]
# print("*"*20, "source", "*"*20)
# print(train_s_ids.shape, train_s_labels.shape)
# print("*" * 20, "target", "*" * 20)
# print(train_t_ids.shape, train_t_labels.shape)
if args.method == "metaxl":
half = int(len(train_t_ids)/args.portion)
eval_ids, eval_mask, eval_labels = train_t_ids[half:], train_t_mask[half:], train_t_labels[half:]
train_t_ids, train_t_mask, train_t_labels = train_t_ids[:half], train_t_mask[:half], train_t_labels[:half]
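# For metaxl the target-language batch is split: the first 1/args.portion of it is used for the
# inner (train) update, and the remainder serves as the held-out meta/eval split for the meta update.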
if len(eval_ids) == 0 or len(train_t_ids) == 0:
continue
print("*"*20, "source", "*"*20)
print(eval_ids.shape, eval_labels.shape)
print("*" * 20, "target", "*" * 20)
print(train_t_ids.shape, train_t_labels.shape)
layers = [int(l) for l in layers]
loss_meta, loss_train = step_metaxl(model, optimizer,
raptors, meta_opt,
reweighting_module, reweighting_opt,
train_s_ids, train_s_mask, train_s_labels,
train_t_ids, train_t_mask, train_t_labels,
eval_ids, eval_mask, eval_labels,
lang_index if args.meta_per_lang else 0, layers, eta, args)
# *** update here distribution by loss_meta using exp3
exp3_loss = normalize_loss(loss_meta)
#exp3_loss = 1 if loss_meta >= loss_meta_before else 0
loss_meta_before = loss_meta
estimated_loss = 1.0 * exp3_loss / probability_distribution[lang_index]
weights[lang_index] *= math.exp(1.0 * estimated_loss * gamma / source_langs_num)
logger.info(f"Exp3 loss: {exp3_loss}")
#logger.info(f"New weight for lang {src_langs[lang_index]} = {weights[lang_index]}")
logger.info(f"New lang distribution {distr(weights, gamma)} for source langauges: {src_langs}")
# logger.info(raptors.nets[0][0].weight)
# logger.info(meta_opt)
# logger.info(optimizer)
elif args.method == "jt-metaxl":
layers = [int(l) for l in layers]
loss_meta, loss_train = step_jt_metaxl(model, optimizer,
raptors, meta_opt,
reweighting_module, reweighting_opt,
train_s_ids, train_s_mask, train_s_labels,
train_t_ids, train_t_mask, train_t_labels,
lang_index if args.meta_per_lang else 0, layers, eta, args)
elif args.method == 'metawt':
half = int(len(train_t_ids) / 2)
eval_ids, eval_mask, eval_labels = train_t_ids[half:], train_t_mask[half:], train_t_labels[half:]
train_t_ids, train_t_mask, train_t_labels = train_t_ids[:half], train_t_mask[:half], train_t_labels[:half]
loss_meta, loss_train = step_metawt_mix(model, optimizer, raptors, meta_opt,
train_s_ids, train_s_mask, train_s_labels,
train_t_ids, train_t_mask, train_t_labels,
eval_ids, eval_mask, eval_labels,
eta, args)
elif args.method == 'metawt_multi':
half = int(len(train_t_ids) / 2)
eval_ids, eval_mask, eval_labels = train_t_ids[half:], train_t_mask[half:], train_t_labels[half:]
train_t_ids, train_t_mask, train_t_labels = train_t_ids[:half], train_t_mask[:half], train_t_labels[:half]
loss_meta, loss_train = step_metawt_multi_mix(model, optimizer, raptors, meta_opt,
train_s_ids, train_s_mask, train_s_labels,
train_t_ids, train_t_mask, train_t_labels,
eval_ids, eval_mask, eval_labels,
eta, args, lang_index)
elif args.method in "joint_training":
loss_meta, loss_train = step_gold_mix(model, optimizer,
data_s=train_s_ids, mask_s=train_s_mask, target_s=train_s_labels,
data_g=train_t_ids, mask_g=train_t_mask, target_g=train_t_labels,
args=args)
else:
raise Exception('Method %s not implemented yet.' % args.method)
logger.info("Step: " + str(step) + "\n")
if step % args.every == 0 and args.local_rank <= 0: # only do eval on first GPU
tqdm.write('Step:%d\tLoss_Meta:%.6f\tLoss_Train:%.6f' % (step, loss_meta.item(), loss_train.item()))
logger.info('Step:%d\tLoss_Meta:%.6f\tLoss_Train:%.6f\n' % (step, loss_meta.item(), loss_train.item()))
# scheduler update per step
scheduler.step()
if raptors is not None:
meta_scheduler.step()
# if args.add_permutation:
# permutation_scheduler.step()
if args.add_instance_weights:
reweighting_scheduler.step()
if(step % eval_every == 0):
#torch.save(model.state_dict(), os.path.join(args.output_dir, 'last.pt'))
model.eval()
if raptors is not None:
raptors.eval()
# if permutate_network is not None:
# permutate_network.eval()
if reweighting_module is not None:
reweighting_module.eval()
val_score, val_acc, val_precision, val_recall = eval(model, dev_loader.loader, processor, for_classification=(args.task_name=="sent"))
test_score, test_acc, test_precision, test_recall = eval(model, test_loader, processor, for_classification=(args.task_name=="sent"))
model.train()
if raptors is not None:
raptors.train()
# if permutate_network is not None:
# permutate_network.train()
if reweighting_module is not None:
reweighting_module.train()
if args.local_rank <=0 and -test_score < best_val_metric: # val_acc: the larger the better
best_val_metric = -test_score
torch.save(model.state_dict(), os.path.join(args.output_dir, 'best.pt'))  # needed by the final eval below, which reloads best.pt
# if raptors is not None:
# torch.save(raptors.state_dict(), os.path.join(args.output_dir, 'best_meta.pt'))
# if args.add_permutation:
# torch.save(permutate_network.state_dict(), os.path.join(args.output_dir, 'best_permutate.pt'))
# if args.add_instance_weights:
# torch.save(reweighting_module.state_dict(), os.path.join(args.output_dir, 'best_weights.pt'))
'''
alphas = raptors.get_alpha().detach().cpu().numpy()
'''
tqdm.write('Loss_Meta:%.4f\tLoss_Train:%.4f\tDev F1:%.4f\tDev ACC:%.4f\tDev Precision:%.4f\tDev Recall:%.4f\tBest Test F1 so far:%.4f' % (loss_meta.item(), loss_train.item(), val_score, val_acc, val_precision, val_recall, -best_val_metric))
tqdm.write('Loss_Meta:%.4f\tLoss_Train:%.4f\tTest F1:%.4f\tTest ACC:%.4f\tTest Precision:%.4f\tTest Recall:%.4f' % (loss_meta.item(), loss_train.item(), test_score, test_acc, test_precision, test_recall))
_logw.write('%s\tstep: %d\tDev F1: %.4f\tTest F1: %.4f\n' % (tgt_lang, step, val_score, test_score))
_logw.write(f"\tNew lang distribution: %s \tfor source langauges: %s\n" % (str(distr(weights, gamma)), str(src_langs)))
_logw.flush()
os.fsync(_logw)
# eval on best model saved so far
print ('====== Final performance =======')
model.load_state_dict(torch.load(os.path.join(args.output_dir, 'best.pt')))
model.eval()
score, acc, precision, recall = eval(model, test_loader, processor, for_classification=(args.task_name == "sent"))
print ('Best Test F1:', -best_val_metric)
print ('Test F1:', score, 'Test ACC:', acc, 'Precision:', precision, 'Recall:', recall)
_logw.write('%s\tFinal best Test F1: %.4f\tTest F1: %.4f\n' % (tgt_lang, -best_val_metric, score))
_logw.flush()
os.fsync(_logw)
# close ad-hoc log
_logw.close()
with open(os.path.join(args.output_dir, 'result.txt'), 'w') as w:
w.write('Test F1: %.4f\tTest ACC: %.4f\tPrecision: %.4f\tRecall: %.4f\n' % (score, acc, precision, recall))
w.write('Best Test F1: %.4f\n' % (-best_val_metric))
w.write('Test F1: %.4f\n' % (score))
# Save a trained model and the associated configuration
model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
model_to_save.save_pretrained(args.output_dir)
#tokenizer.save_pretrained(args.output_dir)
label_map = {i : label for i, label in enumerate(label_list,1)}
model_config = {'bert_model':args.bert_model, 'max_seq_length':args.max_seq_length,'num_labels':len(label_list)+1,'label_map':label_map}
json.dump(model_config,open(os.path.join(args.output_dir,'model_config.json'),'w'))
# Load a trained model and config that you have fine-tuned
elif args.do_finetune:
_logw = open(os.path.join(args.output_dir, 'logging_finetune.txt'), 'w')
best_val_metric = float('inf')
best_test_metric = float('inf')
model.train()
raptors.train()
# if args.add_permutation:
# permutate_network.train()
for epoch in trange(int(args.epochs), desc="Epoch"):
for j, train_s_loader in enumerate(train_s_loaders): # note here
logger.info(f"Updating on language {src_langs[j]} ..")
logger.info(f"Training batch size = {batch_size}")
for step, batch_train_s in enumerate(
tqdm(train_s_loader, desc="Iteration")): # count epoch based on merged src langs loader
batch_train_t = next(train_t_loader)
batch_train_s = tuple(t.to(device) for t in batch_train_s) # src train
batch_train_t = tuple(t.to(device) for t in batch_train_t) # tgt train
train_s_ids, train_s_mask, train_s_labels = batch_train_s
train_s_ids, train_s_mask, train_s_labels = trim_input(train_s_ids, train_s_mask, train_s_labels,
args.train_max_seq_length)
train_t_ids, train_t_mask, train_t_labels = batch_train_t
train_t_ids, train_t_mask, train_t_labels = trim_input(train_t_ids, train_t_mask, train_t_labels,
args.train_max_seq_length)
layers = [int(l) for l in layers]
loss_t, loss_s = step_metaxl_finetune(model, optimizer, raptors,
train_s_ids, train_s_mask, train_s_labels,
train_t_ids, train_t_mask, train_t_labels,
j if args.meta_per_lang else 0, layers, args)
model.eval()
val_score, val_acc, val_precision, val_recall = eval(model, dev_loader.loader, processor)
test_score, test_acc, test_precision, test_recall = eval(model, test_loader, processor)
model.train()
if args.local_rank <= 0 and -val_score < best_val_metric:  # val_acc: the larger the better
best_val_metric = -val_score
torch.save(model.state_dict(), os.path.join(args.output_dir, 'best_finetune.pt'))
if args.local_rank <= 0 and -test_score < best_test_metric: # val_acc: the larger the better
best_test_metric = -test_score
'''
alphas = raptors.get_alpha().detach().cpu().numpy()
'''
tqdm.write(
'Loss_s:%.4f\tLoss_t:%.4f\tLoss:%.4f\tDev F1:%.4f\tDev ACC:%.4f\tDev Precision:%.4f\tDev Recall:%.4f\tBest Dev F1 so far:%.4f\tBest Test F1 so far:%.4f' % (
loss_s.item(), loss_t.item(), loss_s.item() + loss_t.item(), val_score, val_acc, val_precision, val_recall, -best_val_metric, -best_test_metric))
tqdm.write(
'Test F1:%.4f\tTest ACC:%.4f\tTest Precision:%.4f\tTest Recall:%.4f' % (test_score, test_acc, test_precision, test_recall))
_logw.write('%s\t%d\tDev F1: %.4f\tTest F1: %.4f\n' % (tgt_lang, epoch, val_score, test_score))
_logw.flush()
os.fsync(_logw)
# eval on best model saved so far
print('====== Final performance =======')
model.load_state_dict(torch.load(os.path.join(args.output_dir, 'best_finetune.pt')))
model.eval()
score, acc, precision, recall = eval(model, test_loader, processor)
print('Best Dev F1:', -best_val_metric)
print('Best Test F1:', -best_test_metric)
print('Test F1:', score, 'Test ACC:', acc, 'Precision:', precision, 'Recall:', recall)
_logw.write('%s\tFinal best Dev F1: %.4f\tTest F1: %.4f\n' % (tgt_lang, -best_val_metric, score))
_logw.write('%s\tFinal best test F1: %.4f\n' % (tgt_lang, -best_test_metric))
_logw.flush()
os.fsync(_logw)
# close ad-hoc log
_logw.close()
with open(os.path.join(args.output_dir, 'result_finetune.txt'), 'w') as w:
w.write(
'Test F1: %.4f\tTest ACC: %.4f\tPrecision: %.4f\tRecall: %.4f\n' % (score, acc, precision, recall))
w.write('Best Dev F1: %.4f\n' % (-best_val_metric))
w.write('Test F1: %.4f\n' % (score))
# Save a trained model and the associated configuration
model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
# model_to_save.save_pretrained(args.output_dir)
elif args.do_eval:
# Load a trained model and vocabulary that you have fine-tuned
model.eval()
_logw = open(os.path.join(args.output_dir, 'all_result.txt'), 'w')
score, acc, precision, recall = eval(model, dev_loader.loader, processor)
print('%s\tDev F1: %.4f\tDev ACC: %.4f\tPrecision: %.4f\tRecall: %.4f' % (tgt_lang, score, acc, precision, recall))
_logw.write('%s\tDev F1: %.4f\tDev ACC: %.4f\tPrecision: %.4f\tRecall: %.4f\n' % (tgt_lang, score, acc, precision, recall))
score, acc, precision, recall = eval(model, test_loader, processor)
print('%s\tTest F1: %.4f\tTest ACC: %.4f\tPrecision: %.4f\tRecall: %.4f' % (tgt_lang, score, acc, precision, recall))
_logw.write('%s\tTest F1: %.4f\tTest ACC: %.4f\tPrecision: %.4f\tRecall: %.4f\n' % (tgt_lang, score, acc, precision, recall))
_logw.close()
if __name__ == '__main__':
main()
| 68,164 | 50.25188 | 698 | py |
glc | glc-master/SST/SST_experiments_pytorch.py | import numpy as np
import re
import collections
import pickle
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch.nn.functional as F
parser = argparse.ArgumentParser(description='sst label corruption experiments')
parser.add_argument('--method', default='ours', type=str, choices=['ours', 'forward', 'ideal', 'confusion', 'forward_gold'])
parser.add_argument('--corruption_type', default='flip_labels', type=str, choices=['uniform_mix', 'flip_labels'])
args = parser.parse_args()
print(args)
print('CUDA available:', torch.cuda.is_available())
def load_data(filename='./data/SST/senti.train.onlyroot'):
'''
:param filename: the system location of the data to load
:return: the text (x) and its label (y)
the text is a list of words and is not processed
'''
# stop words taken from nltk
stop_words = ['i','me','my','myself','we','our','ours','ourselves','you','your','yours',
'yourself','yourselves','he','him','his','himself','she','her','hers','herself',
'it','its','itself','they','them','their','theirs','themselves','what','which',
'who','whom','this','that','these','those','am','is','are','was','were','be',
'been','being','have','has','had','having','do','does','did','doing','a','an',
'the','and','but','if','or','because','as','until','while','of','at','by','for',
'with','about','against','between','into','through','during','before','after',
'above','below','to','from','up','down','in','out','on','off','over','under',
'again','further','then','once','here','there','when','where','why','how','all',
'any','both','each','few','more','most','other','some','such','no','nor','not',
'only','own','same','so','than','too','very','s','t','can','will','just','don',
'should','now','d','ll','m','o','re','ve','y','ain','aren','couldn','didn',
'doesn','hadn','hasn','haven','isn','ma','mightn','mustn','needn','shan',
'shouldn','wasn','weren','won','wouldn']
x, y = [], []
with open(filename, "r") as f:
for line in f:
line = re.sub(r'\W+', ' ', line).strip().lower() # perhaps don't make words lowercase?
x.append(line[:-1])
x[-1] = ' '.join(word for word in x[-1].split() if word not in stop_words)
y.append(line[-1])
return x, np.array(y, dtype=int)
def get_vocab(dataset):
'''
:param dataset: the text from load_data
:return: a _ordered_ dictionary from words to counts
'''
vocab = {}
# create a counter for each word
for example in dataset:
example_as_list = example.split()
for word in example_as_list:
vocab[word] = 0
for example in dataset:
example_as_list = example.split()
for word in example_as_list:
vocab[word] += 1
# sort from greatest to least by count
return collections.OrderedDict(sorted(vocab.items(), key=lambda x: x[1], reverse=True))
def text_to_rank(dataset, _vocab, desired_vocab_size=5000):
'''
:param dataset: the text from load_data
:vocab: a _ordered_ dictionary of vocab words and counts from get_vocab
:param desired_vocab_size: the desired vocabulary size
words no longer in vocab become UUUNNNKKK
:return: the text corpus with words mapped to their vocab rank,
with all sufficiently infrequent words mapped to UUUNNNKKK; UUUNNNKKK has rank desired_vocab_size
(the infrequent word cutoff is determined by desired_vocab size)
'''
_dataset = dataset[:] # aliasing safeguard
vocab_ordered = list(_vocab)
count_cutoff = _vocab[vocab_ordered[desired_vocab_size-1]] # get word by its rank and map to its count
word_to_rank = {}
for i in range(len(vocab_ordered)):
# we add one to make room for any future padding symbol with value 0
word_to_rank[vocab_ordered[i]] = i + 1
# we need to ensure that other words below the word on the edge of our desired_vocab size
# are not also on the count cutoff, so we subtract a bit
# this is likely quicker than adding another preventative if case
for i in range(len(vocab_ordered[desired_vocab_size:])):
_vocab[vocab_ordered[desired_vocab_size+i]] -= 0.1
for i in range(len(_dataset)):
example = _dataset[i]
example_as_list = example.split()
for j in range(len(example_as_list)):
try:
if _vocab[example_as_list[j]] >= count_cutoff:
example_as_list[j] = word_to_rank[example_as_list[j]]
else:
example_as_list[j] = desired_vocab_size # UUUNNNKKK
except:
example_as_list[j] = desired_vocab_size # UUUNNNKKK
_dataset[i] = example_as_list
return _dataset
# taken from keras
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.):
'''Pads each sequence to the same length:
the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen.
Truncation happens off either the beginning (default) or
the end of the sequence.
Supports post-padding and pre-padding (default).
# Arguments
sequences: list of lists where each element is a sequence
maxlen: int, maximum length
dtype: type to cast the resulting sequence.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than
maxlen either in the beginning or in the end of the sequence
value: float, value to pad the sequences to the desired value.
# Returns
x: numpy array with dimensions (number_of_sequences, maxlen)
'''
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x
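# e.g. pad_sequences([[1, 2, 3], [4]], maxlen=3) -> [[1, 2, 3], [0, 0, 4]]
# (default 'pre' padding/truncating: short sequences are left-padded with 0, long ones keep their tail)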
# //////////////////////// loading data ////////////////////////
max_example_len = 30
batch_size = 50
embedding_dims = 100
vocab_size = 10000
init_lr = 5e-4
reg_str = 1e-5
num_epochs = 5
print('Loading Data')
X_train, Y_train = load_data('./data/SST/senti.binary.train')
X_dev, Y_dev = load_data('./data/SST/senti.binary.dev')
X_test, Y_test = load_data('./data/SST/senti.binary.test')
num_classes = 2
vocab = get_vocab(X_train)
X_train = text_to_rank(X_train, vocab, vocab_size)
X_dev = text_to_rank(X_dev, vocab, vocab_size)
X_test = text_to_rank(X_test, vocab, vocab_size)
X_train = np.array(pad_sequences(X_train, maxlen=max_example_len), dtype=np.long)
X_dev = np.array(pad_sequences(X_dev, maxlen=max_example_len), dtype=np.long)
X_test = np.array(pad_sequences(X_test, maxlen=max_example_len), dtype=np.long)
Y_train = np.array(Y_train, dtype=np.long)
Y_dev = np.array(Y_dev, dtype=np.long)
Y_test = np.array(Y_test, dtype=np.long)
print('Data loaded')
def prepare_data(corruption_matrix, gold_fraction=0.5, merge_valset=True):
np.random.seed(1)
examples = np.copy(X_train)
labels = np.copy(Y_train)
if merge_valset:
examples = np.concatenate([examples, np.copy(X_dev)], axis=0)
labels = np.concatenate([labels, np.copy(Y_dev)])
indices = np.arange(len(labels))
np.random.shuffle(indices)
examples = examples[indices]
labels = labels[indices]
num_gold = int(len(labels)*gold_fraction)
num_silver = len(labels) - num_gold
for i in range(num_silver):
labels[i] = np.random.choice(num_classes, p=corruption_matrix[labels[i]])
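# After this loop the first num_silver examples carry labels resampled from the corruption-matrix
# row of their original label (the "silver" split), while the last num_gold examples keep their
# true labels (the "gold" split used below).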
dataset = {'x': examples, 'y': labels}
gold = {'x': dataset['x'][num_silver:], 'y': dataset['y'][num_silver:]}
return dataset, gold, num_gold, num_silver
def uniform_mix_C(mixing_ratio):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
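# e.g. with num_classes = 2, uniform_mix_C(0.4) = 0.4*[[0.5, 0.5], [0.5, 0.5]] + 0.6*I = [[0.8, 0.2], [0.2, 0.8]]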
def flip_labels_C(corruption_prob):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(1)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
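# e.g. with num_classes = 2, flip_labels_C(0.3) = [[0.7, 0.3], [0.3, 0.7]]: each row keeps
# 1 - corruption_prob on the diagonal and puts all of the corruption mass on a single other class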
# //////////////////////// defining graph ////////////////////////
class WordAveragingLinear(nn.Module):
def __init__(self):
super().__init__()
self.embedding = nn.Embedding(vocab_size+1, embedding_dims, padding_idx=0)
self.out = nn.Linear(embedding_dims, num_classes)
self.init_weights()
def init_weights(self):
self.embedding.weight.data.uniform_(-np.sqrt(6. / (vocab_size+1 + embedding_dims)),
np.sqrt(6. / (vocab_size+1 + embedding_dims)))
self.out.weight.data.normal_(0, 1 / np.sqrt(embedding_dims))
self.out.bias.data.zero_()
def forward(self, x):
return self.out(self.embedding(x).mean(1))
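# Shapes: x is a LongTensor of word ranks, (batch, max_example_len); the embedding lookup gives
# (batch, max_example_len, embedding_dims), averaging over positions gives (batch, embedding_dims),
# and the linear layer returns unnormalized logits of shape (batch, num_classes).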
def train_and_test(method='ours', corruption_level=0, gold_fraction=0.5, get_C=uniform_mix_C):
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
net = WordAveragingLinear().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=init_lr, weight_decay=0)
C = get_C(corruption_level)
dataset, gold, num_gold, num_silver = prepare_data(C, gold_fraction)
# //////////////////////// train for estimation ////////////////////////
if method == 'ours' or method == 'confusion' or method == 'forward_gold' or method == 'ideal':
num_examples = num_silver
elif method == 'forward':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
# shuffle data every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
# backward
l2_loss = (net.out.weight**2).sum() / 2
loss = F.cross_entropy(output, target) + (reg_str * l2_loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
net.eval()
data, target = V(torch.from_numpy(X_test).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
baseline_acc = correct / len(Y_test)
# //////////////////////// estimate C ////////////////////////
if method == 'ours':
probs = F.softmax(net(V(torch.from_numpy(gold['x']).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
indices = np.arange(len(gold['y']))[gold['y'] == label]
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
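# Each row of C_hat is the average softmax output of the silver-trained network over the gold
# examples whose true label is that row's class; since the network was fit to the noisy labels,
# its softmax approximates p(noisy label | x), so the row estimates p(noisy label | true label).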
elif method == 'forward' or method == 'forward_gold':
probs = F.softmax(net(V(torch.from_numpy(dataset['x']).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
class_probs = probs[:,label]
thresh = np.percentile(class_probs, 97, interpolation='higher')
class_probs[class_probs >= thresh] = 0
C_hat[label] = probs[np.argsort(class_probs)][-1]
elif method == 'ideal': C_hat = C
elif method == 'confusion':
# directly estimate confusion matrix on gold
probs = F.softmax(net(V(torch.from_numpy(gold['x']).cuda(), volatile=True))).data.cpu().numpy()
preds = np.argmax(probs, axis=1)
C_hat = np.zeros([num_classes, num_classes])
for i in range(len(gold['y'])):
C_hat[gold['y'][i], preds[i]] += 1
C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
C_hat = C_hat * 0.99 + np.full_like(C_hat, 1/num_classes) * 0.01
print('True C:', np.round(C, decimals=3))
print('C_hat:', np.round(C_hat, decimals=3))
C_hat = V(torch.from_numpy(C_hat.astype(np.float32))).cuda()
# //////////////////////// retrain with correction ////////////////////////
net.train()
net.init_weights()
optimizer = torch.optim.Adam(net.parameters(), lr=init_lr, weight_decay=0)
if method == 'ours' or method == 'ideal' or method == 'confusion' or method == 'forward_gold':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
current_indices = indices[offset:offset + batch_size]
data = dataset['x'][current_indices]
target = dataset['y'][current_indices]
gold_indices = current_indices >= num_silver
silver_indices = current_indices < num_silver
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices]
data_g, target_g = V(torch.LongTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.LongTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
# forward
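# Corrected loss on the silver (noisy) examples: the network's softmax output is pushed through
# the estimated corruption matrix C_hat before being scored against the noisy targets; the
# commented lines below give the explicit p(y_tilde|x) = sum_y p(y_tilde|y) p(y|x) form of the same idea.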
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
output_s -= torch.max(output_s, 1, keepdim=True)[0]
output_s = torch.log(torch.mm(F.softmax(output_s), C_hat))
loss_s = F.cross_entropy(output_s, target_s, size_average=False)
# pre1 = C_hat.t()[torch.cuda.LongTensor(target_s.data)]
# pre2 = torch.mul(F.softmax(output_s), pre1)
# loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
l2_loss = (net.out.weight**2).sum() / 2
loss = (loss_g + loss_s)/batch_size + (reg_str * l2_loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
elif method == 'forward':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
pre1 = C_hat.t()[torch.cuda.LongTensor(target.data)]
pre2 = torch.mul(F.softmax(output), pre1)
loss = -(torch.log(pre2.sum(1))).mean(0)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
# //////////////////////// evaluate method ////////////////////////
net.eval()
data, target = V(torch.from_numpy(X_test).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
test_acc = correct / len(Y_test)
# nudge garbage collector
del dataset; del gold
return test_acc, baseline_acc
# //////////////////////// run experiments ////////////////////////
corruption_fnctn = uniform_mix_C if args.corruption_type == 'uniform_mix' else flip_labels_C
filename = './' + args.method + '_' + args.corruption_type
results = {}
for gold_fraction in [0.001, 0.01, 0.05]:
results[gold_fraction] = {}
for corruption_level in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:
test_acc, baseline_acc = train_and_test(args.method, corruption_level, gold_fraction, corruption_fnctn)
results[gold_fraction][corruption_level] = {}
results[gold_fraction][corruption_level]['method'] = test_acc
results[gold_fraction][corruption_level]['baseline'] = baseline_acc
print('Gold fraction:', gold_fraction, '| Corruption level:', corruption_level,
'| Method acc:', results[gold_fraction][corruption_level]['method'],
'| Baseline acc:', results[gold_fraction][corruption_level]['baseline'])
print()
with open(filename, 'wb') as file:
pickle.dump(results, file)
print("Dumped results_ours in file: " + filename)
| 19,072 | 38.00409 | 124 | py |
glc | glc-master/SST/SST_convex_combo.py | import numpy as np
import re
import collections
import pickle
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch.nn.functional as F
parser = argparse.ArgumentParser(description='sst label corruption experiments')
parser.add_argument('--method', default='combo', type=str, choices=['ours', 'forward', 'ideal', 'confusion', 'forward_gold', 'combo'])
parser.add_argument('--corruption_type', default='flip_labels', type=str, choices=['uniform_mix', 'flip_labels'])
parser.add_argument('--lambda_choice', default='theirs', choices=['theirs', '1_minus_theirs', '0.5'])
args = parser.parse_args()
print(args)
print('CUDA available:', torch.cuda.is_available())
def load_data(filename='./data/SST/senti.train.onlyroot'):
'''
:param filename: the system location of the data to load
:return: the text (x) and its label (y)
the text is a list of words and is not processed
'''
# stop words taken from nltk
stop_words = ['i','me','my','myself','we','our','ours','ourselves','you','your','yours',
'yourself','yourselves','he','him','his','himself','she','her','hers','herself',
'it','its','itself','they','them','their','theirs','themselves','what','which',
'who','whom','this','that','these','those','am','is','are','was','were','be',
'been','being','have','has','had','having','do','does','did','doing','a','an',
'the','and','but','if','or','because','as','until','while','of','at','by','for',
'with','about','against','between','into','through','during','before','after',
'above','below','to','from','up','down','in','out','on','off','over','under',
'again','further','then','once','here','there','when','where','why','how','all',
'any','both','each','few','more','most','other','some','such','no','nor','not',
'only','own','same','so','than','too','very','s','t','can','will','just','don',
'should','now','d','ll','m','o','re','ve','y','ain','aren','couldn','didn',
'doesn','hadn','hasn','haven','isn','ma','mightn','mustn','needn','shan',
'shouldn','wasn','weren','won','wouldn']
x, y = [], []
with open(filename, "r") as f:
for line in f:
line = re.sub(r'\W+', ' ', line).strip().lower() # perhaps don't make words lowercase?
x.append(line[:-1])
x[-1] = ' '.join(word for word in x[-1].split() if word not in stop_words)
y.append(line[-1])
return x, np.array(y, dtype=int)
def get_vocab(dataset):
'''
:param dataset: the text from load_data
:return: a _ordered_ dictionary from words to counts
'''
vocab = {}
# create a counter for each word
for example in dataset:
example_as_list = example.split()
for word in example_as_list:
vocab[word] = 0
for example in dataset:
example_as_list = example.split()
for word in example_as_list:
vocab[word] += 1
# sort from greatest to least by count
return collections.OrderedDict(sorted(vocab.items(), key=lambda x: x[1], reverse=True))
def text_to_rank(dataset, _vocab, desired_vocab_size=5000):
'''
:param dataset: the text from load_data
:vocab: a _ordered_ dictionary of vocab words and counts from get_vocab
:param desired_vocab_size: the desired vocabulary size
words no longer in vocab become UUUNNNKKK
:return: the text corpus with words mapped to their vocab rank,
with all sufficiently infrequent words mapped to UUUNNNKKK; UUUNNNKKK has rank desired_vocab_size
(the infrequent word cutoff is determined by desired_vocab size)
'''
_dataset = dataset[:] # aliasing safeguard
vocab_ordered = list(_vocab)
count_cutoff = _vocab[vocab_ordered[desired_vocab_size-1]] # get word by its rank and map to its count
word_to_rank = {}
for i in range(len(vocab_ordered)):
# we add one to make room for any future padding symbol with value 0
word_to_rank[vocab_ordered[i]] = i + 1
# we need to ensure that other words below the word on the edge of our desired_vocab size
# are not also on the count cutoff, so we subtract a bit
# this is likely quicker than adding another preventative if case
for i in range(len(vocab_ordered[desired_vocab_size:])):
_vocab[vocab_ordered[desired_vocab_size+i]] -= 0.1
for i in range(len(_dataset)):
example = _dataset[i]
example_as_list = example.split()
for j in range(len(example_as_list)):
try:
if _vocab[example_as_list[j]] >= count_cutoff:
example_as_list[j] = word_to_rank[example_as_list[j]]
else:
example_as_list[j] = desired_vocab_size # UUUNNNKKK
except:
example_as_list[j] = desired_vocab_size # UUUNNNKKK
_dataset[i] = example_as_list
return _dataset
# taken from keras
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.):
'''Pads each sequence to the same length:
the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen.
Truncation happens off either the beginning (default) or
the end of the sequence.
Supports post-padding and pre-padding (default).
# Arguments
sequences: list of lists where each element is a sequence
maxlen: int, maximum length
dtype: type to cast the resulting sequence.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than
maxlen either in the beginning or in the end of the sequence
value: float, value to pad the sequences to the desired value.
# Returns
x: numpy array with dimensions (number_of_sequences, maxlen)
'''
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x
# //////////////////////// loading data ////////////////////////
max_example_len = 30
batch_size = 50
embedding_dims = 100
vocab_size = 10000
init_lr = 5e-4
reg_str = 1e-5
num_epochs = 5
print('Loading Data')
X_train, Y_train = load_data('./data/SST/senti.binary.train')
X_dev, Y_dev = load_data('./data/SST/senti.binary.dev')
X_test, Y_test = load_data('./data/SST/senti.binary.test')
num_classes = 2
vocab = get_vocab(X_train)
X_train = text_to_rank(X_train, vocab, vocab_size)
X_dev = text_to_rank(X_dev, vocab, vocab_size)
X_test = text_to_rank(X_test, vocab, vocab_size)
X_train = np.array(pad_sequences(X_train, maxlen=max_example_len), dtype=np.long)
X_dev = np.array(pad_sequences(X_dev, maxlen=max_example_len), dtype=np.long)
X_test = np.array(pad_sequences(X_test, maxlen=max_example_len), dtype=np.long)
Y_train = np.array(Y_train, dtype=np.long)
Y_dev = np.array(Y_dev, dtype=np.long)
Y_test = np.array(Y_test, dtype=np.long)
print('Data loaded')
def prepare_data(corruption_matrix, gold_fraction=0.5, merge_valset=True):
np.random.seed(1)
examples = np.copy(X_train)
labels = np.copy(Y_train)
if merge_valset:
examples = np.concatenate([examples, np.copy(X_dev)], axis=0)
labels = np.concatenate([labels, np.copy(Y_dev)])
indices = np.arange(len(labels))
np.random.shuffle(indices)
examples = examples[indices]
labels = labels[indices]
num_gold = int(len(labels)*gold_fraction)
num_silver = len(labels) - num_gold
for i in range(num_silver):
labels[i] = np.random.choice(num_classes, p=corruption_matrix[labels[i]])
dataset = {'x': examples, 'y': labels}
gold = {'x': dataset['x'][num_silver:], 'y': dataset['y'][num_silver:]}
silver = {'x': dataset['x'][:num_silver], 'y': dataset['y'][:num_silver]}
# for convex combo net
iter = 0
indices = np.arange(num_gold)
while True:
if len(np.unique(gold['y'][indices][:num_gold // 4])) == num_classes:
gold_train = {'x': gold['x'][indices][num_gold // 4:], 'y': gold['y'][indices][num_gold // 4:]}
gold_val = {'x': gold['x'][indices][:num_gold // 4], 'y': gold['y'][indices][:num_gold // 4]}
print('Successfully split gold into a train and val set with all classes in the val set')
break
else:
np.random.shuffle(indices)
iter += 1
if iter == 100:
assert False, 'Failed to split gold data'
return dataset, gold, num_gold, num_silver, gold_train, gold_val, silver
def uniform_mix_C(mixing_ratio):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(1)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
# //////////////////////// defining graph ////////////////////////
class WordAveragingLinear(nn.Module):
def __init__(self):
super().__init__()
self.embedding = nn.Embedding(vocab_size+1, embedding_dims, padding_idx=0)
self.out = nn.Linear(embedding_dims, num_classes)
self.init_weights()
def init_weights(self):
self.embedding.weight.data.uniform_(-np.sqrt(6. / (vocab_size+1 + embedding_dims)),
np.sqrt(6. / (vocab_size+1 + embedding_dims)))
self.out.weight.data.normal_(0, 1 / np.sqrt(embedding_dims))
self.out.bias.data.zero_()
def forward(self, x):
return self.out(self.embedding(x.view(-1, max_example_len)).mean(1))
def train_and_test(method='ours', corruption_level=0, gold_fraction=0.5, get_C=uniform_mix_C):
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
net = WordAveragingLinear().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=init_lr, weight_decay=0)
C = get_C(corruption_level)
dataset, gold, num_gold, num_silver, gold_train, gold_val, silver = prepare_data(C, gold_fraction)
# //////////////////////// train net on clean ////////////////////////
clean_net = WordAveragingLinear().cuda()
optimizer_cn = torch.optim.Adam(clean_net.parameters(), lr=init_lr, weight_decay=0)
num_examples = len(gold_train['y'])
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(max(num_epochs, 1 + 2000//num_batches)): # at least 2000 updates
# shuffle data every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = gold_train['x'][indices[offset:offset + batch_size]]
y_batch = gold_train['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = clean_net(data)
# backward
l2_loss = (clean_net.out.weight**2).sum() / 2
loss = F.cross_entropy(output, target) + (reg_str * l2_loss)
optimizer_cn.zero_grad()
loss.backward()
optimizer_cn.step()
clean_net.eval()
data, target = V(torch.from_numpy(X_test).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = clean_net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
gold_only_acc = correct / len(Y_test)
print('Gold only:', gold_only_acc)
# //////////////////////// train for estimation ////////////////////////
num_examples = num_silver
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
# shuffle data every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
# backward
l2_loss = (net.out.weight**2).sum() / 2
loss = F.cross_entropy(output, target) + (reg_str * l2_loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
net.eval()
data, target = V(torch.from_numpy(X_test).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
baseline_acc = correct / len(Y_test)
# //////////////////////// estimate C ////////////////////////
if method == 'ours' or method == 'combo':
probs = F.softmax(net(V(torch.from_numpy(gold['x']).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
indices = np.arange(len(gold['y']))[gold['y'] == label]
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
elif method == 'forward' or method == 'forward_gold':
probs = F.softmax(net(V(torch.from_numpy(dataset['x']).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
class_probs = probs[:,label]
thresh = np.percentile(class_probs, 97, interpolation='higher')
class_probs[class_probs >= thresh] = 0
C_hat[label] = probs[np.argsort(class_probs)][-1]
elif method == 'ideal': C_hat = C
elif method == 'confusion':
# directly estimate confusion matrix on gold
probs = F.softmax(net(V(torch.from_numpy(gold['x']).cuda(), volatile=True))).data.cpu().numpy()
preds = np.argmax(probs, axis=1)
C_hat = np.zeros([num_classes, num_classes])
for i in range(len(gold['y'])):
C_hat[gold['y'][i], preds[i]] += 1
C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
print('True C:', np.round(C, decimals=3))
print('C_hat:', np.round(C_hat, decimals=3))
# //////////////////////// estimate lambda ////////////////////////
# /////////// getting average precision of clean_net ///////////
tp_count = np.zeros(num_classes)
tp_fp_count = np.zeros(num_classes)
for i in range(len(gold_val['y'])):
data, target = gold_val['x'][i], np.array([gold_val['y'][i]])
data, target = V(torch.from_numpy(data).cuda(), volatile=True), \
V(torch.LongTensor(target).cuda(), volatile=True)
# forward
output = clean_net(data)
# average precision
pred = output.data.max(1)[1]
batch_correct = pred.eq(target.data)
for i in range(len(batch_correct)):
tp_count[pred[i]] += batch_correct[i]
tp_fp_count[pred[i]] += 1
precisions = tp_count / (tp_fp_count + 1e-8)
clean_ap = np.mean(precisions)
# /////////// getting average precision of noisy labeling ///////////
y_br = np.zeros(num_classes)
y_tilde_br = np.zeros(num_classes)
for i in range(len(gold['y'])):
y_br[gold['y'][i]] += 1
for i in range(len(silver['y'])):
y_tilde_br[silver['y'][i]] += 1
y_br /= np.sum(y_br)
y_tilde_br += 1e-12
y_tilde_br /= np.sum(y_tilde_br)
# print(y_br)
# print(y_tilde_br)
# print(np.unique(silver['y']))
C2_hat = (C_hat.T * y_br.reshape(1, num_classes)) / y_tilde_br.reshape(num_classes, 1)
C2_hat += 1e-12
    C2_hat /= np.sum(C2_hat, axis=1, keepdims=True)  # normalize each row to sum to 1
# print(C2_hat.sum(1))
# print(C2_hat)
# print(C_hat.sum(1))
# print(C_hat)
noisy_ap = np.mean(np.diag(C2_hat))
print('Clean AP: {}, Noisy AP: {}'.format(clean_ap, noisy_ap))
if args.lambda_choice == 'theirs':
combo_lambda = float(clean_ap / (noisy_ap + clean_ap))
elif args.lambda_choice == '1_minus_theirs':
combo_lambda = 1 - float(clean_ap / (noisy_ap + clean_ap))
elif args.lambda_choice == '0.5':
combo_lambda = 0.5
print('Combo Lambda: {}'.format(combo_lambda))
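    # Explanatory note on the lambda estimate: C2_hat applies Bayes' rule to the estimated
    # C_hat, i.e. C2_hat[i, j] ~ p(true = j | noisy = i) = p(noisy = i | true = j) * p(true = j)
    # / p(noisy = i), so its diagonal holds the per-class precision of the noisy labels and
    # noisy_ap is their average precision, while clean_ap is the average precision of clean_net
    # on the held-out gold_val split. Worked example (hypothetical numbers): clean_ap = 0.90 and
    # noisy_ap = 0.60 give combo_lambda = 0.90 / (0.60 + 0.90) = 0.60 under the default 'theirs'
    # choice, so the convex-combination target below weights the dataset's one-hot label by 0.60
    # and clean_net's softmax prediction by 0.40.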
C_hat = V(torch.eye(num_classes)).cuda()
# //////////////////////// retrain with correction ////////////////////////
net.train()
net.init_weights()
optimizer = torch.optim.Adam(net.parameters(), lr=init_lr, weight_decay=0)
if method == 'ours' or method == 'ideal' or method == 'confusion' or method == 'forward_gold':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
current_indices = indices[offset:offset + batch_size]
data = dataset['x'][current_indices]
target = dataset['y'][current_indices]
gold_indices = current_indices >= num_silver
silver_indices = current_indices < num_silver
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices]
data_g, target_g = V(torch.LongTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.LongTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
output_s -= torch.max(output_s, 1, keepdim=True)[0]
output_s = torch.log(torch.mm(F.softmax(output_s), C_hat))
loss_s = F.cross_entropy(output_s, target_s, size_average=False)
# pre1 = C_hat.t()[torch.cuda.LongTensor(target_s.data)]
# pre2 = torch.mul(F.softmax(output_s), pre1)
# loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
l2_loss = (net.out.weight**2).sum() / 2
loss = (loss_g + loss_s)/batch_size + (reg_str * l2_loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
elif method == 'combo':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
target_one_hot = np.zeros((len(y_batch), num_classes))
target_one_hot[np.arange(len(y_batch)), y_batch] = 1
target_one_hot = target_one_hot.astype(np.float32)
data, target_one_hot = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(target_one_hot).cuda())
target_soft = F.softmax(clean_net(data))
target = (combo_lambda * target_one_hot) + ((1 - combo_lambda) * target_soft)
# forward
output = torch.mm(F.softmax(net(data)), C_hat)
l2_loss = (net.out.weight**2).sum() / 2
loss = -(target * torch.log(output)).sum(1).mean(0) + (reg_str * l2_loss)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
# //////////////////////// evaluate method ////////////////////////
net.eval()
data, target = V(torch.from_numpy(X_test).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
test_acc = correct / len(Y_test)
# nudge garbage collector
del dataset; del gold
return test_acc, baseline_acc
# //////////////////////// run experiments ////////////////////////
corruption_fnctn = uniform_mix_C if args.corruption_type == 'uniform_mix' else flip_labels_C
filename = './' + args.method + '_' + args.corruption_type
results = {}
for gold_fraction in [0.001, 0.01, 0.05]:
results[gold_fraction] = {}
for corruption_level in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:
test_acc, baseline_acc = train_and_test(args.method, corruption_level, gold_fraction, corruption_fnctn)
results[gold_fraction][corruption_level] = {}
results[gold_fraction][corruption_level]['method'] = test_acc
results[gold_fraction][corruption_level]['baseline'] = baseline_acc
print('Gold fraction:', gold_fraction, '| Corruption level:', corruption_level,
'| Method acc:', results[gold_fraction][corruption_level]['method'],
'| Baseline acc:', results[gold_fraction][corruption_level]['baseline'])
print()
with open(filename, 'wb') as file:
pickle.dump(results, file)
print("Dumped results_ours in file: " + filename)
| 23,625 | 37.478827 | 134 | py |
glc | glc-master/SST/SST_gold_only.py | import numpy as np
import re
import collections
import pickle
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch.nn.functional as F
parser = argparse.ArgumentParser(description='sst label corruption experiments')
parser.add_argument('--method', default='gold_only', type=str, choices=['gold_only'])
parser.add_argument('--corruption_type', default='flip_labels', type=str, choices=['uniform_mix', 'flip_labels'])
args = parser.parse_args()
print(args)
print('CUDA available:', torch.cuda.is_available())
def load_data(filename='./data/SST/senti.train.onlyroot'):
'''
:param filename: the system location of the data to load
    :return: the text (x) and its labels (y);
             x is a list of sentences (strings), lowercased with punctuation and stop words removed
'''
# stop words taken from nltk
stop_words = ['i','me','my','myself','we','our','ours','ourselves','you','your','yours',
'yourself','yourselves','he','him','his','himself','she','her','hers','herself',
'it','its','itself','they','them','their','theirs','themselves','what','which',
'who','whom','this','that','these','those','am','is','are','was','were','be',
'been','being','have','has','had','having','do','does','did','doing','a','an',
'the','and','but','if','or','because','as','until','while','of','at','by','for',
'with','about','against','between','into','through','during','before','after',
'above','below','to','from','up','down','in','out','on','off','over','under',
'again','further','then','once','here','there','when','where','why','how','all',
'any','both','each','few','more','most','other','some','such','no','nor','not',
'only','own','same','so','than','too','very','s','t','can','will','just','don',
'should','now','d','ll','m','o','re','ve','y','ain','aren','couldn','didn',
'doesn','hadn','hasn','haven','isn','ma','mightn','mustn','needn','shan',
'shouldn','wasn','weren','won','wouldn']
x, y = [], []
with open(filename, "r") as f:
for line in f:
line = re.sub(r'\W+', ' ', line).strip().lower() # perhaps don't make words lowercase?
x.append(line[:-1])
x[-1] = ' '.join(word for word in x[-1].split() if word not in stop_words)
y.append(line[-1])
return x, np.array(y, dtype=int)
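# Illustrative example of the parsing above (assuming, as the slicing implies, that each line
# ends with its 0/1 sentiment label as the final character): for a hypothetical line
# "A gorgeous and moving film 1", x receives "gorgeous moving film" (lowercased, punctuation
# stripped, stop words removed) and y receives 1.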
def get_vocab(dataset):
'''
:param dataset: the text from load_data
    :return: an _ordered_ dictionary from words to counts
'''
vocab = {}
# create a counter for each word
for example in dataset:
example_as_list = example.split()
for word in example_as_list:
vocab[word] = 0
for example in dataset:
example_as_list = example.split()
for word in example_as_list:
vocab[word] += 1
# sort from greatest to least by count
return collections.OrderedDict(sorted(vocab.items(), key=lambda x: x[1], reverse=True))
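# Worked example (illustrative): for the cleaned sentences ['gorgeous moving film',
# 'gorgeous film', 'film'], get_vocab returns OrderedDict([('film', 3), ('gorgeous', 2),
# ('moving', 1)]), ordered from most to least frequent.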
def text_to_rank(dataset, _vocab, desired_vocab_size=5000):
'''
:param dataset: the text from load_data
    :param _vocab: an _ordered_ dictionary of vocab words and counts from get_vocab
:param desired_vocab_size: the desired vocabulary size
words no longer in vocab become UUUNNNKKK
:return: the text corpus with words mapped to their vocab rank,
with all sufficiently infrequent words mapped to UUUNNNKKK; UUUNNNKKK has rank desired_vocab_size
(the infrequent word cutoff is determined by desired_vocab size)
'''
_dataset = dataset[:] # aliasing safeguard
vocab_ordered = list(_vocab)
count_cutoff = _vocab[vocab_ordered[desired_vocab_size-1]] # get word by its rank and map to its count
word_to_rank = {}
for i in range(len(vocab_ordered)):
# we add one to make room for any future padding symbol with value 0
word_to_rank[vocab_ordered[i]] = i + 1
    # ensure that words ranked just below the desired_vocab_size boundary do not share the
    # boundary word's count (and thereby slip past the cutoff), so nudge their counts down a bit;
    # this is likely quicker than adding another guard in the mapping loop below
for i in range(len(vocab_ordered[desired_vocab_size:])):
_vocab[vocab_ordered[desired_vocab_size+i]] -= 0.1
for i in range(len(_dataset)):
example = _dataset[i]
example_as_list = example.split()
for j in range(len(example_as_list)):
try:
if _vocab[example_as_list[j]] >= count_cutoff:
example_as_list[j] = word_to_rank[example_as_list[j]]
else:
example_as_list[j] = desired_vocab_size # UUUNNNKKK
            except KeyError:  # word absent from the training vocab
example_as_list[j] = desired_vocab_size # UUUNNNKKK
_dataset[i] = example_as_list
return _dataset
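# Continuing the example above (illustrative): with that vocab and desired_vocab_size=2,
# text_to_rank maps 'film' -> 1, 'gorgeous' -> 2, and the infrequent 'moving' (or any word
# unseen at training time) -> 2, the UUUNNNKKK id; UUUNNNKKK shares the id of the boundary
# word, matching the rank convention in the docstring, and id 0 stays free for padding.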
# taken from keras
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.):
'''Pads each sequence to the same length:
the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen.
Truncation happens off either the beginning (default) or
the end of the sequence.
Supports post-padding and pre-padding (default).
# Arguments
sequences: list of lists where each element is a sequence
maxlen: int, maximum length
dtype: type to cast the resulting sequence.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than
maxlen either in the beginning or in the end of the sequence
value: float, value to pad the sequences to the desired value.
# Returns
x: numpy array with dimensions (number_of_sequences, maxlen)
'''
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x
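# Worked example (illustrative): pad_sequences([[3, 7, 1], [5, 2]], maxlen=4) returns
# array([[0, 3, 7, 1],
#        [0, 0, 5, 2]], dtype=int32) with the default 'pre' padding; a sequence longer than
# maxlen keeps only its last maxlen elements under the default 'pre' truncating.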
# //////////////////////// loading data ////////////////////////
max_example_len = 30
batch_size = 50
embedding_dims = 100
vocab_size = 10000
init_lr = 5e-4
reg_str = 1e-5
num_epochs = 5
print('Loading Data')
X_train, Y_train = load_data('./data/SST/senti.binary.train')
X_dev, Y_dev = load_data('./data/SST/senti.binary.dev')
X_test, Y_test = load_data('./data/SST/senti.binary.test')
num_classes = 2
vocab = get_vocab(X_train)
X_train = text_to_rank(X_train, vocab, vocab_size)
X_dev = text_to_rank(X_dev, vocab, vocab_size)
X_test = text_to_rank(X_test, vocab, vocab_size)
X_train = np.array(pad_sequences(X_train, maxlen=max_example_len), dtype=np.long)
X_dev = np.array(pad_sequences(X_dev, maxlen=max_example_len), dtype=np.long)
X_test = np.array(pad_sequences(X_test, maxlen=max_example_len), dtype=np.long)
Y_train = np.array(Y_train, dtype=np.long)
Y_dev = np.array(Y_dev, dtype=np.long)
Y_test = np.array(Y_test, dtype=np.long)
print('Data loaded')
def prepare_data(corruption_matrix, gold_fraction=0.5, merge_valset=True):
np.random.seed(1)
examples = np.copy(X_train)
labels = np.copy(Y_train)
if merge_valset:
examples = np.concatenate([examples, np.copy(X_dev)], axis=0)
labels = np.concatenate([labels, np.copy(Y_dev)])
indices = np.arange(len(labels))
np.random.shuffle(indices)
examples = examples[indices]
labels = labels[indices]
num_gold = int(len(labels)*gold_fraction)
num_silver = len(labels) - num_gold
for i in range(num_silver):
labels[i] = np.random.choice(num_classes, p=corruption_matrix[labels[i]])
dataset = {'x': examples, 'y': labels}
gold = {'x': dataset['x'][num_silver:], 'y': dataset['y'][num_silver:]}
return dataset, gold, num_gold, num_silver
def uniform_mix_C(mixing_ratio):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(1)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
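# Worked examples for the binary SST setting (num_classes == 2), illustrative only:
#   uniform_mix_C(0.5) -> [[0.75, 0.25], [0.25, 0.75]]
#   flip_labels_C(0.4) -> [[0.6, 0.4], [0.4, 0.6]]   (with two classes the flipped-to entry
#                         is forced; with more classes it is chosen at random per row)
# Row y of either matrix is the distribution of the corrupted label given true label y,
# which is exactly how prepare_data above samples the silver labels.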
# //////////////////////// defining graph ////////////////////////
class WordAveragingLinear(nn.Module):
def __init__(self):
super().__init__()
self.embedding = nn.Embedding(vocab_size+1, embedding_dims, padding_idx=0)
self.out = nn.Linear(embedding_dims, num_classes)
self.init_weights()
def init_weights(self):
self.embedding.weight.data.uniform_(-np.sqrt(6. / (vocab_size+1 + embedding_dims)),
np.sqrt(6. / (vocab_size+1 + embedding_dims)))
self.out.weight.data.normal_(0, 1 / np.sqrt(embedding_dims))
self.out.bias.data.zero_()
def forward(self, x):
return self.out(self.embedding(x).mean(1))
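# Shape walk-through of the model above (descriptive note): a batch of word ids of shape
# (batch, 30) is embedded to (batch, 30, 100), averaged over the 30 token positions to
# (batch, 100), and mapped by the linear layer to (batch, 2) class logits; the padding id 0
# contributes zero vectors to the average.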
def train_and_test(method='ours', corruption_level=0, gold_fraction=0.5, get_C=uniform_mix_C):
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
net = WordAveragingLinear().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=init_lr, weight_decay=0)
C = get_C(corruption_level)
dataset, gold, num_gold, num_silver = prepare_data(C, gold_fraction)
# //////////////////////// train for estimation ////////////////////////
num_examples = num_gold
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(max(num_epochs, 1 + 2000//num_batches)): # at least 2000 updates
# shuffle data every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
# backward
l2_loss = (net.out.weight**2).sum() / 2
loss = F.cross_entropy(output, target) + (reg_str * l2_loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
net.eval()
data, target = V(torch.from_numpy(X_test).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
test_acc = correct / len(Y_test)
baseline_acc = 0
# nudge garbage collector
del dataset; del gold
return test_acc, baseline_acc
# //////////////////////// run experiments ////////////////////////
corruption_fnctn = uniform_mix_C if args.corruption_type == 'uniform_mix' else flip_labels_C
filename = './' + args.method
results = {}
for gold_fraction in [0.001, 0.01, 0.05]:
results[gold_fraction] = {}
for corruption_level in [0]:
test_acc, baseline_acc = train_and_test(args.method, corruption_level, gold_fraction, corruption_fnctn)
results[gold_fraction][corruption_level] = {}
results[gold_fraction][corruption_level]['method'] = test_acc
results[gold_fraction][corruption_level]['baseline'] = baseline_acc
print('Gold fraction:', gold_fraction, '| Corruption level:', corruption_level,
'| Method acc:', results[gold_fraction][corruption_level]['method'],
'| Baseline acc:', results[gold_fraction][corruption_level]['baseline'])
print()
with open(filename, 'wb') as file:
pickle.dump(results, file)
print("Dumped results_ours in file: " + filename)
| 13,290 | 37.302594 | 114 | py |
glc | glc-master/MNIST/MNIST_gold_only.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch.nn.functional as F
import pickle
from tensorflow.examples.tutorials.mnist import input_data
import argparse
mnist = input_data.read_data_sets(train_dir='mnist', one_hot=False)
parser = argparse.ArgumentParser(description='MNIST label corruption experiments')
parser.add_argument('--method', default='gold_only', type=str, choices=['gold_only'])
parser.add_argument('--corruption_type', default='flip_labels', type=str, choices=['uniform_mix', 'flip_labels'])
args = parser.parse_args()
print(args)
print('CUDA available:', torch.cuda.is_available())
def prepare_data(corruption_matrix, gold_fraction=0.5, merge_valset=True):
np.random.seed(1)
mnist_images = np.copy(mnist.train.images)
mnist_labels = np.copy(mnist.train.labels)
if merge_valset:
mnist_images = np.concatenate([mnist_images, np.copy(mnist.validation.images)], axis=0)
mnist_labels = np.concatenate([mnist_labels, np.copy(mnist.validation.labels)])
indices = np.arange(len(mnist_labels))
np.random.shuffle(indices)
mnist_images = mnist_images[indices]
mnist_labels = mnist_labels[indices].astype(np.long)
num_gold = int(len(mnist_labels)*gold_fraction)
num_silver = len(mnist_labels) - num_gold
for i in range(num_silver):
mnist_labels[i] = np.random.choice(num_classes, p=corruption_matrix[mnist_labels[i]])
dataset = {'x': mnist_images, 'y': mnist_labels}
gold = {'x': dataset['x'][num_silver:], 'y': dataset['y'][num_silver:]}
return dataset, gold, num_gold, num_silver
def uniform_mix_C(mixing_ratio):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(1)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
# //////////////////////// defining model ////////////////////////
reg_str = 1e-6
num_epochs = 10
batch_size = 32
num_classes = 10
class ThreeLayerNet(nn.Module):
def __init__(self):
super().__init__()
self.main = nn.Sequential(
nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, num_classes)
)
self.init_weights()
def init_weights(self):
self.main[0].weight.data.normal_(0, 1/np.sqrt(784))
self.main[0].bias.data.zero_()
self.main[2].weight.data.normal_(0, 1/np.sqrt(128))
self.main[2].bias.data.zero_()
self.main[4].weight.data.normal_(0, 1/np.sqrt(128))
self.main[4].bias.data.zero_()
def forward(self, x):
return self.main(x)
def train_and_test(method='ours', corruption_level=0, gold_fraction=0.5, get_C=uniform_mix_C):
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
net = ThreeLayerNet().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=reg_str)
C = get_C(corruption_level)
dataset, gold, num_gold, num_silver = prepare_data(C, gold_fraction)
# //////////////////////// train for estimation ////////////////////////
num_examples = num_gold
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(max(num_epochs, 1 + 2000//num_batches)): # at least 2000 updates
# shuffle data every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = gold['x'][indices[offset:offset + batch_size]]
y_batch = gold['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
# backward
loss = F.cross_entropy(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
net.eval()
data, target = V(torch.from_numpy(mnist.test.images).cuda(), volatile=True),\
V(torch.from_numpy(mnist.test.labels.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
test_acc = correct / len(mnist.test.labels)
baseline_acc = 0 # placeholder
# nudge garbage collector
del dataset; del gold
return test_acc, baseline_acc
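# Method note (descriptive): the 'gold_only' baseline ignores the corrupted silver data
# entirely; it trains the same ThreeLayerNet on just the trusted gold fraction, inflating the
# epoch count via max(num_epochs, 1 + 2000//num_batches) so that roughly 2000 updates happen
# even when the gold set is small, and it reports baseline_acc = 0 as a placeholder because
# this script has no separately trained noisy baseline.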
# //////////////////////// run experiments ////////////////////////
corruption_fnctn = uniform_mix_C if args.corruption_type == 'uniform_mix' else flip_labels_C
filename = './' + args.method
results = {}
for gold_fraction in [0.001, 0.01, 0.05]:
results[gold_fraction] = {}
for corruption_level in [0]:
test_acc, baseline_acc = train_and_test(args.method, corruption_level, gold_fraction, corruption_fnctn)
results[gold_fraction][corruption_level] = {}
results[gold_fraction][corruption_level]['method'] = test_acc
results[gold_fraction][corruption_level]['baseline'] = baseline_acc
print('Gold fraction:', gold_fraction, '| Corruption level:', corruption_level,
'| Method acc:', results[gold_fraction][corruption_level]['method'],
'| Baseline acc:', results[gold_fraction][corruption_level]['baseline'])
print()
with open(filename, 'wb') as file:
pickle.dump(results, file)
print("Dumped results_ours in file: " + filename)
| 5,929 | 33.277457 | 113 | py |
glc | glc-master/MNIST/MNIST_experiments_pytorch.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch.nn.functional as F
import pickle
from tensorflow.examples.tutorials.mnist import input_data
import argparse
mnist = input_data.read_data_sets(train_dir='mnist', one_hot=False)
parser = argparse.ArgumentParser(description='MNIST label corruption experiments')
parser.add_argument('--method', default='ours', type=str, choices=['ours', 'forward', 'forward_gold', 'ideal', 'confusion'])
parser.add_argument('--corruption_type', default='flip_labels', type=str, choices=['uniform_mix', 'flip_labels'])
args = parser.parse_args()
print(args)
print('CUDA available:', torch.cuda.is_available())
def prepare_data(corruption_matrix, gold_fraction=0.5, merge_valset=True):
np.random.seed(1)
mnist_images = np.copy(mnist.train.images)
mnist_labels = np.copy(mnist.train.labels)
if merge_valset:
mnist_images = np.concatenate([mnist_images, np.copy(mnist.validation.images)], axis=0)
mnist_labels = np.concatenate([mnist_labels, np.copy(mnist.validation.labels)])
indices = np.arange(len(mnist_labels))
np.random.shuffle(indices)
mnist_images = mnist_images[indices]
mnist_labels = mnist_labels[indices].astype(np.long)
num_gold = int(len(mnist_labels)*gold_fraction)
num_silver = len(mnist_labels) - num_gold
for i in range(num_silver):
mnist_labels[i] = np.random.choice(num_classes, p=corruption_matrix[mnist_labels[i]])
dataset = {'x': mnist_images, 'y': mnist_labels}
gold = {'x': dataset['x'][num_silver:], 'y': dataset['y'][num_silver:]}
return dataset, gold, num_gold, num_silver
def uniform_mix_C(mixing_ratio):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(1)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
# //////////////////////// defining model ////////////////////////
reg_str = 1e-6
num_epochs = 10
batch_size = 32
num_classes = 10
class ThreeLayerNet(nn.Module):
def __init__(self):
super().__init__()
self.main = nn.Sequential(
nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, num_classes)
)
self.init_weights()
def init_weights(self):
self.main[0].weight.data.normal_(0, 1/np.sqrt(784))
self.main[0].bias.data.zero_()
self.main[2].weight.data.normal_(0, 1/np.sqrt(128))
self.main[2].bias.data.zero_()
self.main[4].weight.data.normal_(0, 1/np.sqrt(128))
self.main[4].bias.data.zero_()
def forward(self, x):
return self.main(x)
def train_and_test(method='ours', corruption_level=0, gold_fraction=0.5, get_C=uniform_mix_C):
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
net = ThreeLayerNet().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=reg_str)
C = get_C(corruption_level)
dataset, gold, num_gold, num_silver = prepare_data(C, gold_fraction)
# //////////////////////// train for estimation ////////////////////////
if method == 'ours' or method == 'confusion' or method == 'forward_gold':
num_examples = num_silver
elif method == 'forward':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
# shuffle data every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
# backward
loss = F.cross_entropy(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
net.eval()
data, target = V(torch.from_numpy(mnist.test.images).cuda(), volatile=True),\
V(torch.from_numpy(mnist.test.labels.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
baseline_acc = correct / len(mnist.test.labels)
# //////////////////////// estimate C ////////////////////////
if method == 'ours':
probs = F.softmax(net(V(torch.from_numpy(gold['x']).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
indices = np.arange(len(gold['y']))[gold['y'] == label]
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
elif method == 'forward' or method == 'forward_gold':
probs = F.softmax(net(V(torch.from_numpy(dataset['x']).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
class_probs = probs[:,label]
thresh = np.percentile(class_probs, 97, interpolation='higher')
class_probs[class_probs >= thresh] = 0
C_hat[label] = probs[np.argsort(class_probs)][-1]
elif method == 'ideal': C_hat = C
elif method == 'confusion':
# directly estimate confusion matrix on gold
probs = F.softmax(net(V(torch.from_numpy(gold['x']).cuda(), volatile=True))).data.cpu().numpy()
preds = np.argmax(probs, axis=1)
C_hat = np.zeros([num_classes, num_classes])
for i in range(len(gold['y'])):
C_hat[gold['y'][i], preds[i]] += 1
C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
C_hat = C_hat * 0.99 + np.full_like(C_hat, 1/num_classes) * 0.01
print('True C:', np.round(C, decimals=3))
print('C_hat:', np.round(C_hat, decimals=3))
C_hat = V(torch.from_numpy(C_hat.astype(np.float32))).cuda()
# //////////////////////// retrain with correction ////////////////////////
net.train()
net.init_weights()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=reg_str)
if method == 'ours' or method == 'ideal' or method == 'confusion' or method == 'forward_gold':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
current_indices = indices[offset:offset + batch_size]
data = dataset['x'][current_indices]
target = dataset['y'][current_indices]
gold_indices = current_indices >= num_silver
silver_indices = current_indices < num_silver
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices]
data_g, target_g = V(torch.FloatTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.FloatTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat.t()[torch.cuda.LongTensor(target_s.data)]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/batch_size
optimizer.zero_grad()
loss.backward()
optimizer.step()
elif method == 'forward':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
for epoch in range(num_epochs):
indices = np.arange(num_examples)
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
pre1 = C_hat.t()[torch.cuda.LongTensor(target.data)]
pre2 = torch.mul(F.softmax(output), pre1)
loss = -(torch.log(pre2.sum(1))).mean(0)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
# //////////////////////// evaluate method ////////////////////////
net.eval()
data, target = V(torch.from_numpy(mnist.test.images).cuda(), volatile=True),\
V(torch.from_numpy(mnist.test.labels.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
test_acc = correct / len(mnist.test.labels)
# nudge garbage collector
del dataset; del gold
return test_acc, baseline_acc
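# Minimal NumPy sketch (illustration only, not used by the experiments) of the two key steps
# in train_and_test above: estimating C from the trusted data and the corrected loss used
# during retraining. Inputs are assumed to be NumPy arrays; probs_on_gold / probs_on_silver
# are softmax outputs of the noisily trained model on the gold and silver examples.
def _glc_sketch(probs_on_gold, gold_labels, probs_on_silver, noisy_labels, n_classes):
    # Row y of C_hat is the mean predicted distribution over trusted examples whose true
    # label is y, approximating p(noisy label | true label = y).
    C_hat = np.zeros((n_classes, n_classes))
    for y in range(n_classes):
        C_hat[y] = probs_on_gold[gold_labels == y].mean(axis=0)
    # The retraining objective scores the observed noisy label under
    # p(noisy | x) = p(true | x) @ C_hat, matching the pre1/pre2 lines above up to the
    # batch-size normalization.
    noisy_probs = probs_on_silver @ C_hat
    picked = noisy_probs[np.arange(len(noisy_labels)), noisy_labels]
    return C_hat, -np.log(picked + 1e-12).mean()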
# //////////////////////// run experiments ////////////////////////
corruption_fnctn = uniform_mix_C if args.corruption_type == 'uniform_mix' else flip_labels_C
filename = './' + args.method + '_' + args.corruption_type
results = {}
for gold_fraction in [0.001, 0.01, 0.05]:
results[gold_fraction] = {}
for corruption_level in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:
test_acc, baseline_acc = train_and_test(args.method, corruption_level, gold_fraction, corruption_fnctn)
results[gold_fraction][corruption_level] = {}
results[gold_fraction][corruption_level]['method'] = test_acc
results[gold_fraction][corruption_level]['baseline'] = baseline_acc
print('Gold fraction:', gold_fraction, '| Corruption level:', corruption_level,
'| Method acc:', results[gold_fraction][corruption_level]['method'],
'| Baseline acc:', results[gold_fraction][corruption_level]['baseline'])
print()
with open(filename, 'wb') as file:
pickle.dump(results, file)
print("Dumped results_ours in file: " + filename)
| 11,405 | 35.557692 | 124 | py |
glc | glc-master/MNIST/MNIST_convex_combo.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch.nn.functional as F
import pickle
from tensorflow.examples.tutorials.mnist import input_data
import argparse
mnist = input_data.read_data_sets(train_dir='mnist', one_hot=False)
parser = argparse.ArgumentParser(description='MNIST label corruption experiments')
parser.add_argument('--method', default='combo', type=str, choices=['ours', 'forward', 'forward_gold', 'ideal', 'confusion', 'combo'])
parser.add_argument('--corruption_type', default='flip_labels', type=str, choices=['uniform_mix', 'flip_labels'])
parser.add_argument('--lambda_choice', default='theirs', choices=['theirs', '1_minus_theirs', '0.5'])
args = parser.parse_args()
print(args)
print('CUDA available:', torch.cuda.is_available())
def prepare_data(corruption_matrix, gold_fraction=0.5, merge_valset=True):
np.random.seed(1)
mnist_images = np.copy(mnist.train.images)
mnist_labels = np.copy(mnist.train.labels)
if merge_valset:
mnist_images = np.concatenate([mnist_images, np.copy(mnist.validation.images)], axis=0)
mnist_labels = np.concatenate([mnist_labels, np.copy(mnist.validation.labels)])
indices = np.arange(len(mnist_labels))
np.random.shuffle(indices)
mnist_images = mnist_images[indices]
mnist_labels = mnist_labels[indices].astype(np.long)
num_gold = int(len(mnist_labels)*gold_fraction)
num_silver = len(mnist_labels) - num_gold
for i in range(num_silver):
mnist_labels[i] = np.random.choice(num_classes, p=corruption_matrix[mnist_labels[i]])
dataset = {'x': mnist_images, 'y': mnist_labels}
gold = {'x': dataset['x'][num_silver:], 'y': dataset['y'][num_silver:]}
silver = {'x': dataset['x'][:num_silver], 'y': dataset['y'][:num_silver]}
# for convex combo net
iter = 0
indices = np.arange(num_gold)
while True:
if len(np.unique(gold['y'][indices][:num_gold // 4])) == num_classes:
gold_train = {'x': gold['x'][indices][num_gold // 4:], 'y': gold['y'][indices][num_gold // 4:]}
gold_val = {'x': gold['x'][indices][:num_gold // 4], 'y': gold['y'][indices][:num_gold // 4]}
print('Successfully split gold into a train and val set with all classes in the val set')
break
else:
np.random.shuffle(indices)
iter += 1
if iter == 100:
assert False, 'Failed to split gold data'
return dataset, gold, num_gold, num_silver, gold_train, gold_val, silver
def uniform_mix_C(mixing_ratio):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(1)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
# //////////////////////// defining model ////////////////////////
learning_rate = 0.001
reg_str = 1e-6
num_epochs = 10
batch_size = 32
num_classes = 10
class ThreeLayerNet(nn.Module):
def __init__(self):
super().__init__()
self.main = nn.Sequential(
nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, num_classes)
)
self.init_weights()
def init_weights(self):
self.main[0].weight.data.normal_(0, 1/np.sqrt(784))
self.main[0].bias.data.zero_()
self.main[2].weight.data.normal_(0, 1/np.sqrt(128))
self.main[2].bias.data.zero_()
self.main[4].weight.data.normal_(0, 1/np.sqrt(128))
self.main[4].bias.data.zero_()
def forward(self, x):
return self.main(x)
def train_and_test(method='ours', corruption_level=0, gold_fraction=0.5, get_C=uniform_mix_C):
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
net = ThreeLayerNet().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, weight_decay=reg_str)
C = get_C(corruption_level)
dataset, gold, num_gold, num_silver, gold_train, gold_val, silver = prepare_data(C, gold_fraction)
# //////////////////////// train net on clean ////////////////////////
clean_net = ThreeLayerNet().cuda()
optimizer_cn = torch.optim.Adam(clean_net.parameters(), lr=learning_rate, weight_decay=reg_str)
num_examples = len(gold_train['y'])
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(max(num_epochs, 1 + 2000//num_batches)): # at least 2000 updates
# shuffle data every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = gold_train['x'][indices[offset:offset + batch_size]]
y_batch = gold_train['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = clean_net(data)
# backward
loss = F.cross_entropy(output, target)
optimizer_cn.zero_grad()
loss.backward()
optimizer_cn.step()
clean_net.eval()
data, target = V(torch.from_numpy(mnist.test.images).cuda(), volatile=True),\
V(torch.from_numpy(mnist.test.labels.astype(np.long)).cuda(), volatile=True)
output = clean_net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
gold_only_acc = correct / len(mnist.test.labels)
print('Gold only:', gold_only_acc)
# //////////////////////// train for estimation ////////////////////////
num_examples = num_silver
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
# shuffle data every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
# backward
loss = F.cross_entropy(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
net.eval()
data, target = V(torch.from_numpy(mnist.test.images).cuda(), volatile=True),\
V(torch.from_numpy(mnist.test.labels.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
baseline_acc = correct / len(mnist.test.labels)
# //////////////////////// estimate C ////////////////////////
if method == 'ours' or method == 'combo':
probs = F.softmax(net(V(torch.from_numpy(gold['x']).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
indices = np.arange(len(gold['y']))[gold['y'] == label]
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
elif method == 'forward' or method == 'forward_gold':
probs = F.softmax(net(V(torch.from_numpy(dataset['x']).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
class_probs = probs[:,label]
thresh = np.percentile(class_probs, 97, interpolation='higher')
class_probs[class_probs >= thresh] = 0
C_hat[label] = probs[np.argsort(class_probs)][-1]
elif method == 'ideal': C_hat = C
elif method == 'confusion':
# directly estimate confusion matrix on gold
probs = F.softmax(net(V(torch.from_numpy(gold['x']).cuda(), volatile=True))).data.cpu().numpy()
preds = np.argmax(probs, axis=1)
C_hat = np.zeros([num_classes, num_classes])
for i in range(len(gold['y'])):
C_hat[gold['y'][i], preds[i]] += 1
C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
print('True C:', np.round(C, decimals=3))
print('C_hat:', np.round(C_hat, decimals=3))
# //////////////////////// estimate lambda ////////////////////////
# /////////// getting average precision of clean_net ///////////
tp_count = np.zeros(num_classes)
tp_fp_count = np.zeros(num_classes)
for i in range(len(gold_val['y'])):
data, target = gold_val['x'][i], np.array([gold_val['y'][i]])
data, target = V(torch.FloatTensor(data).cuda(), volatile=True), \
V(torch.LongTensor(target).cuda(), volatile=True)
# forward
output = clean_net(data)
# average precision
pred = output.data.max(0)[1]
batch_correct = pred.eq(target.data)
        for j in range(len(batch_correct)):  # separate index so the outer loop variable is not shadowed
            tp_count[pred[j]] += batch_correct[j]
            tp_fp_count[pred[j]] += 1
precisions = tp_count / (tp_fp_count + 1e-8)
clean_ap = np.mean(precisions)
# /////////// getting average precision of noisy labeling ///////////
y_br = np.zeros(num_classes)
y_tilde_br = np.zeros(num_classes)
for i in range(len(gold['y'])):
y_br[gold['y'][i]] += 1
for i in range(len(silver['y'])):
y_tilde_br[silver['y'][i]] += 1
y_br /= np.sum(y_br)
y_tilde_br += 1e-12
y_tilde_br /= np.sum(y_tilde_br)
# print(y_br)
# print(y_tilde_br)
# print(np.unique(silver['y']))
C2_hat = (C_hat.T * y_br.reshape(1, num_classes)) / y_tilde_br.reshape(num_classes, 1)
C2_hat += 1e-12
    C2_hat /= np.sum(C2_hat, axis=1, keepdims=True)  # normalize each row to sum to 1
# print(C2_hat.sum(1))
# print(C2_hat)
# print(C_hat.sum(1))
# print(C_hat)
noisy_ap = np.mean(np.diag(C2_hat))
print('Clean AP: {}, Noisy AP: {}'.format(clean_ap, noisy_ap))
if args.lambda_choice == 'theirs':
combo_lambda = float(clean_ap / (noisy_ap + clean_ap))
elif args.lambda_choice == '1_minus_theirs':
combo_lambda = 1 - float(clean_ap / (noisy_ap + clean_ap))
elif args.lambda_choice == '0.5':
combo_lambda = 0.5
print('Combo Lambda: {}'.format(combo_lambda))
C_hat = V(torch.eye(num_classes)).cuda()
# //////////////////////// retrain with correction ////////////////////////
net.train()
net.init_weights()
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, weight_decay=reg_str)
if method == 'ours' or method == 'ideal' or method == 'confusion' or method == 'forward_gold':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
current_indices = indices[offset:offset + batch_size]
data = dataset['x'][current_indices]
target = dataset['y'][current_indices]
gold_indices = current_indices >= num_silver
silver_indices = current_indices < num_silver
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices]
data_g, target_g = V(torch.FloatTensor(data_g).cuda()),\
V(torch.LongTensor(target_g).cuda())
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.FloatTensor(data_s).cuda()),\
V(torch.LongTensor(target_s).cuda())
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat.t()[target_s]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/batch_size
optimizer.zero_grad()
loss.backward()
optimizer.step()
elif method == 'combo':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
for epoch in range(num_epochs):
indices = np.arange(num_examples)
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = dataset['x'][indices[offset:offset + batch_size]]
y_batch = dataset['y'][indices[offset:offset + batch_size]]
target_one_hot = np.zeros((len(y_batch), num_classes))
target_one_hot[np.arange(len(y_batch)), y_batch] = 1
target_one_hot = target_one_hot.astype(np.float32)
data, target_one_hot = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(target_one_hot).cuda())
target_soft = F.softmax(clean_net(data))
target = (combo_lambda * target_one_hot) + ((1 - combo_lambda) * target_soft)
# forward
output = torch.mm(F.softmax(net(data)), C_hat)
loss = -(target * torch.log(output)).sum(1).mean(0)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
# //////////////////////// evaluate method ////////////////////////
net.eval()
data, target = V(torch.from_numpy(mnist.test.images).cuda(), volatile=True),\
V(torch.from_numpy(mnist.test.labels.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
test_acc = correct / len(mnist.test.labels)
# nudge garbage collector
del dataset; del gold
return test_acc, baseline_acc
# //////////////////////// run experiments ////////////////////////
corruption_fnctn = uniform_mix_C if args.corruption_type == 'uniform_mix' else flip_labels_C
filename = './' + args.method + '_' + args.corruption_type
results = {}
for gold_fraction in [0.001, 0.01, 0.05]:
results[gold_fraction] = {}
for corruption_level in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:
test_acc, baseline_acc = train_and_test(args.method, corruption_level, gold_fraction, corruption_fnctn)
results[gold_fraction][corruption_level] = {}
results[gold_fraction][corruption_level]['method'] = test_acc
results[gold_fraction][corruption_level]['baseline'] = baseline_acc
print('Gold fraction:', gold_fraction, '| Corruption level:', corruption_level,
'| Method acc:', results[gold_fraction][corruption_level]['method'],
'| Baseline acc:', results[gold_fraction][corruption_level]['baseline'])
print()
with open(filename, 'wb') as file:
pickle.dump(results, file)
print("Dumped results_ours in file: " + filename)
| 15,832 | 35.231121 | 134 | py |
glc | glc-master/Twitter/Twitter_convex_combo.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch.nn.functional as F
import pickle
import argparse
from helper_functions_twitter import *
parser = argparse.ArgumentParser(description='Twitter label corruption experiments')
parser.add_argument('--method', default='combo', type=str, choices=['ours', 'forward', 'forward_gold', 'ideal', 'confusion', 'combo'])
parser.add_argument('--corruption_type', default='flip_labels', type=str, choices=['uniform_mix', 'flip_labels'])
parser.add_argument('--lambda_choice', default='theirs', choices=['theirs', '1_minus_theirs', '0.5'])
args = parser.parse_args()
print(args)
window_size = 1
# note that we encode the tags with numbers for later convenience
tag_to_number = {
u'N': 0, u'O': 1, u'S': 2, u'^': 3, u'Z': 4, u'L': 5, u'M': 6,
u'V': 7, u'A': 8, u'R': 9, u'!': 10, u'D': 11, u'P': 12, u'&': 13, u'T': 14,
u'X': 15, u'Y': 16, u'#': 17, u'@': 18, u'~': 19, u'U': 20, u'E': 21, u'$': 22,
u',': 23, u'G': 24
}
embeddings = embeddings_to_dict('./data/Tweets/embeddings-twitter.txt')
vocab = embeddings.keys()
# we replace <s> with </s> since it has no embedding, and </s> is a better embedding than UNK
X_train, Y_train = data_to_mat('./data/Tweets/tweets-train.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
X_dev, Y_dev = data_to_mat('./data/Tweets/tweets-dev.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
X_test, Y_test = data_to_mat('./data/Tweets/tweets-devtest.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
def prepare_data(corruption_matrix, gold_fraction=0.5, merge_valset=True):
np.random.seed(1)
twitter_tweets = np.copy(X_train)
twitter_labels = np.copy(Y_train)
if merge_valset:
twitter_tweets = np.concatenate([twitter_tweets, np.copy(X_dev)], axis=0)
twitter_labels = np.concatenate([twitter_labels, np.copy(Y_dev)])
indices = np.arange(len(twitter_labels))
np.random.shuffle(indices)
twitter_tweets = twitter_tweets[indices]
twitter_labels = twitter_labels[indices].astype(np.long)
num_gold = int(len(twitter_labels)*gold_fraction)
num_silver = len(twitter_labels) - num_gold
for i in range(num_silver):
twitter_labels[i] = np.random.choice(num_classes, p=corruption_matrix[twitter_labels[i]])
dataset = {'x': twitter_tweets, 'y': twitter_labels}
gold = {'x': dataset['x'][num_silver:], 'y': dataset['y'][num_silver:]}
silver = {'x': dataset['x'][:num_silver], 'y': dataset['y'][:num_silver]}
# for convex combo net
iter = 0
indices = np.arange(num_gold)
while True:
gold_train = {'x': gold['x'][indices][num_gold // 2:], 'y': gold['y'][indices][num_gold // 2:]}
gold_val = {'x': gold['x'][indices][:num_gold // 2], 'y': gold['y'][indices][:num_gold // 2]}
if len(np.unique(gold['y'][indices][:num_gold // 2])) == num_classes:
print('Successfully split gold into a train and val set with all classes in the val set')
break
else:
np.random.shuffle(indices)
iter += 1
if iter == 100:
print('Could not find a split into a gold train and val set with all classes in the val set. Continuing.')
break
# assert False, 'Failed to split gold data'
return dataset, gold, num_gold, num_silver, gold_train, gold_val, silver
def uniform_mix_C(mixing_ratio):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(1)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
learning_rate = 0.001
reg_str = 5e-5
num_epochs = 15
num_classes = 25
hidden_size = 256
batch_size = 64
embedding_dimension = 50
example_size = (2*window_size + 1)*embedding_dimension
init_lr = 0.001
num_examples = Y_train.shape[0]
num_batches = num_examples//batch_size
# //////////////////////// defining graph ////////////////////////
class ThreeLayerNet(nn.Module):
def __init__(self):
super().__init__()
self.main = nn.Sequential(
nn.Linear(example_size, 256),
nn.ReLU(),
nn.Linear(256, 256),
nn.ReLU(),
nn.Linear(256, num_classes)
)
self.init_weights()
def init_weights(self):
self.main[0].weight.data.normal_(0, 1/np.sqrt(example_size))
self.main[0].bias.data.zero_()
self.main[2].weight.data.normal_(0, 1/np.sqrt(256))
self.main[2].bias.data.zero_()
self.main[4].weight.data.normal_(0, 1/np.sqrt(256))
self.main[4].bias.data.zero_()
def forward(self, x):
return self.main(x)
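# Feature note (descriptive): each tagged token is represented by the concatenated 50-d
# embeddings of a window of 2*window_size + 1 = 3 tokens (previous, current, next), giving
# the example_size = 150 input expected by the first linear layer; word_list_to_embedding
# from helper_functions_twitter is assumed to build exactly this windowed feature matrix.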
to_embeds = lambda x: word_list_to_embedding(x, embeddings, embedding_dimension)
def train_and_test(method='ours', corruption_level=0, gold_fraction=0.5, get_C=uniform_mix_C):
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
net = ThreeLayerNet().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, weight_decay=reg_str)
C = get_C(corruption_level)
dataset, gold, num_gold, num_silver, gold_train, gold_val, silver = prepare_data(C, gold_fraction)
# //////////////////////// train net on clean ////////////////////////
clean_net = ThreeLayerNet().cuda()
optimizer_cn = torch.optim.Adam(clean_net.parameters(), lr=learning_rate, weight_decay=reg_str)
num_examples = num_gold
num_batches = num_examples//batch_size
indices = np.arange(num_examples) + num_silver
for epoch in range(max(num_epochs, 1 + 2000//num_batches)): # at least 2000 updates
# shuffle data every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = to_embeds(dataset['x'][indices[offset:offset + batch_size]])
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = clean_net(data)
# backward
loss = F.cross_entropy(output, target)
optimizer_cn.zero_grad()
loss.backward()
optimizer_cn.step()
clean_net.eval()
data, target = V(torch.from_numpy(to_embeds(X_test)).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = clean_net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
gold_only_acc = correct / len(Y_test)
print('Gold only:', gold_only_acc)
# //////////////////////// train for estimation ////////////////////////
num_examples = num_silver
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
# shuffle data indices every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = to_embeds(dataset['x'][indices[offset:offset + batch_size]])
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
# backward
loss = F.cross_entropy(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
net.eval()
data, target = V(torch.from_numpy(to_embeds(X_test)).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
baseline_acc = correct / len(Y_test)
# //////////////////////// estimate C ////////////////////////
if method == 'ours' or method == 'combo':
probs = F.softmax(net(V(torch.from_numpy(to_embeds(gold['x'])).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
indices = np.arange(len(gold['y']))[gold['y'] == label]
if indices.size == 0:
C_hat[label] = np.ones(num_classes) / num_classes # TODO: try a diagonal prior instead
else:
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
elif method == 'forward' or method == 'forward_gold':
probs = F.softmax(net(V(torch.from_numpy(to_embeds(dataset['x'])).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
class_probs = probs[:,label]
thresh = np.percentile(class_probs, 97, interpolation='higher')
class_probs[class_probs >= thresh] = 0
C_hat[label] = probs[np.argsort(class_probs)][-1]
elif method == 'ideal': C_hat = C
elif method == 'confusion':
# directly estimate confusion matrix on gold
probs = F.softmax(net(V(torch.from_numpy(to_embeds(gold['x'])).cuda(), volatile=True))).data.cpu().numpy()
preds = np.argmax(probs, axis=1)
C_hat = np.zeros([num_classes, num_classes])
for i in range(len(gold['y'])):
C_hat[gold['y'][i], preds[i]] += 1
C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
print('True C:', np.round(C, decimals=3))
print('C_hat:', np.round(C_hat, decimals=3))
# //////////////////////// estimate lambda ////////////////////////
# /////////// getting average precision of clean_net ///////////
tp_count = np.zeros(num_classes)
tp_fp_count = np.zeros(num_classes)
for i in range(len(gold_val['y'])):
data, target = gold_val['x'][i], np.array([gold_val['y'][i]])
data = np.expand_dims(data, 0)
data, target = V(torch.FloatTensor(to_embeds(data)).cuda(), volatile=True), \
V(torch.LongTensor(target).cuda(), volatile=True)
# forward
output = clean_net(data)
# average precision
pred = output.data.max(0)[1]
batch_correct = pred.eq(target.data)
        for j in range(len(batch_correct)):  # separate index so the outer loop variable is not shadowed
            tp_count[pred[j]] += batch_correct[j]
            tp_fp_count[pred[j]] += 1
clean_ap = 0
num_classes_in_val = 0
for i in range(num_classes):
if tp_fp_count[i] > 0:
num_classes_in_val += 1
clean_ap += tp_count[i] / tp_fp_count[i]
print('Number of classes in gold val:', num_classes_in_val)
clean_ap /= num_classes_in_val
# /////////// getting average precision of noisy labeling ///////////
y_br = np.zeros(num_classes)
y_tilde_br = np.zeros(num_classes)
for i in range(len(gold['y'])):
y_br[gold['y'][i]] += 1
for i in range(len(silver['y'])):
y_tilde_br[silver['y'][i]] += 1
y_br /= np.sum(y_br)
y_tilde_br += 1e-12
y_tilde_br /= np.sum(y_tilde_br)
# print(y_br)
# print(y_tilde_br)
# print(np.unique(silver['y']))
C2_hat = (C_hat.T * y_br.reshape(1, num_classes)) / y_tilde_br.reshape(num_classes, 1)
C2_hat += 1e-12
    C2_hat /= np.sum(C2_hat, axis=1, keepdims=True)  # normalize each row to sum to 1
# print(C2_hat.sum(1))
# print(C2_hat)
# print(C_hat.sum(1))
# print(C_hat)
noisy_ap = np.mean(np.diag(C2_hat))
print('Clean AP: {}, Noisy AP: {}'.format(clean_ap, noisy_ap))
if args.lambda_choice == 'theirs':
combo_lambda = float(clean_ap / (noisy_ap + clean_ap))
elif args.lambda_choice == '1_minus_theirs':
combo_lambda = 1 - float(clean_ap / (noisy_ap + clean_ap))
elif args.lambda_choice == '0.5':
combo_lambda = 0.5
print('Combo Lambda: {}'.format(combo_lambda))
C_hat = V(torch.eye(num_classes)).cuda()
# //////////////////////// retrain with correction ////////////////////////
net.train()
net.init_weights()
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, weight_decay=reg_str)
if method == 'ours' or method == 'ideal' or method == 'confusion' or method == 'forward_gold':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
current_indices = indices[offset:offset + batch_size]
data = to_embeds(dataset['x'][current_indices])
target = dataset['y'][current_indices]
gold_indices = current_indices >= num_silver
silver_indices = current_indices < num_silver
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices]
data_g, target_g = V(torch.FloatTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.FloatTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat.t()[torch.cuda.LongTensor(target_s.data)]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/batch_size
optimizer.zero_grad()
loss.backward()
optimizer.step()
elif method == 'combo':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
for epoch in range(num_epochs):
indices = np.arange(num_examples)
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = to_embeds(dataset['x'][indices[offset:offset + batch_size]])
y_batch = dataset['y'][indices[offset:offset + batch_size]]
target_one_hot = np.zeros((len(y_batch), num_classes))
target_one_hot[np.arange(len(y_batch)), y_batch] = 1
target_one_hot = target_one_hot.astype(np.float32)
data, target_one_hot = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(target_one_hot).cuda())
target_soft = F.softmax(clean_net(data))
target = (combo_lambda * target_one_hot) + ((1 - combo_lambda) * target_soft)
# forward
output = torch.mm(F.softmax(net(data)), C_hat)
loss = -(target * torch.log(output)).sum(1).mean(0)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
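    # --- illustrative sketch (not part of the original script) ---
    # The 'combo' branch above trains against a convex combination of the (possibly noisy) one-hot
    # label and the clean network's softmax output, weighted by combo_lambda. A minimal stand-alone
    # version of that target construction; `lam`, `one_hot`, and `soft` are placeholder names:
    def _sketch_blended_target(lam, one_hot, soft):
        # lam = 1 recovers plain hard-label training; lam = 0 is pure distillation from clean_net
        return lam * one_hot + (1.0 - lam) * soft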
# //////////////////////// evaluate method ////////////////////////
net.eval()
data, target = V(torch.from_numpy(to_embeds(X_test)).cuda(), volatile=True), \
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
test_acc = correct / len(Y_test)
# nudge garbage collector
del dataset; del gold
return test_acc, baseline_acc
# //////////////////////// run experiments ////////////////////////
corruption_fnctn = uniform_mix_C if args.corruption_type == 'uniform_mix' else flip_labels_C
filename = './' + args.method + '_' + args.corruption_type
results = {}
for gold_fraction in [0.01, 0.05, 0.25]:
results[gold_fraction] = {}
for corruption_level in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:
test_acc, baseline_acc = train_and_test(args.method, corruption_level, gold_fraction, corruption_fnctn)
results[gold_fraction][corruption_level] = {}
results[gold_fraction][corruption_level]['method'] = test_acc
results[gold_fraction][corruption_level]['baseline'] = baseline_acc
print('Gold fraction:', gold_fraction, '| Corruption level:', corruption_level,
'| Method acc:', results[gold_fraction][corruption_level]['method'],
'| Baseline acc:', results[gold_fraction][corruption_level]['baseline'])
print()
with open(filename, 'wb') as file:
pickle.dump(results, file)
print("Dumped results_ours in file: " + filename)
| 17,589 | 35.569647 | 134 | py |
glc | glc-master/Twitter/Twitter_gold_only.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch.nn.functional as F
import pickle
import argparse
from helper_functions_twitter import *
parser = argparse.ArgumentParser(description='Twitter label corruption experiments')
parser.add_argument('--method', default='gold_only', type=str, choices=['gold_only'])
parser.add_argument('--corruption_type', default='flip_labels', type=str, choices=['uniform_mix', 'flip_labels'])
args = parser.parse_args()
print(args)
window_size = 1
# note that we encode the tags with numbers for later convenience
tag_to_number = {
u'N': 0, u'O': 1, u'S': 2, u'^': 3, u'Z': 4, u'L': 5, u'M': 6,
u'V': 7, u'A': 8, u'R': 9, u'!': 10, u'D': 11, u'P': 12, u'&': 13, u'T': 14,
u'X': 15, u'Y': 16, u'#': 17, u'@': 18, u'~': 19, u'U': 20, u'E': 21, u'$': 22,
u',': 23, u'G': 24
}
embeddings = embeddings_to_dict('./data/Tweets/embeddings-twitter.txt')
vocab = embeddings.keys()
# we replace <s> with </s> since it has no embedding, and </s> is a better embedding than UNK
X_train, Y_train = data_to_mat('./data/Tweets/tweets-train.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
X_dev, Y_dev = data_to_mat('./data/Tweets/tweets-dev.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
X_test, Y_test = data_to_mat('./data/Tweets/tweets-devtest.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
def prepare_data(corruption_matrix, gold_fraction=0.5, merge_valset=True):
np.random.seed(1)
twitter_tweets = np.copy(X_train)
twitter_labels = np.copy(Y_train)
if merge_valset:
twitter_tweets = np.concatenate([twitter_tweets, np.copy(X_dev)], axis=0)
twitter_labels = np.concatenate([twitter_labels, np.copy(Y_dev)])
indices = np.arange(len(twitter_labels))
np.random.shuffle(indices)
twitter_tweets = twitter_tweets[indices]
twitter_labels = twitter_labels[indices].astype(np.long)
num_gold = int(len(twitter_labels)*gold_fraction)
num_silver = len(twitter_labels) - num_gold
for i in range(num_silver):
twitter_labels[i] = np.random.choice(num_classes, p=corruption_matrix[twitter_labels[i]])
dataset = {'x': twitter_tweets, 'y': twitter_labels}
gold = {'x': dataset['x'][num_silver:], 'y': dataset['y'][num_silver:]}
return dataset, gold, num_gold, num_silver
def uniform_mix_C(mixing_ratio):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
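# --- illustrative sketch (not part of the original script) ---
# For example, with 3 classes and mixing_ratio = 0.3 the matrix above is 0.7 * I + 0.3 * (1/3) * ones:
# 0.8 on the diagonal, 0.1 everywhere else, and every row sums to 1, so it is a valid row-stochastic
# label-corruption matrix.
def _uniform_mix_C_example():
    C = 0.3 * np.full((3, 3), 1 / 3) + 0.7 * np.eye(3)
    assert np.allclose(C.sum(1), 1.0)
    return C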
def flip_labels_C(corruption_prob):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(1)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
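# --- illustrative sketch (not part of the original script) ---
# flip_labels_C keeps (1 - corruption_prob) on the diagonal and moves the remaining mass to one
# randomly chosen wrong class per row, so each class flips to a single fixed wrong class with
# probability corruption_prob. A hand-built 3-class instance of that structure:
def _flip_labels_C_example(corruption_prob=0.4):
    C = np.eye(3) * (1 - corruption_prob)
    C[0, 1] = corruption_prob  # class 0 flips to class 1
    C[1, 2] = corruption_prob  # class 1 flips to class 2
    C[2, 0] = corruption_prob  # class 2 flips to class 0
    assert np.allclose(C.sum(1), 1.0)
    return C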
reg_str = 5e-5
num_epochs = 15
num_classes = 25
hidden_size = 256
batch_size = 64
embedding_dimension = 50
example_size = (2*window_size + 1)*embedding_dimension
init_lr = 0.001
num_examples = Y_train.shape[0]
num_batches = num_examples//batch_size
# //////////////////////// defining graph ////////////////////////
class ThreeLayerNet(nn.Module):
def __init__(self):
super().__init__()
self.main = nn.Sequential(
nn.Linear(example_size, 256),
nn.ReLU(),
nn.Linear(256, 256),
nn.ReLU(),
nn.Linear(256, num_classes)
)
self.init_weights()
def init_weights(self):
self.main[0].weight.data.normal_(0, 1/np.sqrt(example_size))
self.main[0].bias.data.zero_()
self.main[2].weight.data.normal_(0, 1/np.sqrt(256))
self.main[2].bias.data.zero_()
self.main[4].weight.data.normal_(0, 1/np.sqrt(256))
self.main[4].bias.data.zero_()
def forward(self, x):
return self.main(x)
to_embeds = lambda x: word_list_to_embedding(x, embeddings, embedding_dimension)
def train_and_test(method='ours', corruption_level=0, gold_fraction=0.5, get_C=uniform_mix_C):
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
net = ThreeLayerNet().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=reg_str)
C = get_C(corruption_level)
dataset, gold, num_gold, num_silver = prepare_data(C, gold_fraction)
# //////////////////////// train for estimation ////////////////////////
num_examples = num_gold
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(max(num_epochs, 1 + 2000//num_batches)):
# shuffle data indices every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = to_embeds(dataset['x'][indices[offset:offset + batch_size]])
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
# backward
loss = F.cross_entropy(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
net.eval()
data, target = V(torch.from_numpy(to_embeds(X_test)).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
test_acc = correct / len(Y_test)
baseline_acc = 0 # placeholder
# nudge garbage collector
del dataset; del gold
return test_acc, baseline_acc
# //////////////////////// run experiments ////////////////////////
corruption_fnctn = uniform_mix_C if args.corruption_type == 'uniform_mix' else flip_labels_C
filename = './' + args.method
results = {}
for gold_fraction in [0.01, 0.05, 0.25]:
results[gold_fraction] = {}
for corruption_level in [0]:
test_acc, baseline_acc = train_and_test(args.method, corruption_level, gold_fraction, corruption_fnctn)
results[gold_fraction][corruption_level] = {}
results[gold_fraction][corruption_level]['method'] = test_acc
results[gold_fraction][corruption_level]['baseline'] = baseline_acc
print('Gold fraction:', gold_fraction, '| Corruption level:', corruption_level,
'| Method acc:', results[gold_fraction][corruption_level]['method'],
'| Baseline acc:', results[gold_fraction][corruption_level]['baseline'])
print()
with open(filename, 'wb') as file:
pickle.dump(results, file)
print("Dumped results_ours in file: " + filename)
| 7,050 | 34.079602 | 113 | py |
glc | glc-master/Twitter/Twitter_experiments_pytorch.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch.nn.functional as F
import pickle
import argparse
from helper_functions_twitter import *
parser = argparse.ArgumentParser(description='Twitter label corruption experiments')
parser.add_argument('--method', default='confusion', type=str, choices=['ours', 'forward', 'forward_gold', 'ideal', 'confusion'])
parser.add_argument('--corruption_type', default='flip_labels', type=str, choices=['uniform_mix', 'flip_labels'])
args = parser.parse_args()
print(args)
window_size = 1
# note that we encode the tags with numbers for later convenience
tag_to_number = {
u'N': 0, u'O': 1, u'S': 2, u'^': 3, u'Z': 4, u'L': 5, u'M': 6,
u'V': 7, u'A': 8, u'R': 9, u'!': 10, u'D': 11, u'P': 12, u'&': 13, u'T': 14,
u'X': 15, u'Y': 16, u'#': 17, u'@': 18, u'~': 19, u'U': 20, u'E': 21, u'$': 22,
u',': 23, u'G': 24
}
embeddings = embeddings_to_dict('./data/Tweets/embeddings-twitter.txt')
vocab = embeddings.keys()
# we replace <s> with </s> since it has no embedding, and </s> is a better embedding than UNK
X_train, Y_train = data_to_mat('./data/Tweets/tweets-train.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
X_dev, Y_dev = data_to_mat('./data/Tweets/tweets-dev.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
X_test, Y_test = data_to_mat('./data/Tweets/tweets-devtest.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
def prepare_data(corruption_matrix, gold_fraction=0.5, merge_valset=True):
np.random.seed(1)
twitter_tweets = np.copy(X_train)
twitter_labels = np.copy(Y_train)
if merge_valset:
twitter_tweets = np.concatenate([twitter_tweets, np.copy(X_dev)], axis=0)
twitter_labels = np.concatenate([twitter_labels, np.copy(Y_dev)])
indices = np.arange(len(twitter_labels))
np.random.shuffle(indices)
twitter_tweets = twitter_tweets[indices]
twitter_labels = twitter_labels[indices].astype(np.long)
num_gold = int(len(twitter_labels)*gold_fraction)
num_silver = len(twitter_labels) - num_gold
for i in range(num_silver):
twitter_labels[i] = np.random.choice(num_classes, p=corruption_matrix[twitter_labels[i]])
dataset = {'x': twitter_tweets, 'y': twitter_labels}
gold = {'x': dataset['x'][num_silver:], 'y': dataset['y'][num_silver:]}
return dataset, gold, num_gold, num_silver
def uniform_mix_C(mixing_ratio):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(1)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
reg_str = 5e-5
num_epochs = 15
num_classes = 25
hidden_size = 256
batch_size = 64
embedding_dimension = 50
example_size = (2*window_size + 1)*embedding_dimension
init_lr = 0.001
num_examples = Y_train.shape[0]
num_batches = num_examples//batch_size
# //////////////////////// defining graph ////////////////////////
class ThreeLayerNet(nn.Module):
def __init__(self):
super().__init__()
self.main = nn.Sequential(
nn.Linear(example_size, 256),
nn.ReLU(),
nn.Linear(256, 256),
nn.ReLU(),
nn.Linear(256, num_classes)
)
self.init_weights()
def init_weights(self):
self.main[0].weight.data.normal_(0, 1/np.sqrt(example_size))
self.main[0].bias.data.zero_()
self.main[2].weight.data.normal_(0, 1/np.sqrt(256))
self.main[2].bias.data.zero_()
self.main[4].weight.data.normal_(0, 1/np.sqrt(256))
self.main[4].bias.data.zero_()
def forward(self, x):
return self.main(x)
to_embeds = lambda x: word_list_to_embedding(x, embeddings, embedding_dimension)
def train_and_test(method='ours', corruption_level=0, gold_fraction=0.5, get_C=uniform_mix_C):
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
net = ThreeLayerNet().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=reg_str)
C = get_C(corruption_level)
dataset, gold, num_gold, num_silver = prepare_data(C, gold_fraction)
# //////////////////////// train for estimation ////////////////////////
if method == 'ours' or method == 'confusion' or method == 'forward_gold':
num_examples = num_silver
elif method == 'forward':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
# shuffle data indices every epoch
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = to_embeds(dataset['x'][indices[offset:offset + batch_size]])
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
# backward
loss = F.cross_entropy(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
net.eval()
data, target = V(torch.from_numpy(to_embeds(X_test)).cuda(), volatile=True),\
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
baseline_acc = correct / len(Y_test)
# //////////////////////// estimate C ////////////////////////
if method == 'ours':
probs = F.softmax(net(V(torch.from_numpy(to_embeds(gold['x'])).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
indices = np.arange(len(gold['y']))[gold['y'] == label]
if indices.size == 0:
C_hat[label] = np.ones(num_classes) / num_classes # TODO: try a diagonal prior instead
else:
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
elif method == 'forward' or method == 'forward_gold':
probs = F.softmax(net(V(torch.from_numpy(to_embeds(dataset['x'])).cuda(), volatile=True))).data.cpu().numpy()
C_hat = np.zeros((num_classes,num_classes))
for label in range(num_classes):
class_probs = probs[:,label]
thresh = np.percentile(class_probs, 97, interpolation='higher')
class_probs[class_probs >= thresh] = 0
C_hat[label] = probs[np.argsort(class_probs)][-1]
elif method == 'ideal': C_hat = C
elif method == 'confusion':
# directly estimate confusion matrix on gold
probs = F.softmax(net(V(torch.from_numpy(to_embeds(gold['x'])).cuda(), volatile=True))).data.cpu().numpy()
preds = np.argmax(probs, axis=1)
C_hat = np.zeros([num_classes, num_classes])
for i in range(len(gold['y'])):
C_hat[gold['y'][i], preds[i]] += 1
C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
C_hat = C_hat * 0.99 + np.full_like(C_hat, 1/num_classes) * 0.01
#print('True C:', np.round(C, decimals=3))
#print('C_hat:', np.round(C_hat, decimals=3))
C_hat = V(torch.from_numpy(C_hat.astype(np.float32))).cuda()
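    # --- illustrative sketch (not part of the original script) ---
    # The 'ours' estimator above averages the noisily-trained model's softmax outputs over the
    # trusted (gold) examples of each true class; because the model was fit to noisy labels, row y
    # of the result approximates p(noisy label | true label = y). A numpy-only restatement with
    # placeholder inputs (softmax_probs: [num_gold, n_classes], gold_labels: [num_gold]):
    def _sketch_glc_estimate(softmax_probs, gold_labels, n_classes):
        C = np.zeros((n_classes, n_classes))
        for y in range(n_classes):
            rows = softmax_probs[gold_labels == y]
            if len(rows) == 0:
                C[y] = np.ones(n_classes) / n_classes  # uninformative row, as in the branch above
            else:
                C[y] = rows.mean(axis=0)
        return C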
# //////////////////////// retrain with correction ////////////////////////
net.train()
net.init_weights()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=reg_str)
if method == 'ours' or method == 'ideal' or method == 'confusion' or method == 'forward_gold':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
indices = np.arange(num_examples)
for epoch in range(num_epochs):
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
current_indices = indices[offset:offset + batch_size]
data = to_embeds(dataset['x'][current_indices])
target = dataset['y'][current_indices]
gold_indices = current_indices >= num_silver
silver_indices = current_indices < num_silver
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices]
data_g, target_g = V(torch.FloatTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.FloatTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat.t()[torch.cuda.LongTensor(target_s.data)]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/batch_size
optimizer.zero_grad()
loss.backward()
optimizer.step()
elif method == 'forward':
num_examples = dataset['y'].shape[0]
num_batches = num_examples//batch_size
for epoch in range(num_epochs):
indices = np.arange(num_examples)
np.random.shuffle(indices)
for i in range(num_batches):
offset = i * batch_size
x_batch = to_embeds(dataset['x'][indices[offset:offset + batch_size]])
y_batch = dataset['y'][indices[offset:offset + batch_size]]
data, target = V(torch.from_numpy(x_batch).cuda()), V(torch.from_numpy(y_batch).cuda())
# forward
output = net(data)
pre1 = C_hat.t()[torch.cuda.LongTensor(target.data)]
pre2 = torch.mul(F.softmax(output), pre1)
loss = -(torch.log(pre2.sum(1))).mean(0)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
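    # --- illustrative sketch (not part of the original script) ---
    # Both retraining branches above use the forward-corrected loss: for an example with noisy label
    # y~, the model's predicted probability of observing y~ is sum_k p(true = k | x) * C_hat[k, y~],
    # i.e. row y~ of C_hat transpose dotted with softmax(f(x)), and the loss is its negative log.
    # A minimal tensor version with placeholder arguments (dim=1 is written explicitly here):
    def _sketch_forward_corrected_loss(logits, noisy_targets, C_hat_matrix):
        probs = F.softmax(logits, dim=1)            # p(true class | x)
        rows = C_hat_matrix.t()[noisy_targets]      # one row of C_hat^T per example
        p_noisy = (probs * rows).sum(1)             # predicted p(noisy label = y~ | x)
        return -torch.log(p_noisy).mean()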
# //////////////////////// evaluate method ////////////////////////
net.eval()
data, target = V(torch.from_numpy(to_embeds(X_test)).cuda(), volatile=True), \
V(torch.from_numpy(Y_test.astype(np.long)).cuda(), volatile=True)
output = net(data)
pred = output.data.max(1)[1]
correct = pred.eq(target.data).sum()
test_acc = correct / len(Y_test)
# nudge garbage collector
del dataset; del gold
return test_acc, baseline_acc
# //////////////////////// run experiments ////////////////////////
corruption_fnctn = uniform_mix_C if args.corruption_type == 'uniform_mix' else flip_labels_C
filename = './' + args.method + '_' + args.corruption_type
results = {}
for gold_fraction in [0.01, 0.05, 0.25]:
results[gold_fraction] = {}
for corruption_level in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:
test_acc, baseline_acc = train_and_test(args.method, corruption_level, gold_fraction, corruption_fnctn)
results[gold_fraction][corruption_level] = {}
results[gold_fraction][corruption_level]['method'] = test_acc
results[gold_fraction][corruption_level]['baseline'] = baseline_acc
print('Gold fraction:', gold_fraction, '| Corruption level:', corruption_level,
'| Method acc:', results[gold_fraction][corruption_level]['method'],
'| Baseline acc:', results[gold_fraction][corruption_level]['baseline'])
print()
with open(filename, 'wb') as file:
pickle.dump(results, file)
print("Dumped results_ours in file: " + filename)
| 12,745 | 36.269006 | 129 | py |
glc | glc-master/CIFAR/train_confusion.py | # -*- coding: utf-8 -*-
import argparse
import os
import time
import math
import json
import torch
from torch.autograd import Variable as V
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
import wideresnet as wrn
import numpy as np
from load_corrupted import CIFAR10, CIFAR100
from PIL import Image
np.random.seed(1)
# note: nosgdr, schedule, and epochs are highly related settings
parser = argparse.ArgumentParser(description='Trains WideResNet on CIFAR',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional arguments
parser.add_argument('data_path', type=str, help='Root for the Cifar dataset.')
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
# Optimization options
parser.add_argument('--nosgdr', default=False, action='store_true', help='Turn off SGDR.')
parser.add_argument('--epochs', '-e', type=int, default=75, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--gold_fraction', '-gf', type=float, default=0.1, help='What fraction of the data should be trusted?')
parser.add_argument('--corruption_prob', '-cprob', type=float, default=0.3, help='The label corruption probability.')
parser.add_argument('--corruption_type', '-ctype', type=str, default='unif', help='Type of corruption ("unif" or "flip").')
parser.add_argument('--adjust', '-a', action='store_true', help='Adjust the C_hat estimate with base-rate information.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--test_bs', type=int, default=128)
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs. Use when SGDR is off.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers (default: 40)')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor (default: 2)')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability (default: 0.3)')
parser.add_argument('--nonlinearity', type=str, default='relu', help='Nonlinearity (relu, elu, gelu).')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
# i/o
parser.add_argument('--log', type=str, default='./', help='Log folder.')
args = parser.parse_args()
import socket
print()
print("This is on machine:", socket.gethostname())
print()
print(args)
print()
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
log = open(os.path.join(args.log, args.dataset + '_log.txt'), 'w')
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
log.write(json.dumps(state) + '\n')
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data_gold = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR10(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'cifar100':
train_data_gold = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR100(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 100
class Dataset(object):
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class TensorDataset(Dataset):
def __init__(self, data_tensor, target_tensor, transform):
# assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
self.transform = transform
def __getitem__(self, index):
img, target = self.data_tensor[index], self.target_tensor[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return 50000
train_silver_loader = torch.utils.data.DataLoader(
train_data_silver, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_gold_deterministic_loader = torch.utils.data.DataLoader(
train_data_gold_deterministic, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
train_all_loader = torch.utils.data.DataLoader(
TensorDataset(np.vstack((train_data_gold.train_data, train_data_silver.train_data)),
torch.from_numpy(np.array(train_data_gold.train_labels + train_data_silver.train_labels)),
train_transform),
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Init checkpoints
if not os.path.isdir(args.save):
os.makedirs(args.save)
# Init model, criterion, and optimizer
net = wrn.WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
print(net)
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.manual_seed(1)
if args.ngpu > 0:
torch.cuda.manual_seed(1)
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
# saving so we can start again from these same weights when applying the correction
torch.save(net.state_dict(), os.path.join(
args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
# Restore model
start_epoch = 0
# if args.load != '':
# for i in range(args.epochs-1,-1,-1):
# model_name = os.path.join(args.load, args.dataset + '_model_epoch' + str(i) + '.pytorch')
# if os.path.isfile(model_name):
# net.load_state_dict(torch.load(model_name))
# start_epoch = i+1
# print('Model restored! Epoch:', i)
# break
# if start_epoch == 0:
# assert False, "could not resume"
cudnn.benchmark = True # fire on all cylinders
def train_phase1():
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_silver_loader):
data, target = V(data.cuda()), V(target.cuda())
# forward
output = net(data)
# backward
optimizer.zero_grad()
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + loss.data[0] * 0.2
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_silver_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
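# --- illustrative sketch (not part of the original script) ---
# The schedule inside train_phase1 advances a phase variable state['tt'] from 0 toward pi over the
# whole run and sets lr = learning_rate * (1 + sin(pi/2 + tt)) / 2, which is a half-cosine decay
# from the initial learning rate down toward 0 (an SGDR-style cosine schedule without restarts).
# A stand-alone per-step version of the same learning-rate computation:
def _sketch_half_cosine_lr(step, total_steps, base_lr):
    tt = min(math.pi * step / float(total_steps), math.pi - 0.05)  # clamp as the loop above does
    return base_lr * (1.0 + math.sin(math.pi / 2.0 + tt)) / 2.0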
# test function (forward only)
def test():
net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = V(data.cuda(), volatile=True),\
V(target.cuda(), volatile=True)
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum()
# test loss average
loss_avg += loss.data[0]
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
# Main loop
for epoch in range(start_epoch, args.epochs):
# if epoch < 150:
# state['learning_rate'] = state['init_learning_rate']
# elif epoch >= 150 and epoch < 225:
# state['learning_rate'] = state['init_learning_rate'] * args.gamma
# elif epoch >= 225:
# state['learning_rate'] = state['init_learning_rate'] * (args.gamma ** 2)
# for param_group in optimizer.param_groups:
# param_group['lr'] = state['learning_rate']
state['epoch'] = epoch
begin_epoch = time.time()
train_phase1()
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
print('\nNow retraining with correction\n')
def get_C_hat_transpose():
probs = []
net.eval()
for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
        # we subtract num_classes because num_classes was added to the gold labels so we can identify which examples are gold in train_phase2
data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
torch.autograd.Variable((target - num_classes).cuda(), volatile=True)
# forward
output = net(data)
pred = F.softmax(output)
probs.extend(list(pred.data.cpu().numpy()))
probs = np.array(probs, dtype=np.float32)
preds = np.argmax(probs, axis=1)
C_hat = np.zeros([num_classes, num_classes])
for i in range(len(train_data_gold.train_labels)):
C_hat[int(np.rint(train_data_gold.train_labels[i] - num_classes)), preds[i]] += 1
C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
C_hat = C_hat * 0.99 + np.full_like(C_hat, 1/num_classes) * 0.01 # smoothing
return C_hat.T.astype(np.float32)
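# --- illustrative sketch (not part of the original script) ---
# get_C_hat_transpose above tallies a confusion matrix of the noisily-trained model's predictions
# on the gold set (row = true label, column = predicted label), row-normalizes it, and smooths it
# toward the uniform matrix so no entry is exactly zero before it is placed inside a log in
# train_phase2. A compact numpy rendering of the same steps with placeholder inputs:
def _sketch_confusion_C_hat(gold_true_labels, gold_predictions, n_classes):
    C = np.zeros((n_classes, n_classes))
    for t, p in zip(gold_true_labels, gold_predictions):
        C[t, p] += 1
    C /= (C.sum(axis=1, keepdims=True) + 1e-7)             # row-normalize
    C = C * 0.99 + np.full_like(C, 1 / n_classes) * 0.01   # smooth toward uniform
    return C.T.astype(np.float32)                          # transposed, as consumed downstream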
C_hat_transpose = torch.from_numpy(get_C_hat_transpose())
C_hat_transpose = V(C_hat_transpose.cuda(), requires_grad=False)
# /////// Resetting the network ////////
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
state['learning_rate'] = state['init_learning_rate']
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
model_name = os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch')
net.load_state_dict(torch.load(model_name))
def train_phase2(C_hat_transpose):
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_all_loader):
# we subtract num_classes because we added num_classes to allow us to identify gold examples
data, target = data.numpy(), target.numpy()
gold_indices = target > (num_classes - 1)
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices] - num_classes
data_g, target_g = V(torch.FloatTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_indices = target < num_classes
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.FloatTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
optimizer.zero_grad()
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat_transpose[torch.cuda.LongTensor(target_s.data)]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/args.batch_size
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.2 + float(loss.cpu().data.numpy()[0]) * 0.8
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_all_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
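# --- illustrative sketch (not part of the original script) ---
# train_phase2 relies on a label-encoding trick: gold (trusted) examples are stored with num_classes
# added to their labels, so a mixed batch can be split by thresholding the target and the true gold
# labels recovered by subtracting num_classes. A small numpy demonstration of that split, assuming
# 10 classes for concreteness:
def _sketch_gold_silver_split(targets, n_classes=10):
    gold_mask = targets > (n_classes - 1)            # shifted labels mark gold examples
    gold_labels = targets[gold_mask] - n_classes     # undo the shift
    silver_labels = targets[~gold_mask]              # silver labels are stored unshifted
    return gold_labels, silver_labels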
# Main loop
for epoch in range(0, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train_phase2(C_hat_transpose)
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
log.close()
try:
    os.remove(os.path.join(
        args.save,
        args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
except OSError:
    pass  # the temporary init checkpoint may already be gone
| 16,534 | 38.747596 | 126 | py |
glc | glc-master/CIFAR/train_ours.py | # -*- coding: utf-8 -*-
import argparse
import os
import time
import math
import json
import torch
from torch.autograd import Variable as V
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
import wideresnet as wrn
import numpy as np
from load_corrupted_data import CIFAR10, CIFAR100
from PIL import Image
import socket
# note: nosgdr, schedule, and epochs are highly related settings
parser = argparse.ArgumentParser(description='Trains WideResNet on CIFAR',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional arguments
parser.add_argument('data_path', type=str, help='Root for the Cifar dataset.')
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
# Optimization options
parser.add_argument('--nosgdr', default=False, action='store_true', help='Turn off SGDR.')
parser.add_argument('--epochs', '-e', type=int, default=75, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--gold_fraction', '-gf', type=float, default=0.1, help='What fraction of the data should be trusted?')
parser.add_argument('--corruption_prob', '-cprob', type=float, default=0.3, help='The label corruption probability.')
parser.add_argument('--corruption_type', '-ctype', type=str, default='unif', help='Type of corruption ("unif" or "flip").')
parser.add_argument('--adjust', '-a', action='store_true', help='Adjust the C_hat estimate with base-rate information.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--test_bs', type=int, default=128)
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs. Use when SGDR is off.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers (default: 40)')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor (default: 2)')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability (default: 0.3)')
parser.add_argument('--nonlinearity', type=str, default='relu', help='Nonlinearity (relu, elu, gelu).')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
# i/o
parser.add_argument('--log', type=str, default='./', help='Log folder.')
# random seed
parser.add_argument('--seed', type=int, default=1)
args = parser.parse_args()
np.random.seed(args.seed)
print()
print("This is on machine:", socket.gethostname())
print()
print(args)
print()
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
log = open(os.path.join(args.log, args.dataset + '_log.txt'), 'w')
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
log.write(json.dumps(state) + '\n')
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data_gold = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR10(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices, seed=args.seed)
train_data_gold_deterministic = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'cifar100':
train_data_gold = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR100(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices, seed=args.seed)
train_data_gold_deterministic = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 100
class Dataset(object):
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class TensorDataset(Dataset):
def __init__(self, data_tensor, target_tensor, transform):
# assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
self.transform = transform
def __getitem__(self, index):
img, target = self.data_tensor[index], self.target_tensor[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return 50000
train_silver_loader = torch.utils.data.DataLoader(
train_data_silver, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_gold_deterministic_loader = torch.utils.data.DataLoader(
train_data_gold_deterministic, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
train_all_loader = torch.utils.data.DataLoader(
TensorDataset(np.vstack((train_data_gold.train_data, train_data_silver.train_data)),
torch.from_numpy(np.array(train_data_gold.train_labels + train_data_silver.train_labels)),
train_transform),
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Init checkpoints
if not os.path.isdir(args.save):
os.makedirs(args.save)
# Init model, criterion, and optimizer
net = wrn.WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
print(net)
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.manual_seed(args.seed)
if args.ngpu > 0:
torch.cuda.manual_seed(args.seed)
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
# saving so we can start again from these same weights when applying the correction
torch.save(net.state_dict(), os.path.join(
args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
# Restore model
start_epoch = 0
# if args.load != '':
# for i in range(args.epochs-1,-1,-1):
# model_name = os.path.join(args.load, args.dataset + '_model_epoch' + str(i) + '.pytorch')
# if os.path.isfile(model_name):
# net.load_state_dict(torch.load(model_name))
# start_epoch = i+1
# print('Model restored! Epoch:', i)
# break
# if start_epoch == 0:
# assert False, "could not resume"
cudnn.benchmark = True # fire on all cylinders
def train_phase1():
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_silver_loader):
data, target = V(data.cuda()), V(target.cuda())
# forward
output = net(data)
# backward
optimizer.zero_grad()
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + loss.data[0] * 0.2
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_silver_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# test function (forward only)
def test():
net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = V(data.cuda(), volatile=True),\
V(target.cuda(), volatile=True)
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum()
# test loss average
loss_avg += loss.data[0]
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
# Main loop
for epoch in range(start_epoch, args.epochs):
# if epoch < 150:
# state['learning_rate'] = state['init_learning_rate']
# elif epoch >= 150 and epoch < 225:
# state['learning_rate'] = state['init_learning_rate'] * args.gamma
# elif epoch >= 225:
# state['learning_rate'] = state['init_learning_rate'] * (args.gamma ** 2)
# for param_group in optimizer.param_groups:
# param_group['lr'] = state['learning_rate']
state['epoch'] = epoch
begin_epoch = time.time()
train_phase1()
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
print('\nNow retraining with correction\n')
def get_C_hat_transpose():
probs = []
net.eval()
for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
        # we subtract num_classes because num_classes was added to the gold labels so we can identify which examples are gold in train_phase2
data, target = V(data.cuda(), volatile=True),\
V((target - num_classes).cuda(), volatile=True)
# forward
output = net(data)
pred = F.softmax(output)
probs.extend(list(pred.data.cpu().numpy()))
probs = np.array(probs, dtype=np.float32)
C_hat = np.zeros((num_classes, num_classes))
for label in range(num_classes):
indices = np.arange(len(train_data_gold.train_labels))[
np.isclose(np.array(train_data_gold.train_labels) - num_classes, label)]
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
return C_hat.T.astype(np.float32)
C_hat_transpose = torch.from_numpy(get_C_hat_transpose())
C_hat_transpose = V(C_hat_transpose.cuda(), requires_grad=False)
# /////// Resetting the network ////////
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
state['learning_rate'] = state['init_learning_rate']
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
model_name = os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch')
net.load_state_dict(torch.load(model_name))
def train_phase2(C_hat_transpose):
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_all_loader):
# we subtract num_classes because we added num_classes to allow us to identify gold examples
data, target = data.numpy(), target.numpy()
gold_indices = target > (num_classes - 1)
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices] - num_classes
data_g, target_g = V(torch.FloatTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_indices = target < num_classes
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.FloatTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
optimizer.zero_grad()
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat_transpose[torch.cuda.LongTensor(target_s.data)]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/args.batch_size
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.2 + float(loss.cpu().data.numpy()[0]) * 0.8
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_all_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# Main loop
for epoch in range(0, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train_phase2(C_hat_transpose)
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
log.close()
try:
    os.remove(os.path.join(
        args.save,
        args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
except OSError:
    pass  # the temporary init checkpoint may already be gone
| 16,549 | 38.688249 | 126 | py |
glc | glc-master/CIFAR/train_forward_gold.py | # -*- coding: utf-8 -*-
import argparse
import os
import time
import math
import json
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
import wideresnet as wrn
import numpy as np
from load_corrupted_data import CIFAR10, CIFAR100
from PIL import Image
np.random.seed(1)
# note: nosgdr, schedule, and epochs are highly related settings
parser = argparse.ArgumentParser(description='Trains WideResNet on CIFAR',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional arguments
parser.add_argument('data_path', type=str, help='Root for the Cifar dataset.')
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
# Optimization options
parser.add_argument('--nosgdr', default=False, action='store_true', help='Turn off SGDR.')
parser.add_argument('--epochs', '-e', type=int, default=75, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--gold_fraction', '-gf', type=float, default=0.1, help='What fraction of the data should be trusted?')
parser.add_argument('--corruption_prob', '-cprob', type=float, default=0.3, help='The label corruption probability.')
parser.add_argument('--corruption_type', '-ctype', type=str, default='unif', help='Type of corruption ("unif" or "flip" or "hierarchical").')
parser.add_argument('--adjust', '-a', action='store_true', help='Adjust the C_hat estimate with base-rate information.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--test_bs', type=int, default=128)
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs. Use when SGDR is off.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers (default: 40)')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor (default: 2)')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability (default: 0.3)')
parser.add_argument('--nonlinearity', type=str, default='relu', help='Nonlinearity (relu, elu, gelu).')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
# i/o
parser.add_argument('--log', type=str, default='./', help='Log folder.')
args = parser.parse_args()
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
log = open(os.path.join(args.log, args.dataset + '_log.txt'), 'w')
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
log.write(json.dumps(state) + '\n')
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data_gold = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR10(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'cifar100':
train_data_gold = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR100(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 100
class Dataset(object):
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class TensorDataset(Dataset):
def __init__(self, data_tensor, target_tensor, transform):
# assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
self.transform = transform
def __getitem__(self, index):
img, target = self.data_tensor[index], self.target_tensor[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return 50000
train_silver_loader = torch.utils.data.DataLoader(
train_data_silver, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_gold_deterministic_loader = torch.utils.data.DataLoader(
train_data_gold_deterministic, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
train_all_loader = torch.utils.data.DataLoader(
TensorDataset(np.vstack((train_data_gold.train_data, train_data_silver.train_data)),
torch.from_numpy(np.array(train_data_gold.train_labels + train_data_silver.train_labels)),
train_transform),
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Init checkpoints
if not os.path.isdir(args.save):
os.makedirs(args.save)
# Init model, criterion, and optimizer
net = wrn.WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
print(net)
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.manual_seed(1)
if args.ngpu > 0:
torch.cuda.manual_seed(1)
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
# saving so we can start again from these same weights when applying the correction
torch.save(net.state_dict(), os.path.join(
args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
# Restore model
start_epoch = 0
# if args.load != '':
# for i in range(args.epochs-1,-1,-1):
# model_name = os.path.join(args.load, args.dataset + '_model_epoch' + str(i) + '.pytorch')
# if os.path.isfile(model_name):
# net.load_state_dict(torch.load(model_name))
# start_epoch = i+1
# print('Model restored! Epoch:', i)
# break
# if start_epoch == 0:
# assert False, "could not resume"
cudnn.benchmark = True # fire on all cylinders
def train_phase1():
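    # phase 1: fit the network on the noisy (silver) labels with plain cross-entropy;
    # this model is what get_C_hat_transpose later uses to estimate the corruption matrix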
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_silver_loader):
data, target = torch.autograd.Variable(data.cuda()), torch.autograd.Variable(target.cuda())
# forward
output = net(data)
# backward
optimizer.zero_grad()
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.2 + loss.data[0] * 0.8
if args.nosgdr is False: # Use a cyclic learning rate
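            # tt sweeps from 0 to pi over the whole run, so the per-batch learning rate
            # follows a single cosine decay: lr * (1 + cos(tt)) / 2, from lr down to ~0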
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_silver_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# test function (forward only)
def test():
net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
torch.autograd.Variable(target.cuda(), volatile=True)
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum()
# test loss average
loss_avg += loss.data[0]
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
# Main loop
for epoch in range(start_epoch, args.epochs):
# if epoch < 150:
# state['learning_rate'] = state['init_learning_rate']
# elif epoch >= 150 and epoch < 225:
# state['learning_rate'] = state['init_learning_rate'] * args.gamma
# elif epoch >= 225:
# state['learning_rate'] = state['init_learning_rate'] * (args.gamma ** 2)
# for param_group in optimizer.param_groups:
# param_group['lr'] = state['learning_rate']
state['epoch'] = epoch
begin_epoch = time.time()
train_phase1()
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
print('\nNow retraining with correction\n')
def get_C_hat_transpose():
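    # estimate the corruption matrix from the silver set: run the phase-1 network over
    # the noisy data and, for each class, take the softmax vector of the example most
    # confidently predicted as that class (for CIFAR-10 the top 3% most confident
    # entries are zeroed first, so the pick lands just below the 97th percentile)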
probs = []
net.eval()
for batch_idx, (data, target) in enumerate(train_silver_loader):
data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
torch.autograd.Variable(target.cuda(), volatile=True)
# forward
output = net(data)
pred = F.softmax(output)
probs.extend(list(pred.data.cpu().numpy()))
probs = np.array(probs, dtype=np.float32)
C_hat = np.zeros((num_classes, num_classes))
for label in range(num_classes):
class_probs = probs[:, label]
if args.dataset == 'cifar10':
threshold = np.percentile(class_probs, 97, interpolation='higher')
class_probs[class_probs >= threshold] = 0
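            # (class_probs is a view into probs, so this zeroing also modifies probs in place)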
C_hat[label] = probs[np.argmax(class_probs)]
# C_hat[label] = probs[np.argsort(class_probs)][-1]
return C_hat.T.astype(np.float32)
C_hat_transpose = torch.from_numpy(get_C_hat_transpose())
C_hat_transpose = torch.autograd.Variable(C_hat_transpose.cuda(), requires_grad=False)
# /////// Resetting the network ////////
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
state['learning_rate'] = state['init_learning_rate']
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
model_name = os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch')
net.load_state_dict(torch.load(model_name))
def train_phase2(C_hat_transpose):
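    # phase 2 (GLC): gold examples get ordinary cross-entropy, while each silver example
    # is scored through the estimated corruption matrix,
    # loss_s = -log( sum_i C_hat[i, noisy_label] * softmax(output)[i] ),
    # i.e. the negative log-likelihood of the observed noisy label under the corruption model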
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_all_loader):
# we subtract num_classes because we added num_classes to allow us to identify gold examples
data, target = data.numpy(), target.numpy()
gold_indices = target > (num_classes - 1)
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices] - num_classes
data_g, target_g = torch.autograd.Variable(torch.FloatTensor(data_g).cuda()),\
torch.autograd.Variable(torch.from_numpy(target_g).long().cuda())
silver_indices = target < num_classes
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = torch.autograd.Variable(torch.FloatTensor(data_s).cuda()),\
torch.autograd.Variable(torch.from_numpy(target_s).long().cuda())
optimizer.zero_grad()
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat_transpose[torch.cuda.LongTensor(target_s.data)]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/args.batch_size
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.2 + float(loss.cpu().data.numpy()[0]) * 0.8
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_all_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# Main loop
for epoch in range(0, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train_phase2(C_hat_transpose)
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
log.close()
try: os.remove(os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
except: pass
| 16,509 | 39.268293 | 141 | py |
glc | glc-master/CIFAR/train_gold_only.py | # -*- coding: utf-8 -*-
import argparse
import os
import time
import math
import json
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
import wideresnet as wrn
import numpy as np
from load_corrupted_data import CIFAR10, CIFAR100
from PIL import Image
np.random.seed(1)
# note: nosgdr, schedule, and epochs are highly related settings
parser = argparse.ArgumentParser(description='Trains WideResNet on CIFAR',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional arguments
parser.add_argument('data_path', type=str, help='Root for the Cifar dataset.')
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
# Optimization options
parser.add_argument('--nosgdr', default=False, action='store_true', help='Turn off SGDR.')
parser.add_argument('--epochs', '-e', type=int, default=75, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--gold_fraction', '-gf', type=float, default=0.1, help='What fraction of the data should be trusted?')
parser.add_argument('--corruption_prob', '-cprob', type=float, default=0.3, help='The label corruption probability.')
parser.add_argument('--corruption_type', '-ctype', type=str, default='unif', help='Type of corruption ("unif" or "flip").')
parser.add_argument('--adjust', '-a', action='store_true', help='Adjust the C_hat estimate with base-rate information.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--test_bs', type=int, default=128)
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs. Use when SGDR is off.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability')
parser.add_argument('--nonlinearity', type=str, default='relu', help='Nonlinearity (relu, elu, gelu).')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
# i/o
parser.add_argument('--log', type=str, default='./', help='Log folder.')
args = parser.parse_args()
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
log = open(os.path.join(args.log, args.dataset + '_log.txt'), 'w')
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
log.write(json.dumps(state) + '\n')
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data_gold = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR10(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_gold_deterministic = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True)
test_data = CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'cifar100':
train_data_gold = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR100(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_gold_deterministic = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True)
test_data = CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 100
class Dataset(object):
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class TensorDataset(Dataset):
def __init__(self, data_tensor, target_tensor, transform):
# assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
self.transform = transform
def __getitem__(self, index):
img, target = self.data_tensor[index], self.target_tensor[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return 50000
train_silver_loader = torch.utils.data.DataLoader(
train_data_silver, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_gold_deterministic_loader = torch.utils.data.DataLoader(
train_data_gold_deterministic, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
train_gold_loader = torch.utils.data.DataLoader(
train_data_gold, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_all_loader = torch.utils.data.DataLoader(
TensorDataset(np.vstack((train_data_gold.train_data, train_data_silver.train_data)),
torch.from_numpy(np.array(train_data_gold.train_labels + train_data_silver.train_labels)),
train_transform),
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Init checkpoints
if not os.path.isdir(args.save):
os.makedirs(args.save)
# Init model, criterion, and optimizer
net = wrn.WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
print(net)
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.manual_seed(1)
if args.ngpu > 0:
torch.cuda.manual_seed(1)
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
# saving so we can start again from these same weights when applying the correction
torch.save(net.state_dict(), os.path.join(
args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
# Restore model
start_epoch = 0
# if args.load != '':
# for i in range(args.epochs-1,-1,-1):
# model_name = os.path.join(args.load, args.dataset + '_model_epoch' + str(i) + '.pytorch')
# if os.path.isfile(model_name):
# net.load_state_dict(torch.load(model_name))
# start_epoch = i+1
# print('Model restored! Epoch:', i)
# break
# if start_epoch == 0:
# assert False, "could not resume"
cudnn.benchmark = True # fire on all cylinders
def train_phase1():
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_silver_loader):
data, target = torch.autograd.Variable(data.cuda()), torch.autograd.Variable(target.cuda())
# forward
output = net(data)
# backward
optimizer.zero_grad()
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + loss.data[0] * 0.2
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_silver_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# test function (forward only)
def test():
net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
torch.autograd.Variable(target.cuda(), volatile=True)
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum()
# test loss average
loss_avg += loss.data[0]
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
# Main loop
for epoch in range(start_epoch, args.epochs):
continue # we skip this training step
# if epoch < 150:
# state['learning_rate'] = state['init_learning_rate']
# elif epoch >= 150 and epoch < 225:
# state['learning_rate'] = state['init_learning_rate'] * args.gamma
# elif epoch >= 225:
# state['learning_rate'] = state['init_learning_rate'] * (args.gamma ** 2)
# for param_group in optimizer.param_groups:
# param_group['lr'] = state['learning_rate']
state['epoch'] = epoch
begin_epoch = time.time()
train_phase1()
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
print('\nNow retraining with correction\n')
def get_C_hat_transpose():
# probs = []
# net.eval()
# for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
# # we subtract 10 because we added 10 to gold so we could identify which example is gold in train_phase2
# data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
# torch.autograd.Variable((target - num_classes).cuda(), volatile=True)
# # forward
# output = net(data)
# pred = F.softmax(output)
# probs.extend(list(pred.data.cpu().numpy()))
# probs = np.array(probs, dtype=np.float32)
# C_hat = np.zeros((num_classes, num_classes))
# for label in range(num_classes):
# indices = np.arange(len(train_data_gold.train_labels))[
# np.isclose(np.array(train_data_gold.train_labels) - num_classes, label)]
# C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
# if args.adjust is True:
# base_rate_clean = [0] * num_classes
# base_rate_corr = [0] * num_classes
# for label in range(num_classes):
# base_rate_clean[label] = sum(np.isclose(np.array(train_data_gold.train_labels) - num_classes, label))
# base_rate_corr[label] = sum(np.isclose(np.array(train_data_silver.train_labels), label))
# base_rate_clean = np.array(base_rate_clean).reshape((1, -1)) / len(train_data_gold.train_labels)
# base_rate_corr = np.array(base_rate_corr).reshape((1, -1)) / len(train_data_silver.train_labels)
# C_hat_better = cvxpy.Variable(num_classes, num_classes)
# objective = cvxpy.Minimize(
# 1e-2 * cvxpy.sum_squares(C_hat_better - C_hat) / num_classes +
# cvxpy.sum_squares(base_rate_clean * C_hat_better - base_rate_corr))
# constraints = [0 <= C_hat_better, C_hat_better <= 1, 1 == cvxpy.sum_entries(C_hat_better, axis=1)]
# prob = cvxpy.Problem(objective, constraints)
# prob.solve()
# C_hat = np.array(C_hat_better.value)
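    # gold-only baseline: no corruption estimate is made at all; the identity matrix is
    # returned, and train_phase2 below only ever sees trusted (gold) examples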
C_hat = np.eye(num_classes)
return C_hat.T.astype(np.float32)
C_hat_transpose = torch.from_numpy(get_C_hat_transpose())
C_hat_transpose = torch.autograd.Variable(C_hat_transpose.cuda(), requires_grad=False)
# /////// Resetting the network ////////
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
state['learning_rate'] = state['init_learning_rate']
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
model_name = os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch')
net.load_state_dict(torch.load(model_name))
def train_phase2(C_hat_transpose):
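    # unlike the other scripts this loops over train_gold_loader, so every target carries
    # the num_classes offset and only the cross-entropy branch on gold examples is active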
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_gold_loader):
# we subtract num_classes because we added num_classes to allow us to identify gold examples
data, target = data.numpy(), target.numpy()
gold_indices = target > (num_classes - 1)
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices] - num_classes
data_g, target_g = torch.autograd.Variable(torch.FloatTensor(data_g).cuda()),\
torch.autograd.Variable(torch.from_numpy(target_g).long().cuda())
silver_indices = target < num_classes
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = torch.autograd.Variable(torch.FloatTensor(data_s).cuda()),\
torch.autograd.Variable(torch.from_numpy(target_s).long().cuda())
optimizer.zero_grad()
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat_transpose[torch.cuda.LongTensor(target_s.data)]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/args.batch_size
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + float(loss.cpu().data.numpy()[0]) * 0.2
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_all_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# Main loop
for epoch in range(0, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train_phase2(C_hat_transpose)
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
log.close()
try: os.remove(os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
except: pass
| 17,700 | 39.691954 | 126 | py |
glc | glc-master/CIFAR/train_ideal.py | # -*- coding: utf-8 -*-
import argparse
import os
import time
import math
import json
import torch
from torch.autograd import Variable as V
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
import wideresnet as wrn
import numpy as np
from load_corrupted_data import CIFAR10, CIFAR100
from PIL import Image
# note: nosgdr, schedule, and epochs are highly related settings
parser = argparse.ArgumentParser(description='Trains WideResNet on CIFAR',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional arguments
parser.add_argument('data_path', type=str, help='Root for the Cifar dataset.')
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
# Optimization options
parser.add_argument('--nosgdr', default=False, action='store_true', help='Turn off SGDR.')
parser.add_argument('--epochs', '-e', type=int, default=75, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--gold_fraction', '-gf', type=float, default=0.1, help='What fraction of the data should be trusted?')
parser.add_argument('--corruption_prob', '-cprob', type=float, default=0.3, help='The label corruption probability.')
parser.add_argument('--corruption_type', '-ctype', type=str, default='unif', help='Type of corruption ("unif" or "flip").')
parser.add_argument('--adjust', '-a', action='store_true', help='Adjust the C_hat estimate with base-rate information.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--test_bs', type=int, default=128)
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs. Use when SGDR is off.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability')
parser.add_argument('--nonlinearity', type=str, default='relu', help='Nonlinearity (relu, elu, gelu).')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
# i/o
parser.add_argument('--log', type=str, default='./', help='Log folder.')
# random seed
parser.add_argument('--seed', type=int, default=1)
args = parser.parse_args()
np.random.seed(args.seed)
import socket
print()
print("This is on machine:", socket.gethostname())
print()
print(args)
print()
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
log = open(os.path.join(args.log, args.dataset + '_log.txt'), 'w')
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
log.write(json.dumps(state) + '\n')
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data_gold = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR10(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'cifar100':
train_data_gold = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR100(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 100
class Dataset(object):
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class TensorDataset(Dataset):
def __init__(self, data_tensor, target_tensor, transform):
# assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
self.transform = transform
def __getitem__(self, index):
img, target = self.data_tensor[index], self.target_tensor[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return 50000
train_silver_loader = torch.utils.data.DataLoader(
train_data_silver, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_gold_deterministic_loader = torch.utils.data.DataLoader(
train_data_gold_deterministic, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
train_all_loader = torch.utils.data.DataLoader(
TensorDataset(np.vstack((train_data_gold.train_data, train_data_silver.train_data)),
torch.from_numpy(np.array(train_data_gold.train_labels + train_data_silver.train_labels)),
train_transform),
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Init checkpoints
if not os.path.isdir(args.save):
os.makedirs(args.save)
# Init model, criterion, and optimizer
net = wrn.WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
print(net)
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.manual_seed(args.seed)
if args.ngpu > 0:
torch.cuda.manual_seed(args.seed)
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
# saving so we can start again from these same weights when applying the correction
torch.save(net.state_dict(), os.path.join(
args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
# Restore model
start_epoch = 0
# if args.load != '':
# for i in range(args.epochs-1,-1,-1):
# model_name = os.path.join(args.load, args.dataset + '_model_epoch' + str(i) + '.pytorch')
# if os.path.isfile(model_name):
# net.load_state_dict(torch.load(model_name))
# start_epoch = i+1
# print('Model restored! Epoch:', i)
# break
# if start_epoch == 0:
# assert False, "could not resume"
cudnn.benchmark = True # fire on all cylinders
def train_phase1():
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_silver_loader):
data, target = V(data.cuda()), V(target.cuda())
# forward
output = net(data)
# backward
optimizer.zero_grad()
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + loss.data[0] * 0.2
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_silver_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# test function (forward only)
def test():
net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = V(data.cuda(), volatile=True),\
V(target.cuda(), volatile=True)
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum()
# test loss average
loss_avg += loss.data[0]
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
# Main loop
for epoch in range(start_epoch, args.epochs):
break
# if epoch < 150:
# state['learning_rate'] = state['init_learning_rate']
# elif epoch >= 150 and epoch < 225:
# state['learning_rate'] = state['init_learning_rate'] * args.gamma
# elif epoch >= 225:
# state['learning_rate'] = state['init_learning_rate'] * (args.gamma ** 2)
# for param_group in optimizer.param_groups:
# param_group['lr'] = state['learning_rate']
state['epoch'] = epoch
begin_epoch = time.time()
train_phase1()
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
print('\nNow retraining with correction\n')
def get_C_hat_transpose():
probs = []
net.eval()
for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
        # we subtract num_classes because we added num_classes to the gold labels so we could identify which examples are gold in train_phase2
data, target = V(data.cuda(), volatile=True),\
V((target - num_classes).cuda(), volatile=True)
# forward
output = net(data)
pred = F.softmax(output)
probs.extend(list(pred.data.cpu().numpy()))
probs = np.array(probs, dtype=np.float32)
C_hat = np.zeros((num_classes, num_classes))
for label in range(num_classes):
indices = np.arange(len(train_data_gold.train_labels))[
np.isclose(np.array(train_data_gold.train_labels) - num_classes, label)]
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
return C_hat.T.astype(np.float32)
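# "ideal" correction: ignore the estimate above and use the true corruption matrix C
# that the corrupted-data loader stores on the silver dataset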
C_hat_transpose = torch.FloatTensor(train_data_silver.C).t()
C_hat_transpose = V(C_hat_transpose.cuda(), requires_grad=False)
# /////// Resetting the network ////////
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
state['learning_rate'] = state['init_learning_rate']
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
model_name = os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch')
net.load_state_dict(torch.load(model_name))
def train_phase2(C_hat_transpose):
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_all_loader):
# we subtract num_classes because we added num_classes to allow us to identify gold examples
data, target = data.numpy(), target.numpy()
gold_indices = target > (num_classes - 1)
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices] - num_classes
data_g, target_g = V(torch.FloatTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_indices = target < num_classes
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.FloatTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
optimizer.zero_grad()
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat_transpose[torch.cuda.LongTensor(target_s.data)]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/args.batch_size
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.2 + float(loss.cpu().data.numpy()[0]) * 0.8
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_all_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# Main loop
for epoch in range(0, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train_phase2(C_hat_transpose)
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
log.close()
try: os.remove(os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
except: pass
| 16,528 | 38.733173 | 126 | py |
glc | glc-master/CIFAR/train_ours_adjusted.py | # -*- coding: utf-8 -*-
import argparse
import os
import time
import math
import json
import torch
from torch.autograd import Variable as V
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
import wideresnet as wrn
import numpy as np
from load_corrupted_data import CIFAR10, CIFAR100
from PIL import Image
# note: nosgdr, schedule, and epochs are highly related settings
parser = argparse.ArgumentParser(description='Trains WideResNet on CIFAR',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional arguments
parser.add_argument('data_path', type=str, help='Root for the Cifar dataset.')
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
# Optimization options
parser.add_argument('--nosgdr', default=False, action='store_true', help='Turn off SGDR.')
parser.add_argument('--epochs', '-e', type=int, default=75, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--gold_fraction', '-gf', type=float, default=0.1, help='What fraction of the data should be trusted?')
parser.add_argument('--corruption_prob', '-cprob', type=float, default=0.3, help='The label corruption probability.')
parser.add_argument('--corruption_type', '-ctype', type=str, default='unif', help='Type of corruption ("unif" or "flip").')
parser.add_argument('--adjust', '-a', action='store_true', help='Adjust the C_hat estimate with base-rate information.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--test_bs', type=int, default=128)
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs. Use when SGDR is off.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability')
parser.add_argument('--nonlinearity', type=str, default='relu', help='Nonlinearity (relu, elu, gelu).')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
# i/o
parser.add_argument('--log', type=str, default='./', help='Log folder.')
# random seed
parser.add_argument('--seed', type=int, default=1)
args = parser.parse_args()
np.random.seed(args.seed)
import socket
print()
print("This is on machine:", socket.gethostname())
print()
print(args)
print()
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
log = open(os.path.join(args.log, args.dataset + '_log.txt'), 'w')
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
log.write(json.dumps(state) + '\n')
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data_gold = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR10(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'cifar100':
train_data_gold = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR100(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 100
true_C = train_data_silver.C
class Dataset(object):
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class TensorDataset(Dataset):
def __init__(self, data_tensor, target_tensor, transform):
# assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
self.transform = transform
def __getitem__(self, index):
img, target = self.data_tensor[index], self.target_tensor[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return 50000
train_silver_loader = torch.utils.data.DataLoader(
train_data_silver, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_gold_deterministic_loader = torch.utils.data.DataLoader(
train_data_gold_deterministic, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
train_all_loader = torch.utils.data.DataLoader(
TensorDataset(np.vstack((train_data_gold.train_data, train_data_silver.train_data)),
torch.from_numpy(np.array(train_data_gold.train_labels + train_data_silver.train_labels)),
train_transform),
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Init checkpoints
if not os.path.isdir(args.save):
os.makedirs(args.save)
# Init model, criterion, and optimizer
net = wrn.WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
print(net)
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.manual_seed(args.seed)
if args.ngpu > 0:
torch.cuda.manual_seed(args.seed)
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
# saving so we can start again from these same weights when applying the correction
torch.save(net.state_dict(), os.path.join(
args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
# Restore model
start_epoch = 0
# if args.load != '':
# for i in range(args.epochs-1,-1,-1):
# model_name = os.path.join(args.load, args.dataset + '_model_epoch' + str(i) + '.pytorch')
# if os.path.isfile(model_name):
# net.load_state_dict(torch.load(model_name))
# start_epoch = i+1
# print('Model restored! Epoch:', i)
# break
# if start_epoch == 0:
# assert False, "could not resume"
cudnn.benchmark = True # fire on all cylinders
def train_phase1():
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_silver_loader):
data, target = V(data.cuda()), V(target.cuda())
# forward
output = net(data)
# backward
optimizer.zero_grad()
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + loss.data[0] * 0.2
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_silver_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# test function (forward only)
def test():
net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = V(data.cuda(), volatile=True),\
V(target.cuda(), volatile=True)
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum()
# test loss average
loss_avg += loss.data[0]
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
# Main loop
for epoch in range(start_epoch, args.epochs):
# if epoch < 150:
# state['learning_rate'] = state['init_learning_rate']
# elif epoch >= 150 and epoch < 225:
# state['learning_rate'] = state['init_learning_rate'] * args.gamma
# elif epoch >= 225:
# state['learning_rate'] = state['init_learning_rate'] * (args.gamma ** 2)
# for param_group in optimizer.param_groups:
# param_group['lr'] = state['learning_rate']
state['epoch'] = epoch
begin_epoch = time.time()
train_phase1()
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
print('\nNow retraining with correction\n')
def get_C_hat_transpose():
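    # GLC estimate: for each true class, average the phase-1 network's softmax outputs
    # over the trusted (gold) examples of that class to form a row of C_hat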
probs = []
net.eval()
for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
        # we subtract num_classes because we added num_classes to the gold labels so we could identify which examples are gold in train_phase2
data, target = V(data.cuda(), volatile=True),\
V((target - num_classes).cuda(), volatile=True)
# forward
output = net(data)
pred = F.softmax(output)
probs.extend(list(pred.data.cpu().numpy()))
probs = np.array(probs, dtype=np.float32)
C_hat = np.zeros((num_classes, num_classes))
for label in range(num_classes):
indices = np.arange(len(train_data_gold.train_labels))[
np.isclose(np.array(train_data_gold.train_labels) - num_classes, label)]
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
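    # base-rate adjustment: solve a small convex program that keeps C_hat close to the
    # averaged estimate, forces each row to be a probability distribution, and makes the
    # clean-class base rates pushed through C_hat match the noisy base rates of the silver labels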
import cvxpy
base_rate_clean = [0] * num_classes
base_rate_corr = [0] * num_classes
for label in range(num_classes):
        # gold labels were offset by num_classes when loaded, so shift them back before counting
        base_rate_clean[label] = sum(np.isclose(np.array(train_data_gold.train_labels) - num_classes, label))
base_rate_corr[label] = sum(np.array(train_data_silver.train_labels) == label)
base_rate_clean = np.array(base_rate_clean).reshape((1,-1)) / len(train_data_gold.train_labels)
base_rate_corr = np.array(base_rate_corr).reshape((1,-1)) / len(train_data_silver.train_labels)
print(base_rate_clean)
print(base_rate_corr)
C_hat_better = cvxpy.Variable(num_classes,num_classes)
objective = cvxpy.Minimize(
1e-2*cvxpy.sum_squares(C_hat_better - C_hat)/num_classes +
cvxpy.sum_squares(base_rate_clean * C_hat_better - base_rate_corr))
constraints = [0 <= C_hat_better, C_hat_better <= 1, 1 == cvxpy.sum_entries(C_hat_better, axis=1)]
prob = cvxpy.Problem(objective, constraints)
prob.solve()
C_hat = np.array(C_hat_better.value)
return C_hat.T.astype(np.float32)
C_hat = get_C_hat_transpose().T
print('Our C_hat (adjusted)\n')
print(repr(C_hat))
print()
print('True C\n')
print(repr(true_C))
print()
print('MSE between them:', np.mean(np.power(true_C - C_hat, 2)))
C_hat_transpose = torch.from_numpy(C_hat.T)  # reuse the estimate computed above instead of re-running the forward pass and solver
C_hat_transpose = V(C_hat_transpose.cuda(), requires_grad=False)
# /////// Resetting the network ////////
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
state['learning_rate'] = state['init_learning_rate']
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
model_name = os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch')
net.load_state_dict(torch.load(model_name))
def train_phase2(C_hat_transpose):
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_all_loader):
# we subtract num_classes because we added num_classes to allow us to identify gold examples
data, target = data.numpy(), target.numpy()
gold_indices = target > (num_classes - 1)
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices] - num_classes
data_g, target_g = V(torch.FloatTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_indices = target < num_classes
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.FloatTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
optimizer.zero_grad()
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat_transpose[torch.cuda.LongTensor(target_s.data)]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/args.batch_size
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.2 + float(loss.cpu().data.numpy()[0]) * 0.8
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_all_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# Main loop
for epoch in range(0, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train_phase2(C_hat_transpose)
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
log.close()
try: os.remove(os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
except: pass
| 17,771 | 38.145374 | 126 | py |
glc | glc-master/CIFAR/train_ours_calibrated.py | # -*- coding: utf-8 -*-
import argparse
import os
import time
import math
import json
import torch
from torch.autograd import Variable as V
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
import wideresnet as wrn
import numpy as np
from load_corrupted_data import CIFAR10, CIFAR100
from PIL import Image
# note: nosgdr, schedule, and epochs are highly related settings
parser = argparse.ArgumentParser(description='Trains WideResNet on CIFAR',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional arguments
parser.add_argument('data_path', type=str, help='Root for the Cifar dataset.')
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
# Optimization options
parser.add_argument('--nosgdr', default=False, action='store_true', help='Turn off SGDR.')
parser.add_argument('--epochs', '-e', type=int, default=75, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--gold_fraction', '-gf', type=float, default=0.1, help='What fraction of the data should be trusted?')
parser.add_argument('--corruption_prob', '-cprob', type=float, default=0.3, help='The label corruption probability.')
parser.add_argument('--corruption_type', '-ctype', type=str, default='unif', help='Type of corruption ("unif" or "flip").')
parser.add_argument('--adjust', '-a', action='store_true', help='Adjust the C_hat estimate with base-rate information.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--test_bs', type=int, default=128)
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs. Use when SGDR is off.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability')
parser.add_argument('--nonlinearity', type=str, default='relu', help='Nonlinearity (relu, elu, gelu).')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
# i/o
parser.add_argument('--log', type=str, default='./', help='Log folder.')
# random seed
parser.add_argument('--seed', type=int, default=1)
args = parser.parse_args()
np.random.seed(args.seed)
import socket
print()
print("This is on machine:", socket.gethostname())
print()
print(args)
print()
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
log = open(os.path.join(args.log, args.dataset + '_log.txt'), 'w')
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
log.write(json.dumps(state) + '\n')
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data_gold = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR10(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'cifar100':
train_data_gold = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
train_data_silver = CIFAR100(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 100
class Dataset(object):
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class TensorDataset(Dataset):
def __init__(self, data_tensor, target_tensor, transform):
# assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
self.transform = transform
def __getitem__(self, index):
img, target = self.data_tensor[index], self.target_tensor[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return 50000
train_silver_loader = torch.utils.data.DataLoader(
train_data_silver, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_gold_deterministic_loader = torch.utils.data.DataLoader(
train_data_gold_deterministic, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
train_all_loader = torch.utils.data.DataLoader(
TensorDataset(np.vstack((train_data_gold.train_data, train_data_silver.train_data)),
torch.from_numpy(np.array(train_data_gold.train_labels + train_data_silver.train_labels)),
train_transform),
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
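# Note on label encoding: gold (trusted) examples carry labels offset by +num_classes so that
# gold and silver examples can be told apart after they are mixed together in train_all_loader;
# the offset is removed wherever the gold labels are actually used.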
# Init checkpoints
if not os.path.isdir(args.save):
os.makedirs(args.save)
# Init model, criterion, and optimizer
net = wrn.WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
print(net)
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.manual_seed(args.seed)
if args.ngpu > 0:
torch.cuda.manual_seed(args.seed)
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
# saving so we can start again from these same weights when applying the correction
torch.save(net.state_dict(), os.path.join(
args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
# Restore model
start_epoch = 0
# if args.load != '':
# for i in range(args.epochs-1,-1,-1):
# model_name = os.path.join(args.load, args.dataset + '_model_epoch' + str(i) + '.pytorch')
# if os.path.isfile(model_name):
# net.load_state_dict(torch.load(model_name))
# start_epoch = i+1
# print('Model restored! Epoch:', i)
# break
# if start_epoch == 0:
# assert False, "could not resume"
cudnn.benchmark = True # fire on all cylinders
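# Phase 1: train on the silver (noisy) labels only with plain cross-entropy. The resulting
# network is used afterwards to estimate the label-corruption matrix C_hat on the gold set.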
def train_phase1():
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_silver_loader):
data, target = V(data.cuda()), V(target.cuda())
# forward
output = net(data)
# backward
optimizer.zero_grad()
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + loss.data[0] * 0.2
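        # SGDR-style schedule: 'tt' advances from 0 to pi over the whole run, and since
        # sin(pi/2 + tt) = cos(tt), the learning rate anneals from args.learning_rate down to ~0.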
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_silver_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# test function (forward only)
def test():
net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = V(data.cuda(), volatile=True),\
V(target.cuda(), volatile=True)
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum()
# test loss average
loss_avg += loss.data[0]
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
# Main loop
for epoch in range(start_epoch, args.epochs):
# if epoch < 150:
# state['learning_rate'] = state['init_learning_rate']
# elif epoch >= 150 and epoch < 225:
# state['learning_rate'] = state['init_learning_rate'] * args.gamma
# elif epoch >= 225:
# state['learning_rate'] = state['init_learning_rate'] * (args.gamma ** 2)
# for param_group in optimizer.param_groups:
# param_group['lr'] = state['learning_rate']
state['epoch'] = epoch
begin_epoch = time.time()
train_phase1()
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
print('\nNow retraining with correction\n')
def process_data(data_loader):
logits = []
confidence = []
correct = []
for data, target in data_loader:
        # gold labels carry the +num_classes offset used to mark trusted data; remove it so that
        # the accuracy recorded in 'correct' below is meaningful
        data, target = V(data.cuda(), volatile=True), V((target - num_classes).cuda(), volatile=True)
output = net(data)
logits.extend(output.cpu().data.numpy())
confidence.extend(F.softmax(output, dim=1).max(1)[0].cpu().data.numpy().squeeze().tolist())
pred = output.data.max(1)[1]
correct.extend(pred.eq(target.data).cpu().numpy().squeeze().tolist())
logits = np.array(logits).astype(np.float32)
confidence = np.array(confidence).astype(np.float32)
correct = np.array(correct).astype(np.float32)
return logits, confidence, correct
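# calib_err: adaptive-binning calibration error. Predictions are sorted by confidence and split
# into bins of ~100 examples; each bin contributes |mean confidence - mean accuracy|, and the
# per-bin gaps are aggregated with an L1, L2 (RMS), or max norm depending on p.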
def calib_err(confidence, correct, p='2'):
beta = 100
idxs = np.argsort(confidence)
confidence = confidence[idxs]
correct = correct[idxs]
bins = [[i * beta, (i + 1) * beta] for i in range(len(confidence) // beta)]
bins[-1] = [bins[-1][0], len(confidence)]
cerr = 0
total_examples = len(confidence)
for i in range(len(bins) - 1):
bin_confidence = confidence[bins[i][0]:bins[i][1]]
bin_correct = correct[bins[i][0]:bins[i][1]]
num_examples_in_bin = len(bin_confidence)
if num_examples_in_bin > 0:
difference = np.abs(np.nanmean(bin_confidence) - np.nanmean(bin_correct))
if p == '2':
cerr += num_examples_in_bin / total_examples * np.square(difference)
elif p == '1':
cerr += num_examples_in_bin / total_examples * difference
elif p == 'infty' or p == 'infinity' or p == 'max':
cerr = np.maximum(cerr, difference)
else:
assert False, "p must be '1', '2', or 'infty'"
if p == '2':
cerr = np.sqrt(cerr)
return cerr
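# tune_temp: fit a softmax temperature on the gold set by minimizing the negative log-likelihood
# of the temperature-scaled logits (a convex problem solved with cvxpy), then report the
# calibration error before and after scaling.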
def tune_temp(logits, labels, correct):
set_size = logits.shape[0]
    t = cx.Variable()  # scalar softmax temperature to optimize (cvxpy decision variable)
expr = sum([cx.Minimize(cx.log_sum_exp(logits[i, :] * t) - logits[i, labels[i]] * t)
for i in range(set_size)])
p = cx.Problem(expr)
p.solve()
t = 1 / t.value
confidence_with_temp_change = \
F.softmax(V(torch.from_numpy(logits / t), volatile=True), dim=1).data.max(1)[0].numpy().squeeze()
    old_confidence = F.softmax(V(torch.from_numpy(logits), volatile=True), dim=1).data.max(1)[0].numpy().squeeze()
    old_calib_err = calib_err(old_confidence, correct, p='2')
curr_calib_err = calib_err(confidence_with_temp_change, correct, p='2')
print('Val Temperature: \t\t{:.2f}'.format(t))
print('Val Calibration Error: \t\t{:.2f}'.format(100 * curr_calib_err))
print('Old Calibration Error: \t\t{:.2f}'.format(100 * old_calib_err))
return t
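# Estimate the corruption matrix: for each true class (known from the gold labels), average the
# temperature-scaled softmax outputs of the silver-trained net over that class's gold examples.
# Row y of C_hat then approximates p(noisy label | true label = y); the transpose is returned.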
def get_C_hat_transpose():
net.eval()
logits, _, correct = process_data(train_gold_deterministic_loader)
    # the gold labels are stored with a +num_classes offset; remove it before tuning the temperature
    t = tune_temp(logits, np.array(train_data_gold_deterministic.train_labels) - num_classes, correct)
probs = []
for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
data, target = V(data.cuda(), volatile=True),\
V((target - num_classes).cuda(), volatile=True)
# forward
output = net(data)
pred = F.softmax(output / t)
probs.extend(list(pred.data.cpu().numpy()))
probs = np.array(probs, dtype=np.float32)
C_hat = np.zeros((num_classes, num_classes))
for label in range(num_classes):
indices = np.arange(len(train_data_gold.train_labels))[
np.isclose(np.array(train_data_gold.train_labels) - num_classes, label)]
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
return C_hat.T.astype(np.float32)
true_C = train_data_silver.C
C_hat = get_C_hat_transpose().T
print('Our C_hat (hopefully calibrated)\n')
print(repr(C_hat))
print()
print('True C\n')
print(repr(true_C))
print()
print('MSE between them:', np.mean(np.power(true_C - C_hat, 2)))
C_hat_transpose = torch.from_numpy(get_C_hat_transpose())
C_hat_transpose = V(C_hat_transpose.cuda(), requires_grad=False)
# /////// Resetting the network ////////
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
state['learning_rate'] = state['init_learning_rate']
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
model_name = os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch')
net.load_state_dict(torch.load(model_name))
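# Phase 2: retrain from the saved initial weights on gold + silver together. Silver examples use
# the corrected loss -log([C_hat^T]_{y_tilde} . softmax(f(x))), i.e. the model's clean-label
# posterior pushed through the estimated corruption matrix before being scored against the noisy
# label; gold examples keep the ordinary cross-entropy. Both sums are normalized by the batch size.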
def train_phase2(C_hat_transpose):
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_all_loader):
# we subtract num_classes because we added num_classes to allow us to identify gold examples
data, target = data.numpy(), target.numpy()
gold_indices = target > (num_classes - 1)
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices] - num_classes
data_g, target_g = V(torch.FloatTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_indices = target < num_classes
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.FloatTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
optimizer.zero_grad()
# forward
loss_s = 0
if silver_len > 0:
output_s = net(data_s)
pre1 = C_hat_transpose[torch.cuda.LongTensor(target_s.data)]
pre2 = torch.mul(F.softmax(output_s), pre1)
loss_s = -(torch.log(pre2.sum(1))).sum(0)
loss_g = 0
if gold_len > 0:
output_g = net(data_g)
loss_g = F.cross_entropy(output_g, target_g, size_average=False)
# backward
loss = (loss_g + loss_s)/args.batch_size
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.2 + float(loss.cpu().data.numpy()[0]) * 0.8
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_all_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# Main loop
for epoch in range(0, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train_phase2(C_hat_transpose)
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
log.close()
try: os.remove(os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
except OSError: pass  # the temporary init checkpoint may already be gone
| 19,418 | 37.076471 | 126 | py |
glc | glc-master/CIFAR/train_forward.py | # -*- coding: utf-8 -*-
import argparse
import os
import time
import math
import json
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
import wideresnet as wrn
import numpy as np
from load_corrupted_data import CIFAR10, CIFAR100
from PIL import Image
import socket
np.random.seed(1)
# note: nosgdr, schedule, and epochs are highly related settings
parser = argparse.ArgumentParser(description='Trains WideResNet on CIFAR',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional arguments
parser.add_argument('data_path', type=str, help='Root for the Cifar dataset.')
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
# Optimization options
parser.add_argument('--nosgdr', default=False, action='store_true', help='Turn off SGDR.')
parser.add_argument('--epochs', '-e', type=int, default=75, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--gold_fraction', '-gf', type=float, default=0.1, help='What fraction of the data should be trusted?')
parser.add_argument('--corruption_prob', '-cprob', type=float, default=0.3, help='The label corruption probability.')
parser.add_argument('--corruption_type', '-ctype', type=str, default='unif', help='Type of corruption ("unif" or "flip" or "hierarchical").')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--test_bs', type=int, default=256)
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs. Use when SGDR is off.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers (default: 28)')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor (default: 10)')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability (default: 0.0)')
parser.add_argument('--nonlinearity', type=str, default='relu', help='Nonlinearity (relu, elu, gelu).')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
# i/o
parser.add_argument('--log', type=str, default='./', help='Log folder.')
args = parser.parse_args()
print()
print("This is on machine:", socket.gethostname())
print()
print(args)
print()
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
log = open(os.path.join(args.log, args.dataset + '_log.txt'), 'w')
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
log.write(json.dumps(state) + '\n')
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data_gold = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, distinguish_gold=False)
train_data_silver = CIFAR10(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_silver_deterministic = CIFAR10(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'cifar100':
train_data_gold = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, distinguish_gold=False)
train_data_silver = CIFAR100(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_silver_deterministic = CIFAR100(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 100
class Dataset(object):
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class TensorDataset(Dataset):
def __init__(self, data_tensor, target_tensor, transform):
# assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
self.transform = transform
def __getitem__(self, index):
img, target = self.data_tensor[index], self.target_tensor[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return 50000
train_loader = torch.utils.data.DataLoader(
TensorDataset(np.vstack((train_data_gold.train_data, train_data_silver.train_data)),
torch.from_numpy(np.array(train_data_gold.train_labels + train_data_silver.train_labels)),
train_transform),
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_loader_deterministic = torch.utils.data.DataLoader(
TensorDataset(np.vstack((train_data_gold_deterministic.train_data, train_data_silver_deterministic.train_data)),
torch.from_numpy(np.array(train_data_gold_deterministic.train_labels + train_data_silver_deterministic.train_labels)),
train_transform),
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Init checkpoints
if not os.path.isdir(args.save):
os.makedirs(args.save)
# Init model, criterion, and optimizer
net = wrn.WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
print(net)
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.manual_seed(1)
if args.ngpu > 0:
torch.cuda.manual_seed(1)
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
# # saving so we can start again from these same weights when applying the correction
# torch.save(net.state_dict(), os.path.join(
# args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
# Restore model
start_epoch = 0
# if args.load != '':
# for i in range(args.epochs-1,-1,-1):
# model_name = os.path.join(args.load, args.dataset + '_model_epoch' + str(i) + '.pytorch')
# if os.path.isfile(model_name):
# net.load_state_dict(torch.load(model_name))
# start_epoch = i+1
# print('Model restored! Epoch:', i)
# break
# if start_epoch == 0:
# assert False, "could not resume"
cudnn.benchmark = True # fire on all cylinders
# train function (forward, backward, update)
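# With no_correction=False this implements the forward correction: the predicted clean-label
# distribution softmax(f(x)) is multiplied by the row of C_hat^T for the observed noisy label,
# summed over classes, and the negative log of that probability is minimized.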
def train(no_correction=True, C_hat_transpose=None):
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_loader):
data, target = torch.autograd.Variable(data.cuda()), torch.autograd.Variable(target.cuda())
# forward
output = net(data)
# backward
optimizer.zero_grad()
if no_correction:
loss = F.cross_entropy(output, target)
else:
pre1 = C_hat_transpose[torch.cuda.LongTensor(target.data)]
pre2 = torch.mul(F.softmax(output), pre1)
loss = -(torch.log(pre2.sum(1))).mean()
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.2 + loss.data[0] * 0.8
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# test function (forward only)
def test():
net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
torch.autograd.Variable(target.cuda(), volatile=True)
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum()
# test loss average
loss_avg += loss.data[0]
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
# Main loop
for epoch in range(start_epoch, args.epochs):
# if epoch < 150:
# state['learning_rate'] = state['init_learning_rate']
# elif epoch >= 150 and epoch < 225:
# state['learning_rate'] = state['init_learning_rate'] * args.gamma
# elif epoch >= 225:
# state['learning_rate'] = state['init_learning_rate'] * (args.gamma ** 2)
# for param_group in optimizer.param_groups:
# param_group['lr'] = state['learning_rate']
state['epoch'] = epoch
begin_epoch = time.time()
train()
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
print('\nNow retraining with correction\n')
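# Here C_hat is estimated without trusted labels: for each class, take the softmax output of the
# training example the network is most confident about for that class (on CIFAR-10 the top 3% of
# confidences are first zeroed out as a robust variant) and use it as that class's row of C_hat.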
def get_C_hat_transpose():
probs = []
net.eval()
for batch_idx, (data, target) in enumerate(train_loader_deterministic):
data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
torch.autograd.Variable(target.cuda(), volatile=True)
# forward
output = net(data)
pred = F.softmax(output)
probs.extend(list(pred.data.cpu().numpy()))
probs = np.array(probs, dtype=np.float32)
C_hat = np.zeros((num_classes, num_classes))
for label in range(num_classes):
class_probs = probs[:, label]
if args.dataset == 'cifar10':
threshold = np.percentile(class_probs, 97, interpolation='higher')
class_probs[class_probs >= threshold] = 0
C_hat[label] = probs[np.argmax(class_probs)]
# C_hat[label] = probs[np.argsort(class_probs)][-1]
return C_hat.T.astype(np.float32)
C_hat_transpose = torch.from_numpy(get_C_hat_transpose())
C_hat_transpose = torch.autograd.Variable(C_hat_transpose.cuda(), requires_grad=False)
# /////// Resetting the network ////////
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
state['learning_rate'] = state['init_learning_rate']
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
# model_name = os.path.join(
# args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch')
# net.load_state_dict(torch.load(model_name))
# Main loop
for epoch in range(0, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train(no_correction=False, C_hat_transpose=C_hat_transpose)
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
log.close()
# try: os.remove(os.path.join(args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) +\
# args.corruption_type + '_init.pytorch'))
# except: True # prodigious programming form
| 15,082 | 39.986413 | 141 | py |
glc | glc-master/CIFAR/load_corrupted_data.py | from PIL import Image
import os
import os.path
import errno
import numpy as np
import sys
import pickle
import torch.utils.data as data
from torchvision.datasets.utils import download_url, check_integrity
import torch
import torch.nn.functional as F
from torch.autograd import Variable as V
import wideresnet as wrn
import torchvision.transforms as transforms
def uniform_mix_C(mixing_ratio, num_classes):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
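# e.g. uniform_mix_C(0.3, 3) gives 0.8 on the diagonal and 0.1 everywhere else (rows sum to 1).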
def flip_labels_C(corruption_prob, num_classes, seed=1):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(seed)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
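# e.g. with corruption_prob=0.3 each row has 0.7 on the diagonal and 0.3 in one random off-diagonal column.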
class CIFAR10(data.Dataset):
base_folder = 'cifar-10-batches-py'
url = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
def __init__(self, root='~/home-nfs/dan/cifar_data', train=True, gold=True, gold_fraction=0.1,
corruption_prob=0, corruption_type='unif', transform=None, target_transform=None,
download=False, shuffle_indices=None, distinguish_gold=True, seed=1):
self.root = root
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.gold = gold
self.gold_fraction = gold_fraction
self.corruption_prob = corruption_prob
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# now load the picked numpy arrays
if self.train:
self.train_data = []
self.train_labels = []
self.train_coarse_labels = []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.train_data.append(entry['data'])
if 'labels' in entry:
self.train_labels += entry['labels']
num_classes = 10
else:
self.train_labels += entry['fine_labels']
self.train_coarse_labels += entry['coarse_labels']
num_classes = 100
fo.close()
self.train_data = np.concatenate(self.train_data)
self.train_data = self.train_data.reshape((50000, 3, 32, 32))
self.train_data = self.train_data.transpose((0, 2, 3, 1)) # convert to HWC
if gold is True:
if shuffle_indices is None:
indices = np.arange(50000)
shuffled_train_labels = self.train_labels
while len(set(shuffled_train_labels[:int(gold_fraction * 50000)])) < num_classes:
np.random.shuffle(indices)
shuffled_train_labels = list(np.array(self.train_labels)[indices])
else:
indices = shuffle_indices
self.train_data = self.train_data[indices][:int(gold_fraction * 50000)]
if distinguish_gold:
# this ad-hoc move is done so we can identify which examples are
# gold/trusted and which are silver/unstrusted
self.train_labels = list(np.array(self.train_labels)[indices][:int(gold_fraction * 50000)] + num_classes)
else:
self.train_labels = list(np.array(self.train_labels)[indices][:int(gold_fraction * 50000)])
self.shuffle_indices = indices
else:
indices = np.arange(len(self.train_data)) if shuffle_indices is None else shuffle_indices
self.train_data = self.train_data[indices][int(gold_fraction * 50000):]
self.train_labels = list(np.array(self.train_labels)[indices][int(gold_fraction * 50000):])
if corruption_type == 'hierarchical':
self.train_coarse_labels = list(np.array(self.train_coarse_labels)[indices][int(gold_fraction * 50000):])
if corruption_type == 'unif':
C = uniform_mix_C(self.corruption_prob, num_classes)
self.C = C
elif corruption_type == 'flip':
C = flip_labels_C(self.corruption_prob, num_classes)
self.C = C
elif corruption_type == 'hierarchical':
assert num_classes == 100, 'You must use CIFAR-100 with the hierarchical corruption.'
coarse_fine = []
for i in range(20):
coarse_fine.append(set())
for i in range(len(self.train_labels)):
coarse_fine[self.train_coarse_labels[i]].add(self.train_labels[i])
for i in range(20):
coarse_fine[i] = list(coarse_fine[i])
C = np.eye(num_classes) * (1 - corruption_prob)
for i in range(20):
tmp = np.copy(coarse_fine[i])
for j in range(len(tmp)):
tmp2 = np.delete(np.copy(tmp), j)
C[tmp[j], tmp2] += corruption_prob * 1/len(tmp2)
self.C = C
elif corruption_type == 'clabels':
net = wrn.WideResNet(40, num_classes, 2, dropRate=0.3).cuda()
model_name = './cifar{}_labeler'.format(num_classes)
net.load_state_dict(torch.load(model_name))
net.eval()
else:
assert False, "Invalid corruption type '{}' given. Must be in {'unif', 'flip', 'hierarchical'}".format(corruption_type)
np.random.seed(seed)
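                # 'clabels' corruption: resample every training label from the tempered softmax of a
                # pretrained labeler network, instead of applying an explicit corruption matrix C.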
if corruption_type == 'clabels':
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
# obtain sampling probabilities
sampling_probs = []
print('Starting labeling')
for i in range((len(self.train_labels) // 64) + 1):
current = self.train_data[i*64:(i+1)*64]
current = [Image.fromarray(current[i]) for i in range(len(current))]
current = torch.cat([test_transform(current[i]).unsqueeze(0) for i in range(len(current))], dim=0)
data = V(current).cuda()
logits = net(data)
                        smax = F.softmax(logits / 5)  # tempered softmax (temperature of 5) used as the sampling distribution
sampling_probs.append(smax.data.cpu().numpy())
sampling_probs = np.concatenate(sampling_probs, 0)
print('Finished labeling 1')
new_labeling_correct = 0
argmax_labeling_correct = 0
for i in range(len(self.train_labels)):
old_label = self.train_labels[i]
new_label = np.random.choice(num_classes, p=sampling_probs[i])
self.train_labels[i] = new_label
if old_label == new_label:
new_labeling_correct += 1
if old_label == np.argmax(sampling_probs[i]):
argmax_labeling_correct += 1
print('Finished labeling 2')
print('New labeling accuracy:', new_labeling_correct / len(self.train_labels))
print('Argmax labeling accuracy:', argmax_labeling_correct / len(self.train_labels))
else:
for i in range(len(self.train_labels)):
self.train_labels[i] = np.random.choice(num_classes, p=C[self.train_labels[i]])
self.corruption_matrix = C
else:
f = self.test_list[0][0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
def __getitem__(self, index):
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
if self.train:
if self.gold is True:
return int(self.gold_fraction * 50000)
else:
return 50000 - int(self.gold_fraction * 50000)
else:
return 10000
def _check_integrity(self):
root = self.root
for fentry in (self.train_list + self.test_list):
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
def download(self):
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
root = self.root
download_url(self.url, root, self.filename, self.tgz_md5)
# extract file
cwd = os.getcwd()
tar = tarfile.open(os.path.join(root, self.filename), "r:gz")
os.chdir(root)
tar.extractall()
tar.close()
os.chdir(cwd)
class CIFAR100(CIFAR10):
base_folder = 'cifar-100-python'
url = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
| 11,936 | 40.737762 | 139 | py |
glc | glc-master/CIFAR/train_convex_combo.py | # -*- coding: utf-8 -*-
import argparse
import os
import time
import math
import json
import torch
from torch.autograd import Variable as V
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
import wideresnet as wrn
import numpy as np
from load_corrupted_data import CIFAR10, CIFAR100
from PIL import Image
import socket
np.random.seed(1)
# note: nosgdr, schedule, and epochs are highly related settings
parser = argparse.ArgumentParser(description='Trains WideResNet on CIFAR',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional arguments
parser.add_argument('data_path', type=str, help='Root for the Cifar dataset.')
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
# Optimization options
parser.add_argument('--nosgdr', default=False, action='store_true', help='Turn off SGDR.')
parser.add_argument('--epochs', '-e', type=int, default=75, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--gold_fraction', '-gf', type=float, default=0.1, help='What fraction of the data should be trusted?')
parser.add_argument('--corruption_prob', '-cprob', type=float, default=0.3, help='The label corruption probability.')
parser.add_argument('--corruption_type', '-ctype', type=str, default='unif', help='Type of corruption ("unif" or "flip").')
parser.add_argument('--adjust', '-a', action='store_true', help='Adjust the C_hat estimate with base-rate information.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--test_bs', type=int, default=128)
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs. Use when SGDR is off.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers (default: 28)')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor (default: 10)')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability (default: 0.0)')
parser.add_argument('--nonlinearity', type=str, default='relu', help='Nonlinearity (relu, elu, gelu).')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
# i/o
parser.add_argument('--log', type=str, default='./', help='Log folder.')
# other
parser.add_argument('--lambda_choice', choices=['theirs', '1_minus_theirs', '0.5'], default='theirs')
args = parser.parse_args()
print()
print("This is on machine:", socket.gethostname())
print()
print(args)
print()
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
log = open(os.path.join(args.log, args.dataset + '_log.txt'), 'w')
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
log.write(json.dumps(state) + '\n')
sn_state = {k: v for k, v in args._get_kwargs()}
sn_state['tt'] = 0 # SGDR variable
sn_state['init_learning_rate'] = args.learning_rate
log.write(json.dumps(sn_state) + '\n')
class Dataset(object):
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class TensorDataset(Dataset):
def __init__(self, data_tensor, target_tensor, transform):
# assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
self.transform = transform
def __getitem__(self, index):
img, target = self.data_tensor[index], self.target_tensor[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return self.target_tensor.size()[0]
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data_gold = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
for i in range(50):
indices = np.arange(len(train_data_gold.train_data))
np.random.shuffle(indices)
combo_train_data_gold = TensorDataset(train_data_gold.train_data[indices][len(train_data_gold.train_data)//4:],
torch.from_numpy(np.array(train_data_gold.train_labels)[indices][len(train_data_gold.train_data)//4:]),
train_transform)
combo_val_data_gold = TensorDataset(train_data_gold.train_data[indices][:len(train_data_gold.train_data)//4],
torch.from_numpy(np.array(train_data_gold.train_labels)[indices][:len(train_data_gold.train_data)//4]),
test_transform)
if (len(np.unique(combo_val_data_gold.target_tensor.numpy())) == 10) and (len(np.unique(combo_train_data_gold.target_tensor.numpy())) == 10):
print('Successfully split gold into a train and dev set with all the classes in each.')
break
train_data_silver = CIFAR10(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR10(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR10(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 10
elif args.dataset == 'cifar100':
train_data_gold = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True)
for i in range(50):
indices = np.arange(len(train_data_gold.train_data))
np.random.shuffle(indices)
combo_train_data_gold = TensorDataset(train_data_gold.train_data[indices][len(train_data_gold.train_data)//4:],
torch.from_numpy(np.array(train_data_gold.train_labels)[indices][len(train_data_gold.train_data)//4:]),
train_transform)
combo_val_data_gold = TensorDataset(train_data_gold.train_data[indices][:len(train_data_gold.train_data)//4],
torch.from_numpy(np.array(train_data_gold.train_labels)[indices][:len(train_data_gold.train_data)//4]),
test_transform)
if (len(np.unique(combo_val_data_gold.target_tensor.numpy())) == 100) and (len(np.unique(combo_train_data_gold.target_tensor.numpy())) == 100):
print('Successfully split gold into a train and dev set with all the classes in each.')
break
train_data_silver = CIFAR100(
args.data_path, True, False, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=train_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
train_data_gold_deterministic = CIFAR100(
args.data_path, True, True, args.gold_fraction, args.corruption_prob, args.corruption_type,
transform=test_transform, download=True, shuffle_indices=train_data_gold.shuffle_indices)
test_data = CIFAR100(args.data_path, train=False, transform=test_transform, download=True)
num_classes = 100
train_silver_loader = torch.utils.data.DataLoader(
train_data_silver, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
combo_train_gold_loader = torch.utils.data.DataLoader(
combo_train_data_gold, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
combo_val_gold_loader = torch.utils.data.DataLoader(
combo_val_data_gold, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_gold_deterministic_loader = torch.utils.data.DataLoader(
train_data_gold_deterministic, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
train_all_loader = torch.utils.data.DataLoader(
TensorDataset(np.vstack((train_data_gold.train_data, train_data_silver.train_data)),
torch.from_numpy(np.array(train_data_gold.train_labels + train_data_silver.train_labels)),
train_transform),
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Init checkpoints
if not os.path.isdir(args.save):
os.makedirs(args.save)
# Init model, criterion, and optimizer
net = wrn.WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
print(net)
small_net = wrn.WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
print(small_net)
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
small_net = torch.nn.DataParallel(small_net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
small_net.cuda()
torch.manual_seed(1)
if args.ngpu > 0:
torch.cuda.manual_seed(1)
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
sn_optimizer = torch.optim.SGD(small_net.parameters(), sn_state['learning_rate'], momentum=sn_state['momentum'],
weight_decay=sn_state['decay'], nesterov=True)
# saving so we can start again from these same weights when applying the correction
torch.save(net.state_dict(), os.path.join(
args.save, args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
# Restore model
start_epoch = 0
# if args.load != '':
# for i in range(args.epochs-1,-1,-1):
# model_name = os.path.join(args.load, args.dataset + '_model_epoch' + str(i) + '.pytorch')
# if os.path.isfile(model_name):
# net.load_state_dict(torch.load(model_name))
# start_epoch = i+1
# print('Model restored! Epoch:', i)
# break
# if start_epoch == 0:
# assert False, "could not resume"
cudnn.benchmark = True # fire on all cylinders
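# The small net is trained only on the gold train split (labels are offset by +num_classes, hence
# the subtraction below) and its average precision is measured on the held-out quarter of the gold
# data; that AP later sets the convex-combination weight lambda.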
def train_small_net():
small_net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(combo_train_gold_loader):
data, target = V(data.cuda()), V(target.cuda())
# forward
output = small_net(data)
# backward
sn_optimizer.zero_grad()
loss = F.cross_entropy(output, target - num_classes)
loss.backward()
sn_optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + loss.data[0] * 0.2
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
sn_state['tt'] += float(dt)/(len(combo_train_gold_loader.dataset)/float(args.batch_size))
if sn_state['tt'] >= math.pi - 0.05:
sn_state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + sn_state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
sn_state['learning_rate'] = new_lr
for param_group in sn_optimizer.param_groups:
param_group['lr'] = sn_state['learning_rate']
sn_state['train_loss'] = loss_avg
def get_small_net_ap():
small_net.eval()
tp_count = np.zeros(num_classes)
tp_fp_count = np.zeros(num_classes)
for batch_idx, (data, target) in enumerate(combo_val_gold_loader):
data, target = V(data.cuda(), volatile=True),\
V(target.cuda(), volatile=True)
target -= num_classes
# forward
output = small_net(data)
# if batch_idx == 10:
# break
# average precision
pred = output.data.max(1)[1]
batch_correct = pred.eq(target.data)
for i in range(len(batch_correct)):
tp_count[pred[i]] += batch_correct[i]
tp_fp_count[pred[i]] += 1
precisions = tp_count / (tp_fp_count + 1e-12)
average_precision = np.mean(precisions)
return average_precision
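# get_C2_hat applies a base-rate (Bayes) adjustment to the estimated corruption matrix:
# entry [y_tilde, y] becomes p(y_tilde | y) * p(y) / p(y_tilde), i.e. an estimate of p(y | y_tilde).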
def get_C2_hat(C_hat):
"""
:param C_hat: an estimate from our method as a numpy array
"""
y_br = np.zeros(num_classes)
y_tilde_br = np.zeros(num_classes)
for i in range(len(train_data_gold)):
y_br[train_data_gold.train_labels[i] - num_classes] += 1
for i in range(len(train_data_silver)):
y_tilde_br[train_data_silver.train_labels[i]] += 1
y_br /= np.sum(y_br)
y_tilde_br /= np.sum(y_tilde_br)
C2_hat = (C_hat.T * y_br.reshape(1, num_classes)) / y_tilde_br.reshape(num_classes, 1)
return C2_hat
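# get_noisy_labels_ap treats the noisy labels themselves as a classifier and estimates their
# average precision: per class the precision is roughly p(y = i | y_tilde = i) =
# C_hat[i][i] * p(y = i) / p(y_tilde = i), averaged over the classes that occur among the noisy labels.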
def get_noisy_labels_ap(C_hat):
"""
:param C_hat: an estimate from our method as a numpy array
"""
y_br = np.zeros(num_classes)
y_tilde_br = np.zeros(num_classes)
for i in range(len(train_data_gold)):
y_br[train_data_gold.train_labels[i] - num_classes] += 1
for i in range(len(train_data_silver)):
y_tilde_br[train_data_silver.train_labels[i]] += 1
y_br /= np.sum(y_br)
y_tilde_br /= np.sum(y_tilde_br)
print(y_br)
print(y_tilde_br)
print(C_hat.sum(1))
print(C_hat)
average_precision = 0
classes_predicted = 0
for i in range(num_classes):
if y_tilde_br[i] != 0:
average_precision += C_hat[i][i] * y_br[i] / y_tilde_br[i]
classes_predicted += 1
average_precision /= classes_predicted
return average_precision
def test_small_net():
small_net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = V(data.cuda(), volatile=True),\
V(target.cuda(), volatile=True)
# forward
output = small_net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum()
# test loss average
loss_avg += loss.data[0]
sn_state['test_loss'] = loss_avg / len(test_loader)
sn_state['test_accuracy'] = correct / len(test_loader.dataset)
def train_phase1():
net.train() # enter train mode
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_silver_loader):
data, target = V(data.cuda()), V(target.cuda())
# forward
output = net(data)
# backward
optimizer.zero_grad()
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + loss.data[0] * 0.2
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_silver_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
# test function (forward only)
def test():
net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = V(data.cuda(), volatile=True),\
V(target.cuda(), volatile=True)
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum()
# test loss average
loss_avg += loss.data[0]
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
# Main loops
print('Training small net:\n')
for epoch in range(start_epoch, args.epochs):
# if epoch < 150:
# state['learning_rate'] = state['init_learning_rate']
# elif epoch >= 150 and epoch < 225:
# state['learning_rate'] = state['init_learning_rate'] * args.gamma
# elif epoch >= 225:
# state['learning_rate'] = state['init_learning_rate'] * (args.gamma ** 2)
# for param_group in optimizer.param_groups:
# param_group['lr'] = state['learning_rate']
sn_state['epoch'] = epoch
begin_epoch = time.time()
train_small_net()
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test_small_net()
print('Small net average precision:', get_small_net_ap())
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(sn_state))
log.flush()
print(sn_state)
print('\n\nTraining C_hat estimation net:\n')
for epoch in range(start_epoch, args.epochs):
# if epoch < 150:
# state['learning_rate'] = state['init_learning_rate']
# elif epoch >= 150 and epoch < 225:
# state['learning_rate'] = state['init_learning_rate'] * args.gamma
# elif epoch >= 225:
# state['learning_rate'] = state['init_learning_rate'] * (args.gamma ** 2)
# for param_group in optimizer.param_groups:
# param_group['lr'] = state['learning_rate']
state['epoch'] = epoch
begin_epoch = time.time()
train_phase1()
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
def get_C_hat_transpose():
probs = []
net.eval()
for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
# we subtract 10 because we added 10 to gold so we could identify which example is gold in train_phase2
data, target = V(data.cuda(), volatile=True),\
V((target - num_classes).cuda(), volatile=True)
# forward
output = net(data)
pred = F.softmax(output)
probs.extend(list(pred.data.cpu().numpy()))
probs = np.array(probs, dtype=np.float32)
C_hat = np.zeros((num_classes, num_classes))
for label in range(num_classes):
indices = np.arange(len(train_data_gold.train_labels))[
np.isclose(np.array(train_data_gold.train_labels) - num_classes, label)]
C_hat[label] = np.mean(probs[indices], axis=0, keepdims=True)
# if args.adjust is True:
# base_rate_clean = [0] * num_classes
# base_rate_corr = [0] * num_classes
# for label in range(num_classes):
# base_rate_clean[label] = sum(np.isclose(np.array(train_data_gold.train_labels) - num_classes, label))
# base_rate_corr[label] = sum(np.isclose(np.array(train_data_silver.train_labels), label))
# base_rate_clean = np.array(base_rate_clean).reshape((1, -1)) / len(train_data_gold.train_labels)
# base_rate_corr = np.array(base_rate_corr).reshape((1, -1)) / len(train_data_silver.train_labels)
# C_hat_better = cvxpy.Variable(num_classes, num_classes)
# objective = cvxpy.Minimize(
# 1e-2 * cvxpy.sum_squares(C_hat_better - C_hat) / num_classes +
# cvxpy.sum_squares(base_rate_clean * C_hat_better - base_rate_corr))
# constraints = [0 <= C_hat_better, C_hat_better <= 1, 1 == cvxpy.sum_entries(C_hat_better, axis=1)]
# prob = cvxpy.Problem(objective, constraints)
# prob.solve()
# C_hat = np.array(C_hat_better.value)
return C_hat.T.astype(np.float32)
C_hat = get_C_hat_transpose().T
clean_ap = get_small_net_ap()
noisy_ap = get_noisy_labels_ap(C_hat)
if args.lambda_choice == 'theirs':
combo_lambda = float(clean_ap / (noisy_ap + clean_ap))
elif args.lambda_choice == '1_minus_theirs':
combo_lambda = 1 - float(clean_ap / (noisy_ap + clean_ap))
elif args.lambda_choice == '0.5':
combo_lambda = 0.5
print('Clean AP: {}, Noisy AP: {}, Combo Lambda: {}'.format(clean_ap, noisy_ap, combo_lambda))
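# In train_phase2 below, each example's target is the convex combination
# lambda * one_hot(label) + (1 - lambda) * softmax(small_net(x)), so lambda trades off trust in
# the (possibly noisy) labels against the gold-trained small net's predictions; no corruption-matrix
# correction is applied here (C_hat_transpose is set to the identity).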
print('\n\nNow beginning training of main net\n')
C_hat_transpose = torch.from_numpy(np.eye(num_classes, dtype=np.float32)) # not using our correction with convex combo
C_hat_transpose = V(C_hat_transpose.cuda(), requires_grad=False)
# /////// Resetting the network ////////
state = {k: v for k, v in args._get_kwargs()}
state['tt'] = 0 # SGDR variable
state['init_learning_rate'] = args.learning_rate
state['learning_rate'] = state['init_learning_rate']
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
model_name = os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch')
net.load_state_dict(torch.load(model_name))
def train_phase2(C_hat_transpose):
net.train() # enter train mode
small_net.eval()
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_all_loader):
# we subtract num_classes because we added num_classes to allow us to identify gold examples
data, target = data.numpy(), target.numpy()
gold_indices = target > (num_classes - 1)
gold_len = np.sum(gold_indices)
if gold_len > 0:
data_g, target_g = data[gold_indices], target[gold_indices] - num_classes
data_g, target_g = V(torch.FloatTensor(data_g).cuda()),\
V(torch.from_numpy(target_g).long().cuda())
silver_indices = target < num_classes
silver_len = np.sum(silver_indices)
if silver_len > 0:
data_s, target_s = data[silver_indices], target[silver_indices]
data_s, target_s = V(torch.FloatTensor(data_s).cuda()),\
V(torch.from_numpy(target_s).long().cuda())
optimizer.zero_grad()
# forward
loss_s = 0
if silver_len > 0:
output_s = torch.matmul(F.softmax(net(data_s)), C_hat_transpose.t())
soft_target = F.softmax(V(small_net(data_s).data))
target_one_hot = torch.FloatTensor(int(silver_len), num_classes)
target_one_hot.zero_().scatter_(1, target_s.cpu().data.unsqueeze(1), 1.0)
target_one_hot = V(target_one_hot).cuda()
target_combo = (combo_lambda * target_one_hot) + ((1 - combo_lambda) * soft_target)
loss_s = -(target_combo * torch.log(output_s)).sum()
loss_g = 0
if gold_len > 0:
output_g = F.softmax(net(data_g))
soft_target = F.softmax(V(small_net(data_g).data))
target_one_hot = torch.FloatTensor(int(gold_len), num_classes)
target_one_hot.zero_().scatter_(1, target_g.cpu().data.unsqueeze(1), 1.0)
target_one_hot = V(target_one_hot).cuda()
target_combo = (combo_lambda * target_one_hot) + ((1 - combo_lambda) * soft_target)
            loss_g = -(target_combo * torch.log(output_g)).sum()
# backward
loss = (loss_s + loss_g)/args.batch_size
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + float(loss.cpu().data.numpy()[0]) * 0.2
if args.nosgdr is False: # Use a cyclic learning rate
dt = math.pi/float(args.epochs)
state['tt'] += float(dt)/(len(train_all_loader.dataset)/float(args.batch_size))
if state['tt'] >= math.pi - 0.05:
state['tt'] = math.pi - 0.05
curT = math.pi/2.0 + state['tt']
new_lr = args.learning_rate * (1.0 + math.sin(curT))/2.0 # lr_min = 0, lr_max = lr
state['learning_rate'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['train_loss'] = loss_avg
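# Worked example of the cyclic schedule above (values illustrative): state['tt'] sweeps
# from 0 towards pi over training, and with args.learning_rate = 0.1 the rule
#   new_lr = 0.1 * (1.0 + math.sin(math.pi / 2.0 + tt)) / 2.0
# gives 0.1 at tt = 0, 0.05 at tt = pi / 2, and approaches 0 as tt nears pi
# (tt is clamped at pi - 0.05, so the rate never reaches exactly zero).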
# Main loop
for epoch in range(0, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train_phase2(C_hat_transpose)
print('Epoch', epoch, '| Time Spent:', round(time.time() - begin_epoch, 2))
test()
# torch.save(net.state_dict(), os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch) + '.pytorch'))
# Let us not waste space and delete the previous model
# We do not overwrite the model because we need the epoch number
# try: os.remove(os.path.join(args.save, args.dataset + '_model_epoch' + str(epoch-1) + '.pytorch'))
# except: True # prodigious programming form
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
log.close()
try: os.remove(os.path.join(
args.save,
args.dataset+'_'+str(args.gold_fraction) + str(args.corruption_prob) + args.corruption_type + '_init.pytorch'))
except: True
| 27,037 | 39.235119 | 151 | py |
glc | glc-master/CIFAR/wideresnet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.equalInOut:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
if not self.equalInOut:
return torch.add(self.convShortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert((depth - 4) % 6 == 0)
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
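# Minimal usage sketch, assuming CIFAR-sized 32x32 RGB inputs; the depth and
# widen_factor values below are illustrative, not prescribed by this file.
if __name__ == '__main__':
    _net = WideResNet(depth=40, num_classes=10, widen_factor=2, dropRate=0.3)
    _logits = _net(torch.randn(8, 3, 32, 32))
    # the 8x8 average pooling in forward() assumes 32x32 inputs, giving (8, 10) logits
    assert _logits.shape == (8, 10)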
| 3,863 | 41 | 116 | py |
gccaps | gccaps-master/gccaps/gated_conv.py | from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Dropout
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Multiply
def block(x, n_filters=64, pool_size=(2, 2), dropout_rate=0.2):
"""Apply two gated convolutions followed by a max-pooling operation.
Batch normalization and dropout are applied for regularization.
Args:
x (tensor): Input tensor to transform.
n_filters (int): Number of filters for each gated convolution.
pool_size (int or tuple): Pool size of max-pooling operation.
dropout_rate (float): Fraction of units to drop.
Returns:
A Keras tensor of the resulting output.
"""
x = GatedConv(n_filters, padding='same')(x)
x = BatchNormalization(axis=-1)(x)
x = Dropout(rate=dropout_rate)(x)
x = GatedConv(n_filters, padding='same')(x)
x = BatchNormalization(axis=-1)(x)
x = Dropout(rate=dropout_rate)(x)
return MaxPooling2D(pool_size=pool_size)(x)
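# Minimal usage sketch (shapes and names are illustrative assumptions): chaining two
# blocks on a log-mel spectrogram input of shape (time, mels, 1) = (240, 64, 1),
# with Input taken from keras.layers:
#   inp = Input(shape=(240, 64, 1))
#   x = block(inp, n_filters=64, pool_size=(2, 2))   # -> (batch, 120, 32, 64)
#   x = block(x, n_filters=64, pool_size=(2, 2))     # -> (batch, 60, 16, 64)
# Each block halves both spatial dimensions via the max-pooling step.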
class GatedConv(Conv2D):
"""A Keras layer implementing gated convolutions [1]_.
Args:
n_filters (int): Number of output filters.
kernel_size (int or tuple): Size of convolution kernel.
strides (int or tuple): Strides of the convolution.
padding (str): One of ``'valid'`` or ``'same'``.
kwargs: Other layer keyword arguments.
References:
.. [1] Y. N. Dauphin, A. Fan, M. Auli, and D. Grangier,
“Language modeling with gated convolutional networks,” in
Proc. 34th Int. Conf. Mach. Learn. (ICML), vol. 70,
Sydney, Australia, 2017, pp. 933–941.
"""
def __init__(self, n_filters=64, kernel_size=(3, 3), **kwargs):
super(GatedConv, self).__init__(filters=n_filters*2,
kernel_size=kernel_size,
**kwargs)
self.n_filters = n_filters
def call(self, inputs):
"""Apply gated convolution."""
output = super(GatedConv, self).call(inputs)
n_filters = self.n_filters
linear = Activation('linear')(output[:, :, :, :n_filters])
sigmoid = Activation('sigmoid')(output[:, :, :, n_filters:])
return Multiply()([linear, sigmoid])
def compute_output_shape(self, input_shape):
"""Compute shape of layer output."""
output_shape = super(GatedConv, self).compute_output_shape(input_shape)
return tuple(output_shape[:3]) + (self.n_filters,)
def get_config(self):
"""Return the config of the layer."""
config = super(GatedConv, self).get_config()
config['n_filters'] = self.n_filters
del config['filters']
return config
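# Gating note (illustrative): the layer computes 2 * n_filters feature maps and splits
# them into a linear half A and a gate half B, returning A * sigmoid(B) per channel,
# so the output has n_filters channels just like a plain Conv2D. For example, a
# position where A = 2.0 and B = 0.0 yields 2.0 * sigmoid(0.0) = 1.0.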
| 2,785 | 34.717949 | 79 | py |
gccaps | gccaps-master/gccaps/main.py | import argparse
import glob
import os
import pickle
import sys
import numpy as np
import config as cfg
import utils
def main():
"""Execute a task based on the given command-line arguments.
This function is the main entry-point of the program. It allows the
user to extract features, train a model, generate predictions, or
evaluate predictions using the command-line interface.
"""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='mode')
# Add sub-parser for feature extraction
parser_extract = subparsers.add_parser('extract')
parser_extract.add_argument('dataset',
choices=['training', 'validation', 'test'],
)
# Add sub-parser for training
subparsers.add_parser('train')
# Add sub-parser for inference
parser_predict = subparsers.add_parser('predict')
parser_predict.add_argument('dataset',
nargs='?',
choices=['validation', 'test'],
default='test',
)
# Add sub-parser for evaluation
parser_evaluate = subparsers.add_parser('evaluate')
parser_evaluate.add_argument('task',
nargs='?',
choices=['tagging', 'sed', 'all'],
default='all',
)
parser_evaluate.add_argument('dataset',
nargs='?',
choices=['validation', 'test'],
default='test',
)
parser_evaluate.add_argument('--thresholds', action='store_true')
args = parser.parse_args()
if args.mode == 'extract':
extract(cfg.to_dataset(args.dataset))
elif args.mode == 'train':
train()
elif args.mode == 'predict':
predict(cfg.to_dataset(args.dataset))
elif args.mode == 'evaluate':
eval_all = args.task == 'all'
dataset = cfg.to_dataset(args.dataset)
if args.task == 'tagging' or eval_all:
evaluate_audio_tagging(dataset, args.thresholds)
if args.task == 'sed' or eval_all:
evaluate_sed(dataset)
def extract(dataset):
"""Extract feature vectors from the given dataset.
Args:
dataset: Dataset to extract features from.
"""
import data_augmentation as aug
import features
# Use a logmel representation for feature extraction
extractor = features.LogmelExtractor(sample_rate=cfg.sample_rate,
n_window=cfg.n_window,
hop_length=cfg.hop_length,
n_mels=cfg.n_mels,
)
# Prepare for data augmentation if enabled
file_names, target_values = utils.read_metadata(dataset.metadata_path)
if dataset == cfg.training_set and cfg.enable_augmentation:
n_transforms_iter = aug.transform_counts(target_values)
file_names = aug.expand_metadata((file_names, target_values))[0]
else:
n_transforms_iter = None
# Ensure output directory exists and set file path
os.makedirs(cfg.extraction_path, exist_ok=True)
output_path = os.path.join(cfg.extraction_path, dataset.name + '.h5')
# Save free parameters to disk
utils.log_parameters(cfg.logmel, os.path.join(cfg.extraction_path,
'parameters.json'))
# Generate features for each audio clip in the dataset
features.extract_dataset(dataset.path,
file_names,
extractor,
cfg.clip_duration,
output_path,
n_transforms_iter=n_transforms_iter,
)
def train():
"""Train the neural network model.
See Also:
:func:`training.train`
Note:
For reproducibility, the random seed is set to a fixed value.
"""
import training
# Ensure output directories exist
os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)
os.makedirs(cfg.model_path, exist_ok=True)
os.makedirs(cfg.log_path, exist_ok=True)
# Load (standardized) input data and target values
tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)
val_x, val_y, _ = _load_data(cfg.validation_set)
# Try to create reproducible results
np.random.seed(cfg.initial_seed)
# Save free parameters to disk
utils.log_parameters(cfg.training, os.path.join(cfg.model_path,
'parameters.json'))
training.train(tr_x, tr_y, val_x, val_y)
def predict(dataset):
"""Generate predictions for audio tagging and sound event detection.
This function uses an ensemble of trained models to generate the
predictions, with the averaging function being an arithmetic mean.
Computed predictions are then saved to disk.
Args:
dataset: Dataset to generate predictions for.
"""
import capsnet
# Load (standardized) input data and associated file names
test_x, _, names = _load_data(dataset)
# Predict class probabilities for each model (epoch)
at_preds, sed_preds = [], []
for epoch in _determine_epochs(cfg.prediction_epochs):
model = _load_model(epoch)
at_pred, sed_pred = utils.timeit(
lambda: capsnet.gccaps_predict(test_x, model),
'[Epoch %d] Predicted class probabilities' % epoch)
at_preds.append(at_pred)
sed_preds.append(sed_pred)
# Average predictions to give an overall output
total_at_pred = np.mean(at_preds, axis=0)
total_sed_pred = np.mean(sed_preds, axis=0)
# Ensure output directory exists and set file path format
os.makedirs(os.path.dirname(cfg.predictions_path), exist_ok=True)
predictions_path = cfg.predictions_path.format('%s', dataset.name)
# Save free parameters to disk
utils.log_parameters({'prediction_epochs': cfg.prediction_epochs},
os.path.join(os.path.dirname(cfg.predictions_path),
'parameters.json'))
# Write predictions to disk
utils.write_predictions(names, total_at_pred, predictions_path % 'at')
utils.write_predictions(names, total_sed_pred, predictions_path % 'sed')
def evaluate_audio_tagging(dataset, compute_thresholds=False):
"""Evaluate the audio tagging predictions and write results.
Args:
dataset: Dataset for retrieving ground truth.
compute_thresholds (bool): Whether to compute and record
per-class optimal thresholds.
See Also:
:func:`evaluation.compute_thresholds`
"""
import evaluation
_, y_true = utils.read_metadata(dataset.metadata_path)
path = cfg.predictions_path.format('at', dataset.name)
_, y_pred = utils.read_predictions(path)
# Compute thresholds if flag is set
if compute_thresholds:
thresholds = evaluation.compute_thresholds(y_true, y_pred)
output_path = os.path.join(os.path.dirname(cfg.predictions_path),
'thresholds.p')
with open(output_path, 'wb') as f:
pickle.dump(thresholds, f)
# Evaluate audio tagging performance
threshold = _determine_threshold(cfg.at_threshold)
scores = evaluation.evaluate_audio_tagging(
y_true, y_pred, threshold=threshold)
    # Ensure output directory exists and write results
os.makedirs(os.path.dirname(cfg.results_path), exist_ok=True)
output_path = cfg.results_path.format('at', dataset.name)
evaluation.write_audio_tagging_results(scores, output_path)
def evaluate_sed(dataset):
"""Evaluate the sound event detection predictions and print results.
Args:
dataset: Dataset for retrieving ground truth.
"""
import evaluation
import inference
names, ground_truth = utils.read_metadata(dataset.metadata_path,
weakly_labeled=False)
# Load and binarize predictions
path = cfg.predictions_path.format('sed', dataset.name)
_, y_pred = utils.read_predictions(path)
threshold = _determine_threshold(cfg.sed_threshold)
y_pred_b = inference.binarize_predictions_3d(y_pred,
threshold=threshold,
n_dilation=cfg.sed_dilation,
n_erosion=cfg.sed_erosion)
# Convert to event list format and evaluate SED performance
resolution = cfg.clip_duration / y_pred.shape[2]
predictions = inference.generate_event_lists(y_pred_b, resolution)
metrics = evaluation.evaluate_sed(ground_truth, predictions, names)
    # Ensure output directory exists and write results
os.makedirs(os.path.dirname(cfg.results_path), exist_ok=True)
output_path = cfg.results_path.format('sed', dataset.name)
with open(output_path, 'w') as f:
f.write(metrics.result_report_overall())
f.write(metrics.result_report_class_wise())
def _load_data(dataset, is_training=False):
"""Load input data, target values and file names for a dataset.
The input data is assumed to be a dataset of feature vectors. These
feature vectors are standardized using a scaler that is either
loaded from disk (if it exists) or computed on-the-fly. The latter
is only possible if the input data is training data, which is
indicated by the `is_training` parameter.
Target values and file names are read from the metadata file.
Args:
dataset: Structure encapsulating dataset information.
        is_training (bool): Whether the input data is training data.
Returns:
x (np.ndarray): The input data.
y (np.ndarray): The target values.
names (list): The associated file names.
"""
import data_augmentation as aug
import features
features_path = os.path.join(cfg.extraction_path, dataset.name + '.h5')
x = utils.timeit(lambda: features.load_features(features_path),
'Loaded features of %s dataset' % dataset.name)
# Clip dynamic range to 90 dB
x = np.maximum(x, x.max() - 90.0)
# Load scaler from file if cached, or else compute it.
scaler_path = cfg.scaler_path
if os.path.exists(scaler_path) or not is_training:
with open(scaler_path, 'rb') as f:
scaler = pickle.load(f)
else:
scaler = utils.timeit(lambda: utils.compute_scaler(x),
'Computed standard scaler')
with open(scaler_path, 'wb') as f:
pickle.dump(scaler, f)
x = utils.timeit(lambda: utils.standardize(x, scaler),
'Standardized %s features' % dataset.name)
names, y = utils.timeit(lambda: utils.read_metadata(dataset.metadata_path),
'Loaded %s metadata' % dataset.name)
if dataset == cfg.training_set and cfg.enable_augmentation:
names, y = aug.expand_metadata((names, y))
return x, y, names
def _determine_epochs(spec, n=5):
"""Return a list of epoch numbers based on the given argument.
If `spec` is a list, this function simply returns the list.
Otherwise, `spec` should be a string, in which case this function
returns the top `n` epochs based on the training history file
and the contents of `spec`. For example, if `spec` is ``'val_acc'``,
the epochs that achieved the highest accuracy are returned.
Args:
spec: A list of epoch numbers or a string specifying how to
select the epoch numbers.
Returns:
list: The relevant epoch numbers.
"""
if type(spec) is list:
return spec
history = utils.read_training_history(cfg.history_path, ordering=spec)
return [int(epoch) + 1 for epoch, *_ in history[:n]]
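# Illustrative example (values assumed): with spec = 'val_acc' and a history whose
# best three epochs by validation accuracy are rows 14, 9 and 22 (zero-based),
# _determine_epochs('val_acc', n=3) returns [15, 10, 23]; the "+ 1" converts to the
# one-based epoch numbers used in the checkpoint file names. Passing a list such as
# [10, 20] returns it unchanged.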
def _determine_threshold(threshold, clip_min=0.1, clip_max=0.9):
"""Return the actual threshold(s) to use based on the given value.
Args:
threshold (number or list): A value of -1 indicates that the
thresholds should be loaded from a file. Otherwise, it is
simply the value this function should return.
clip_min (float): Minimum value that a threshold should be. Only
enforced if the thresholds are loaded from disk.
clip_max (float): Maximum value that a threshold should be. Only
enforced if the thresholds are loaded from disk.
Returns:
float or list: The appropriate threshold(s).
"""
if threshold != -1:
return threshold
path = os.path.join(os.path.dirname(cfg.predictions_path), 'thresholds.p')
if not os.path.isfile(path):
print('Warning: Defaulting to threshold of 0.5')
return 0.5
with open(path, 'rb') as f:
thresholds = pickle.load(f)
return np.clip(thresholds, clip_min, clip_max)
def _load_model(epoch):
"""Load model based on specified epoch number.
Args:
epoch (int): Epoch number of the model to load.
Returns:
An instance of a Keras model.
"""
import keras.models
from capsules import CapsuleLayer
from gated_conv import GatedConv
model_path = glob.glob(os.path.join(
cfg.model_path, '*.%.02d*.hdf5' % epoch))[0]
custom_objects = {
'GatedConv': GatedConv,
'CapsuleLayer': CapsuleLayer,
}
return keras.models.load_model(model_path, custom_objects)
if __name__ == '__main__':
sys.exit(main())
| 13,757 | 34.367609 | 79 | py |
gccaps | gccaps-master/gccaps/capsnet.py | import numpy as np
from keras import backend as K
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import Reshape
from keras.layers import TimeDistributed
from keras.layers import BatchNormalization
from keras.models import Model
import capsules
from capsules import CapsuleLayer
import gated_conv
def gccaps(input_shape, n_classes):
"""Create a model using the *GCCaps* architecture.
Args:
input_shape (tuple): Shape of the input tensor.
n_classes (int): Number of classes for classification.
Returns:
A Keras model of the GCCaps architecture.
"""
input_tensor = Input(shape=input_shape, name='input_tensor')
x = Reshape(input_shape + (1,))(input_tensor)
# Apply three blocks of gated convolutions
x = gated_conv.block(x, n_filters=64, pool_size=(2, 2))
x = gated_conv.block(x, n_filters=64, pool_size=(2, 2))
x = gated_conv.block(x, n_filters=64, pool_size=(2, 2))
n_steps = int(x.shape[1]) # Number of time slices
# Apply primary capsule layer with batch norm and dropout
x = capsules.primary_capsules(x, n_channels=16, dim_capsule=4,
kernel_size=3, strides=(1, 2),
padding='same', activation='relu',
name='primary_capsule_conv')
x = Reshape((n_steps, -1, 4))(x)
x = BatchNormalization(axis=-1)(x)
x = Dropout(rate=0.5)(x)
    # Apply capsule layer to each time slice
caps = TimeDistributed(CapsuleLayer(n_capsules=n_classes,
dim_capsule=8, routings=3))(x)
caps = TimeDistributed(Lambda(capsules.length),
name='capsule_length')(caps)
# Apply 'temporal attention' layer to each time slice
att = Reshape((n_steps, -1))(x)
att = TimeDistributed(Dense(n_classes, activation='sigmoid'),
name='attention_layer')(att)
# Merge the aforementioned TimeDistributed outputs
x = Lambda(_merge, output_shape=(n_classes,),
name='merge')([caps, att])
return Model(input_tensor, x, name='GCCaps')
def gccaps_predict(x, model, batch_size=32):
"""Generate output predictions for the given input examples.
Args:
x (np.ndarray): Array of input examples.
model: Keras model of GCCaps architecture.
batch_size (int): Number of examples in a mini-batch.
Returns:
tuple: A tuple containing the audio tagging predictions and
SED predictions.
"""
# Compute audio tagging predictions
at_preds = model.predict(x, batch_size=batch_size)
# Prepare for sound event detection
input_tensor = model.get_layer('input_tensor').input
capsule_output = model.get_layer('capsule_length').output
func = K.function([input_tensor, K.learning_phase()], [capsule_output])
# Compute sound event detection predictions
n_steps = int(np.ceil(len(x) / batch_size))
sed_preds = [func([x[batch_size*i:batch_size*(i+1)]])[0]
for i in range(n_steps)]
# Transpose so that final dimension is the time axis
sed_preds = np.transpose(np.concatenate(sed_preds), (0, 2, 1))
return at_preds, sed_preds
def _merge(inputs):
"""Merge the given pair of inputs across the temporal dimension.
Args:
inputs (list): Pair of inputs to merge. Each input should be a
T x L Keras tensor (excluding batch dimension), where T is
the temporal dimension and L is the number of classes.
Returns:
A Keras tensor (vector) of length L.
"""
caps, att = inputs
att = K.clip(att, K.epsilon(), 1.)
return K.sum(caps * att, axis=1) / K.sum(att, axis=1)
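# Worked example of the merge (numbers invented): with T = 2 time slices and a single
# class, capsule lengths caps = [0.9, 0.2] and attention weights att = [0.8, 0.1] give
#   (0.9 * 0.8 + 0.2 * 0.1) / (0.8 + 0.1) = 0.74 / 0.9 ~= 0.82,
# i.e. an attention-weighted average of the per-slice class presence.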
| 3,817 | 33.089286 | 75 | py |
gccaps | gccaps-master/gccaps/training.py | import os
from sklearn import metrics
from keras.callbacks import Callback
from keras.callbacks import CSVLogger
from keras.callbacks import EarlyStopping
from keras.callbacks import LearningRateScheduler
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
import keras.utils
import capsnet
import config as cfg
import data_generator
import evaluation
import inference
def train(tr_x, tr_y, val_x, val_y):
"""Train a neural network using the given training set.
Args:
tr_x (np.ndarray): Array of training examples.
tr_y (np.ndarray): Target values of the training examples.
val_x (np.ndarray): Array of validation examples.
val_y (np.ndarray): Target values of the validation examples.
"""
# Create model and print summary
model = capsnet.gccaps(input_shape=tr_x.shape[1:],
n_classes=tr_y.shape[1])
_print_model_summary(model)
# Use Adam SGD optimizer
optimizer = Adam(lr=cfg.learning_rate['initial'])
model.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'],
)
# Create the appropriate callbacks to use during training
callbacks = _create_callbacks()
# Set a large value for `n_epochs` if early stopping is used
n_epochs = cfg.n_epochs
if n_epochs < 0:
n_epochs = 10000
# Train model using class-balancing generator
batch_size = cfg.batch_size
generator = data_generator.balanced_generator(tr_x, tr_y, batch_size)
steps_per_epoch = len(tr_x) // batch_size
return model.fit_generator(generator=generator,
steps_per_epoch=steps_per_epoch,
epochs=n_epochs,
callbacks=callbacks,
validation_data=(val_x, val_y),
use_multiprocessing=False,
)
class EERLogger(Callback):
"""A callback for computing the equal error rate (EER).
At the end of each epoch, the EER is computed and logged for the
predictions of the validation dataset.
"""
def on_epoch_end(self, epoch, logs=None):
"""Compute the EER of the validation set predictions."""
x, y_true = self.validation_data[:2]
y_pred = self.model.predict(x)
rate = evaluation.compute_eer(y_true.flatten(), y_pred.flatten())
# Log the computed value
logs = logs or {}
logs['val_eer'] = rate
class MAPLogger(Callback):
"""A callback for computing the mean average precision at k (MAP@k).
At the end of each epoch, the MAP is computed and logged for the
predictions of the validation dataset. It is assumed that the ground
truths are single-label.
Args:
k (int): The maximum number of predicted elements.
Attributes:
k (int): The maximum number of predicted elements.
"""
def __init__(self, k=3):
super(MAPLogger, self).__init__()
self.k = k
def on_epoch_end(self, epoch, logs=None):
"""Compute the MAP of the validation set predictions."""
x, y_true = self.validation_data[:2]
y_pred = self.model.predict(x)
map_k = evaluation.compute_map(y_true, y_pred, self.k)
# Log the computed value
logs = logs or {}
logs['val_map'] = map_k
class F1ScoreLogger(Callback):
"""A callback for computing the F1 score.
At the end of each epoch, the F1 score is computed and logged for
the predictions of the validation dataset.
Args:
threshold (float): Threshold used to binarize predictions.
Attributes:
threshold (float): Threshold used to binarize predictions.
"""
def __init__(self, threshold=0.5):
super(F1ScoreLogger, self).__init__()
self.threshold = threshold
def on_epoch_end(self, epoch, logs=None):
"""Compute the F1 score of the validation set predictions."""
x, y_true = self.validation_data[:2]
y_pred = self.model.predict(x)
y_pred_b = inference.binarize_predictions_2d(y_pred, self.threshold)
f1_score = metrics.f1_score(y_true, y_pred_b, average='micro')
# Log the computed value
logs = logs or {}
logs['val_f1_score'] = f1_score
def _print_model_summary(model):
"""Print a summary of the model and also write the summary to disk.
Args:
model: The Keras model to summarize.
"""
keras.utils.print_summary(model)
with open(os.path.join(cfg.model_path, 'summary.txt'), 'w') as f:
keras.utils.print_summary(model, print_fn=lambda s: f.write(s + '\n'))
def _create_callbacks():
"""Create a list of training callbacks.
Up to four callbacks are included in the list:
* A callback for saving models.
* A callback for using TensorBoard.
* An optional callback for learning rate decay.
* An optional callback for early stopping.
Returns:
list: List of Keras callbacks.
"""
# Create callbacks for computing various metrics and logging them
callbacks = [F1ScoreLogger(), EERLogger(), CSVLogger(cfg.history_path)]
# Create callback to save model after every epoch
model_path = cfg.model_path
path = os.path.join(model_path, 'gccaps.{epoch:02d}-{val_acc:.4f}.hdf5')
callbacks.append(ModelCheckpoint(filepath=path,
monitor='val_acc',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1,
))
# Create callback for TensorBoard logs
callbacks.append(TensorBoard(cfg.log_path, batch_size=cfg.batch_size))
lr_decay = cfg.learning_rate['decay']
if lr_decay < 1.:
# Create callback to decay learning rate
def _lr_schedule(epoch, lr):
decay = epoch % cfg.learning_rate['decay_rate'] == 0
return lr * lr_decay if decay else lr
callbacks.append(LearningRateScheduler(schedule=_lr_schedule))
if cfg.n_epochs == -1:
# Create callback to use an early stopping condition
callbacks.append(EarlyStopping(monitor='val_loss',
min_delta=0,
patience=5,
))
return callbacks
| 6,619 | 32.948718 | 78 | py |
gccaps | gccaps-master/gccaps/capsules.py | """See Also: https://github.com/XifengGuo/CapsNet-Keras"""
import keras.backend as K
import keras.initializers as initializers
from keras.layers import Conv2D
from keras.layers import Layer
from keras.layers import Lambda
from keras.layers import Reshape
class CapsuleLayer(Layer):
"""A Keras layer implementing capsule routing [1]_.
Args:
n_capsules (int): Number of output capsules.
dim_capsule (int): Number of units per output capsule.
routings (int): Number of routing iterations.
use_bias (bool): Whether to use a bias vector.
kernel_initializer: Initializer for the kernel weights.
bias_initializer: Initializer for the bias weights.
kwargs: Other layer keyword arguments.
Attributes:
n_capsules (int): Number of output capsules.
dim_capsule (int): Number of units per output capsule.
routings (int): Number of routing iterations.
use_bias (bool): Whether to use a bias vector.
kernel_initializer: Initializer for the kernel weights.
bias_initializer: Initializer for the bias weights.
References:
.. [1] S. Sabour, N. Frosst, and G. E. Hinton, “Dynamic routing
between capsules,” in Adv. Neural Inf. Process. Syst.
(NIPS), Long Beach, CA, 2017, pp. 3859–3869.
"""
def __init__(self, n_capsules, dim_capsule, routings=3, use_bias=False,
kernel_initializer='glorot_uniform', bias_initializer='zeros',
**kwargs):
super(CapsuleLayer, self).__init__(**kwargs)
self.n_capsules = n_capsules
self.dim_capsule = dim_capsule
self.routings = routings
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
"""Create the layer weights."""
self.n_input_capsules = input_shape[1]
self.dim_input_capsule = input_shape[2]
self.W = self.add_weight(shape=(self.n_capsules,
self.n_input_capsules,
self.dim_capsule,
self.dim_input_capsule),
initializer=self.kernel_initializer,
name='W')
if self.use_bias:
self.bias = self.add_weight(shape=(self.n_capsules,),
initializer=self.bias_initializer,
name='bias')
super(CapsuleLayer, self).build(input_shape)
def call(self, inputs, training=None):
"""Apply transformation followed by capsule routing."""
# Create dimension for output capsules and tile along this dim
# (None, *n_capsules*, n_input_capsules, dim_input_capsules)
inputs_tiled = K.tile(K.expand_dims(inputs, 1),
[1, self.n_capsules, 1, 1])
# Apply linear transformation to compute prediction vectors
inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]),
elems=inputs_tiled)
# Add bias to prediction vectors if specified
if self.use_bias:
inputs_hat = K.bias_add(inputs_hat, self.bias,
data_format='channels_first')
# Initialize logit variables to zero
b = K.zeros(shape=[K.shape(inputs_hat)[0],
self.n_capsules,
self.n_input_capsules])
# Apply routing algorithm
for i in range(self.routings):
# Compute coupling coefficients
c = K.softmax(b, axis=1)
            # Apply squashing function
outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]))
# Update logits by computing agreement
if i < self.routings - 1:
b += K.batch_dot(outputs, inputs_hat, [2, 3])
return outputs
def compute_output_shape(self, input_shape):
"""Compute shape of layer output."""
return (input_shape[0], self.n_capsules, self.dim_capsule)
def get_config(self):
"""Return the config of the layer."""
config = super(CapsuleLayer, self).get_config()
config['n_capsules'] = self.n_capsules
config['dim_capsule'] = self.dim_capsule
config['routings'] = self.routings
config['kernel_initializer'] = self.kernel_initializer
config['use_bias'] = self.use_bias
return config
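# Routing recap (explanatory sketch): call() forms prediction vectors
# u_hat[j|i] = W[j, i] . u[i] for every (output j, input i) pair. The logits b start at
# zero, so the initial couplings c = softmax(b) spread each input evenly over the output
# capsules; each iteration squashes the coupling-weighted sum of votes into an output
# v[j] and then increases b[j, i] by the agreement <v[j], u_hat[j|i]>, so inputs whose
# votes agree with an output couple to it more strongly over the `routings` iterations.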
def primary_capsules(x, n_channels, dim_capsule, kernel_size=(3, 3), **kwargs):
"""Apply a convolution followed by a squashing function.
Args:
x (tensor): Input tensor to transform.
n_channels (int): Number of channels per capsule.
dim_capsule (int): Number of activation units per capsule.
kernel_size (int or tuple): Size of convolution kernel.
kwargs: Other layer keyword arguments.
Returns:
A Keras tensor with shape ``(None, -1, dim_capsule)``.
"""
x = Conv2D(n_channels * dim_capsule, kernel_size, **kwargs)(x)
x = Reshape((-1, dim_capsule))(x)
return Lambda(squash)(x)
def squash(x, axis=-1):
"""Apply a squashing nonlinearity as described in [1]_.
Args:
x (tensor): Input tensor to transform.
axis (int): Axis along which squashing is applied.
Returns:
A Keras tensor of the resulting output.
"""
s_squared_norm = K.sum(K.square(x), axis, keepdims=True)
scale = s_squared_norm / (1 + s_squared_norm) \
/ K.sqrt(s_squared_norm + K.epsilon())
return scale * x
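# Closed form of the squash above (epsilon omitted):
#   squash(s) = (||s||^2 / (1 + ||s||^2)) * (s / ||s||)
# e.g. a capsule vector of norm 1 is rescaled to length 0.5 and one of norm 3 to
# length 0.9, so capsule lengths always stay strictly below 1.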
def length(x):
"""Compute the Euclidean lengths of capsules.
Args:
x (tensor): Tensor of capsules.
Returns:
A Keras tensor of lengths.
"""
return K.sqrt(K.sum(K.square(x), -1))
| 5,929 | 36.531646 | 79 | py |
gccaps | gccaps-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../gccaps'))
# -- Project information -----------------------------------------------------
project = 'gccaps'
copyright = '2018, Turab Iqbal'
author = 'Turab Iqbal'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'gccapsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'gccaps.tex', 'gccaps Documentation',
'Turab Iqbal', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gccaps', 'gccaps Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'gccaps', 'gccaps Documentation',
author, 'gccaps', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| 4,903 | 29.08589 | 79 | py |
GradNCP | GradNCP-main/main.py | import torch
from torch.utils.data import DataLoader
from common.args import parse_args
from common.utils import InfiniteSampler, get_optimizer, load_model
from data.dataset import get_dataset
from models.model import get_model
from train.trainer import meta_trainer
from utils import Logger, set_random_seed
def main(rank, P):
P.rank = rank
""" set torch device"""
if torch.cuda.is_available():
torch.cuda.set_device(P.rank)
device = torch.device(f"cuda" if torch.cuda.is_available() else "cpu")
""" fixing randomness """
set_random_seed(P.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
""" define dataset """
train_set, test_set = get_dataset(P, dataset=P.dataset)
""" define dataloader """
kwargs = {'pin_memory': True, 'num_workers': 0}
train_sampler = InfiniteSampler(train_set, rank=rank, num_replicas=1, shuffle=True, seed=P.seed)
train_loader = DataLoader(train_set, sampler=train_sampler, batch_size=P.batch_size, num_workers=4, prefetch_factor=2)
test_loader = DataLoader(test_set, shuffle=False, batch_size=P.test_batch_size, **kwargs)
""" Initialize model, optimizer """
model = get_model(P).to(device)
optimizer = get_optimizer(P, model)
""" define train and test type """
from train import setup as train_setup
from evals import setup as test_setup
train_func, fname, today = train_setup(P.mode, P)
test_func = test_setup(P.mode, P)
""" define logger """
logger = Logger(fname, ask=P.resume_path is None, today=today, rank=P.rank)
logger.log(P)
logger.log(model)
""" load model if necessary """
load_model(P, model, logger)
""" apply data parrallel for multi-gpu training """
if P.data_parallel:
raise NotImplementedError() # Currently having some error with DP
""" train """
meta_trainer(P, train_func, test_func, model, optimizer, train_loader, test_loader, logger)
""" close tensorboard """
logger.close_writer()
if __name__ == "__main__":
""" argument define """
P = parse_args()
P.world_size = torch.cuda.device_count()
P.data_parallel = P.world_size > 1
# We use data parallel (DP) rather than distributed data parallel (DDP)
# Currently, Meta-learning with DDP cause a problem, see below issues:
# https://github.com/pytorch/pytorch/issues/47562
# https://github.com/pytorch/pytorch/issues/48531
# https://github.com/pytorch/pytorch/issues/63812
# if P.distributed:
# os.environ["MASTER_ADDR"] = 'localhost'
# os.environ["MASTER_PORT"] = P.port
# mp.spawn(main, nprocs=P.world_size, args=(P,))
# else:
main(0, P)
| 2,721 | 32.195122 | 122 | py |
GradNCP | GradNCP-main/utils.py | import pickle
import random
import shutil
import sys
from datetime import datetime, timedelta
import os
import time
from collections import OrderedDict, defaultdict, deque
import numpy as np
import torch
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Logger(object):
"""Reference: https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514"""
def __init__(self, fn, ask=True, today=True, rank=0):
self.rank = rank
self.log_path = './logs/'
self.logdir = None
if self.rank == 0:
if not os.path.exists(self.log_path):
os.mkdir(self.log_path)
self.today = today
logdir = self._make_dir(fn)
if not os.path.exists(logdir):
os.mkdir(logdir)
if len(os.listdir(logdir)) != 0 and ask:
ans = input("log_dir is not empty. All data inside log_dir will be deleted. "
"Will you proceed [y/N]? ")
if ans in ['y', 'Y']:
shutil.rmtree(logdir)
else:
exit(1)
self.set_dir(logdir)
def _make_dir(self, fn):
if self.today:
today = datetime.today().strftime("%y%m%d")
logdir = self.log_path + today + '_' + fn
else:
logdir = self.log_path + fn
return logdir
def set_dir(self, logdir, log_fn='log.txt'):
self.logdir = logdir
if not os.path.exists(logdir):
os.mkdir(logdir)
self.writer = SummaryWriter(logdir)
self.log_file = open(os.path.join(logdir, log_fn), 'a')
def close_writer(self):
if self.rank == 0:
self.writer.close()
def log(self, string):
if self.rank == 0:
self.log_file.write('[%s] %s' % (datetime.now(), string) + '\n')
self.log_file.flush()
print('[%s] %s' % (datetime.now(), string))
sys.stdout.flush()
def log_dirname(self, string):
if self.rank == 0:
self.log_file.write('%s (%s)' % (string, self.logdir) + '\n')
self.log_file.flush()
print('%s (%s)' % (string, self.logdir))
sys.stdout.flush()
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
if self.rank == 0: self.writer.add_scalar(tag, value, step)
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def load_checkpoint(logdir, mode='last'):
model_path = os.path.join(logdir, f'{mode}.model')
optim_path = os.path.join(logdir, f'{mode}.optim')
config_path = os.path.join(logdir, f'{mode}.configs')
lr_path = os.path.join(logdir, f'{mode}.lr')
print("=> Loading checkpoint from '{}'".format(logdir))
if os.path.exists(model_path):
model_state = torch.load(model_path)
optim_state = torch.load(optim_path)
with open(config_path, 'rb') as handle:
cfg = pickle.load(handle)
else:
return None, None, None, None
if os.path.exists(lr_path):
lr_dict = torch.load(lr_path)
else:
lr_dict = None
return model_state, optim_state, cfg, lr_dict
def save_checkpoint(P, step, best, model, optim_state, logdir,
is_best=False, suffix='', data_parallel=False):
if P.rank != 0:
return None
if is_best:
prefix = 'best'
else:
prefix = 'last'
if data_parallel:
model_state = model.module.state_dict()
else:
model_state = model.state_dict()
last_model = os.path.join(logdir, f'{prefix}{suffix}.model')
last_optim = os.path.join(logdir, f'{prefix}{suffix}.optim')
last_config = os.path.join(logdir, f'{prefix}{suffix}.configs')
if isinstance(P.inner_lr, OrderedDict):
last_lr = os.path.join(logdir, f'{prefix}{suffix}.lr')
torch.save(P.inner_lr, last_lr)
if hasattr(P, 'moving_average'):
last_ema = os.path.join(logdir, f'{prefix}{suffix}.ema')
torch.save(P.moving_average, last_ema)
if hasattr(P, 'moving_inner_lr'):
last_lr_ema = os.path.join(logdir, f'{prefix}{suffix}.lr_ema')
torch.save(P.moving_inner_lr, last_lr_ema)
opt = {
'step': step,
'best': best
}
torch.save(model_state, last_model)
torch.save(optim_state, last_optim)
with open(last_config, 'wb') as handle:
pickle.dump(opt, handle, protocol=pickle.HIGHEST_PROTOCOL)
def save_checkpoint_step(P, step, best, model, optim_state,
logdir, suffix='', data_parallel=False):
if P.rank != 0:
return None
if data_parallel:
model_state = model.module.state_dict()
else:
model_state = model.state_dict()
last_model = os.path.join(logdir, f'step{step}{suffix}.model')
last_optim = os.path.join(logdir, f'step{step}{suffix}.optim')
last_config = os.path.join(logdir, f'step{step}{suffix}.configs')
if isinstance(P.inner_lr, OrderedDict):
last_lr = os.path.join(logdir, f'step{step}{suffix}.lr')
torch.save(P.inner_lr, last_lr)
if hasattr(P, 'moving_average'):
last_ema = os.path.join(logdir, f'step{step}{suffix}.ema')
torch.save(P.moving_average, last_ema)
if hasattr(P, 'moving_inner_lr'):
last_lr_ema = os.path.join(logdir, f'step{step}{suffix}.lr_ema')
torch.save(P.moving_inner_lr, last_lr_ema)
opt = {
'step': step,
'best': best
}
torch.save(model_state, last_model)
torch.save(optim_state, last_optim)
with open(last_config, 'wb') as handle:
pickle.dump(opt, handle, protocol=pickle.HIGHEST_PROTOCOL)
def cycle(loader):
while True:
for x in loader:
yield x
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def reset(self):
self.deque.clear()
self.total = 0.0
self.count = 0
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def reset(self):
for meter in self.meters.values():
meter.reset()
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
        total_time_str = str(timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def psnr(mse):
return -10.0 * torch.log10(mse+1e-24)
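# Quick sanity check (illustrative): for signals normalised to [0, 1], an MSE of 1e-3
# gives psnr = -10 * log10(1e-3) = 30 dB, and every 10x reduction in MSE adds 10 dB;
# the 1e-24 term only guards against log10(0) on a perfect reconstruction.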
def get_meta_batch(P, task_data):
if P.data_type == 'img':
batch_size = task_data['imgs'].size(0)
context = [task_data['imgs']]
else:
raise NotImplementedError()
return batch_size, context
| 11,299 | 29.376344 | 93 | py |
GradNCP | GradNCP-main/eval.py | import torch
from torch.utils.data import DataLoader
from common.args import parse_args
from common.utils import load_model
from data.dataset import get_dataset
from models.model import get_model
from utils import set_random_seed
def main():
""" argument define """
P = parse_args()
P.rank = 0
""" set torch device"""
if torch.cuda.is_available():
torch.cuda.set_device(P.rank)
device = torch.device(f"cuda" if torch.cuda.is_available() else "cpu")
P.world_size = torch.cuda.device_count()
P.data_parallel = P.world_size > 1
assert not P.data_parallel # no multi GPU
""" fixing randomness """
set_random_seed(P.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
kwargs = {'batch_size': P.test_batch_size, 'shuffle': False,
'pin_memory': True, 'num_workers': 4}
test_set = get_dataset(P, dataset=P.dataset, only_test=True)
test_loader = DataLoader(test_set, **kwargs)
""" Initialize model """
model = get_model(P).to(device)
load_model(P, model)
""" define train and test type """
from evals import setup as test_setup
test_func = test_setup(P.mode, P)
""" test """
test_func(P, model, test_loader, 0.0, logger=None)
if __name__ == "__main__":
main()
| 1,320 | 26.520833 | 74 | py |
GradNCP | GradNCP-main/common/utils.py | import os
import numpy as np
import torch
import torch.optim as optim
from utils import load_checkpoint
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_optimizer(P, model):
params = model.parameters()
optimizer = optim.Adam(params, lr=P.lr)
return optimizer
def is_resume(P, model, optimizer):
if P.resume_path is not None:
model_state, optim_state, config, lr_dict = load_checkpoint(P.resume_path, mode='best')
model.load_state_dict(model_state, strict=not P.no_strict)
optimizer.load_state_dict(optim_state)
start_step = config['step']
best = config['best']
is_best = False
psnr = 0.0
if lr_dict is not None:
P.inner_lr = lr_dict
else:
is_best = False
start_step = 1
best = 0.0
psnr = 0.0
return is_best, start_step, best, psnr
def load_model(P, model, logger=None):
if logger is None:
log_ = print
else:
log_ = logger.log
if P.load_path is not None:
log_(f'Load model from {P.load_path}')
checkpoint = torch.load(P.load_path)
if P.rank != 0:
model.__init_low_rank__(rank=P.rank)
not_loaded = model.load_state_dict(checkpoint, strict=P.no_strict)
print (not_loaded)
if os.path.exists(P.load_path[:-5] + 'lr'): # Meta-SGD
log_(f'Load lr from {P.load_path[:-5]}lr')
lr = torch.load(P.load_path[:-5] + 'lr')
            for name, param in lr.items():
                lr[name] = param.to(device)
P.inner_lr = lr
class InfiniteSampler(torch.utils.data.Sampler):
def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
assert len(dataset) > 0
assert num_replicas > 0
assert 0 <= rank < num_replicas
assert 0 <= window_size <= 1
super().__init__(dataset)
self.dataset = dataset
self.rank = rank
self.num_replicas = num_replicas
self.shuffle = shuffle
self.seed = seed
self.window_size = window_size
def __iter__(self):
order = np.arange(len(self.dataset))
rnd = None
window = 0
if self.shuffle:
rnd = np.random.RandomState(self.seed)
rnd.shuffle(order)
window = int(np.rint(order.size * self.window_size))
idx = 0
while True:
i = idx % order.size
if idx % self.num_replicas == self.rank:
yield order[i]
if window >= 2:
j = (i - rnd.randint(window)) % order.size
order[i], order[j] = order[j], order[i]
idx += 1
| 2,709 | 28.456522 | 95 | py |
GradNCP | GradNCP-main/models/wrapper.py | from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def exists(val):
return val is not None
class MetaWrapper(nn.Module):
def __init__(self, P, decoder):
super().__init__()
self.P = P
self.decoder = decoder
self.data_type = P.data_type
self.sampled_coord = None
self.sampled_index = None
self.gradncp_coord = None
self.gradncp_index = None
if self.data_type == 'img':
self.width = P.data_size[1]
self.height = P.data_size[2]
mgrid = self.shape_to_coords((self.width, self.height))
mgrid = rearrange(mgrid, 'h w c -> (h w) c')
else:
raise NotImplementedError()
self.register_buffer('grid', mgrid)
def shape_to_coords(self, spatial_shape):
coords = []
for i in range(len(spatial_shape)):
coords.append(torch.linspace(-1.0, 1.0, spatial_shape[i]))
return torch.stack(torch.meshgrid(*coords), dim=-1)
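    # Shape note (illustrative): for a 32x32 image, shape_to_coords((32, 32)) returns a
    # (32, 32, 2) tensor of (x, y) positions spanning [-1, 1] in each axis; the
    # constructor flattens it to the (1024, 2) buffer `self.grid`, which is the
    # coordinate input fed to the INR decoder at every forward pass.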
def get_batch_params(self, params, batch_size):
if params is None:
params = OrderedDict()
for name, param in self.decoder.meta_named_parameters():
params[name] = param[None, ...].repeat((batch_size,) + (1,) * len(param.shape))
return params
def coord_init(self):
self.sampled_coord = None
self.sampled_index = None
self.gradncp_coord = None
self.gradncp_index = None
def get_batch_coords(self, inputs=None, params=None):
if inputs is None and params is None:
meta_batch_size = 1
elif inputs is None:
meta_batch_size = list(params.values())[0].size(0)
else:
meta_batch_size = inputs.size(0)
# batch of coordinates
if self.sampled_coord is None and self.gradncp_coord is None:
coords = self.grid
elif self.gradncp_coord is not None:
return self.gradncp_coord, meta_batch_size
else:
coords = self.sampled_coord
coords = coords.clone().detach()[None, ...].repeat((meta_batch_size,) + (1,) * len(coords.shape))
return coords, meta_batch_size
def forward(self, inputs, params=None):
if self.data_type in ['img']:
return self.forward_image(inputs, params)
else:
raise NotImplementedError()
def sample(self, sample_type, task_data, params):
if sample_type == 'random':
self.random_sample()
elif sample_type == 'gradncp':
self.gradncp(task_data, params)
else:
raise NotImplementedError()
def gradncp(self, inputs, params):
ratio = self.P.data_ratio
inputs = inputs[0]
meta_batch_size = inputs.size(0)
coords = self.grid
coords = coords.clone().detach()[None, ...].repeat((meta_batch_size,) + (1,) * len(coords.shape))
with torch.no_grad():
out, feature = self.decoder(coords, params=params, get_features=True)
if self.data_type in ['img']:
out = rearrange(out, 'b hw c -> b c hw')
feature = rearrange(feature, 'b hw f -> b f hw')
inputs = rearrange(inputs, 'b c h w -> b c (h w)')
else:
raise NotImplementedError()
error = inputs - out # b c (hw)
gradient = -1 * feature.unsqueeze(dim=1) * error.unsqueeze(dim=2) # b c f hw
gradient_bias = -1 * error.unsqueeze(dim=2) # b c hw
gradient = torch.cat([gradient, gradient_bias], dim=2)
gradient = rearrange(gradient, 'b c f hw -> b (c f) hw')
gradient_norm = torch.norm(gradient, dim=1) # b hw
coords_len = gradient_norm.size(1)
# coords b hw dim_in
self.gradncp_index = torch.sort(
gradient_norm, dim=1, descending=True
)[1][:, :int(coords_len * ratio)] # b int(hw * ratio)
self.gradncp_coord = torch.gather(
coords, 1, self.gradncp_index.unsqueeze(dim=2).repeat(1, 1, self.P.dim_in)
)
self.gradncp_index = self.gradncp_index.unsqueeze(dim=1).repeat(1, self.P.dim_out, 1)
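    # Selection sketch (explanatory note): gradncp() scores each coordinate by the norm
    # of the last-layer gradient of the MSE loss, i.e. ||(prediction - target) *
    # [penultimate features, 1]|| per pixel, keeps the P.data_ratio fraction with the
    # largest norms (e.g. a ratio of 0.25 keeps 256 of the 1024 coordinates of a 32x32
    # image), and caches their coordinates and per-channel indices so that the next
    # loss evaluation only touches this high-gradient subset.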
def random_sample(self):
coord_size = self.grid.size(0) # shape (h * w, c)
perm = torch.randperm(coord_size)
self.sampled_index = perm[:int(self.P.data_ratio * coord_size)]
self.sampled_coord = self.grid[self.sampled_index]
return self.sampled_coord
def forward_image(self, inputs=None, params=None):
if exists(inputs):
inputs = inputs[0]
coords, meta_batch_size = self.get_batch_coords(inputs, params)
out = self.decoder(coords, params=params)
out = rearrange(out, 'b hw c -> b c hw')
if exists(inputs):
if self.sampled_coord is None and self.gradncp_coord is None:
return F.mse_loss(
                    inputs.view(meta_batch_size, -1), out.view(meta_batch_size, -1), reduction='none'
).mean(dim=1)
elif self.gradncp_coord is not None:
inputs = rearrange(inputs, 'b c h w -> b c (h w)')
inputs = torch.gather(inputs, 2, self.gradncp_index)
return F.mse_loss(
                    inputs.view(meta_batch_size, -1), out.view(meta_batch_size, -1), reduction='none'
).mean(dim=1)
else:
inputs = rearrange(inputs, 'b c h w -> b c (h w)')[:, :, self.sampled_index]
return F.mse_loss(
                    inputs.view(meta_batch_size, -1), out.view(meta_batch_size, -1), reduction='none'
).mean(dim=1)
out = rearrange(out, 'b c (h w) -> b c h w', h=self.height, w=self.width)
return out
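if __name__ == "__main__":
    # Illustrative smoke test, not part of the original module. It assumes the
    # repository root is on PYTHONPATH (so the SIREN decoder can be imported) and
    # uses a small stand-in namespace `P` instead of the project's argument parser.
    from argparse import Namespace
    from models.inr.metasiren import MetaSirenPenultimate
    P = Namespace(data_type='img', data_size=(3, 32, 32), data_ratio=0.25,
                  dim_in=2, dim_out=3)
    decoder = MetaSirenPenultimate(P.dim_in, 64, P.dim_out, num_layers=4,
                                   data_size=P.data_size, data_type=P.data_type)
    wrapper = MetaWrapper(P, decoder)
    imgs = torch.rand(2, 3, 32, 32)                  # a batch of two image "tasks"
    params = wrapper.get_batch_params(None, imgs.size(0))
    loss_full = wrapper((imgs,), params=params)      # per-task MSE over the full grid
    wrapper.sample('gradncp', (imgs,), params)       # keep top-25% coords by gradient norm
    loss_sub = wrapper((imgs,), params=params)       # per-task MSE over the selected subset
    print(loss_full.shape, loss_sub.shape)           # torch.Size([2]) twice
    wrapper.coord_init()                             # reset to the full coordinate grid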
| 5,909 | 36.405063 | 105 | py |
GradNCP | GradNCP-main/models/model.py | import torch
from models.inr.metasiren import MetaSiren, MetaSirenPenultimate
from models.wrapper import MetaWrapper
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_inr(P):
if P.decoder == 'siren':
if P.sample_type in ['gradncp']:
model = MetaSirenPenultimate(P.dim_in, P.dim_hidden, P.dim_out, P.num_layers,
w0=P.w0, w0_initial=P.w0, data_size=P.data_size, data_type=P.data_type)
else:
model = MetaSiren(P.dim_in, P.dim_hidden, P.dim_out, P.num_layers,
w0=P.w0, w0_initial=P.w0, data_size=P.data_size, data_type=P.data_type)
else:
raise ValueError("no such model exists, mate.")
return model
def get_model(P):
decoder = get_inr(P)
if P.data_type in ['img']:
return MetaWrapper(P, decoder)
else:
raise NotImplementedError()
| 921 | 29.733333 | 112 | py |
GradNCP | GradNCP-main/models/inr/metasiren.py | import math
import torch
import torch.nn as nn
from models.metamodule import MetaModule, MetaSequential, MetaBatchLinear
class Sine(nn.Module):
def __init__(self, w0=30.):
super().__init__()
self.w0 = w0
def forward(self, x):
return torch.sin(self.w0*x)
class MetaSirenLayer(MetaModule):
"""
Single layer of SIREN; uses SIREN-style init. scheme.
"""
def __init__(self, dim_in, dim_out, w0=30., c=6., is_first=False, is_final=False):
super().__init__()
# Encapsulates MetaLinear and activation.
self.linear = MetaBatchLinear(dim_in, dim_out)
self.activation = nn.Identity() if is_final else Sine(w0)
# Initializes according to SIREN init.
self.init_(c=c, w0=w0, is_first=is_first)
def init_(self, c, w0, is_first):
dim_in = self.linear.weight.size(1)
w_std = 1/dim_in if is_first else (math.sqrt(c/dim_in)/w0)
nn.init.uniform_(self.linear.weight, -w_std, w_std)
nn.init.uniform_(self.linear.bias, -w_std, w_std)
def forward(self, x, params=None):
return self.activation(self.linear(x, self.get_subdict(params, 'linear')))
class MetaReLULayer(MetaModule):
"""
    Single layer with a ReLU activation; uses SIREN-style init. scheme.
"""
def __init__(self, dim_in, dim_out, w0=30., c=6., is_first=False, is_final=False):
super().__init__()
# Encapsulates MetaLinear and activation.
self.linear = MetaBatchLinear(dim_in, dim_out)
self.activation = nn.Identity() if is_final else nn.ReLU()
# Initializes according to SIREN init.
self.init_(c=c, w0=w0, is_first=is_first)
def init_(self, c, w0, is_first):
dim_in = self.linear.weight.size(1)
w_std = 1/dim_in if is_first else (math.sqrt(c/dim_in)/w0)
nn.init.uniform_(self.linear.weight, -w_std, w_std)
nn.init.uniform_(self.linear.bias, -w_std, w_std)
def forward(self, x, params=None):
return self.activation(self.linear(x, self.get_subdict(params, 'linear')))
class MetaSiren(MetaModule):
"""
SIREN as a meta-network.
"""
def __init__(self, dim_in, dim_hidden, dim_out, num_layers=4, w0=30., w0_initial=30.,
data_type='img', data_size=(3, 178, 178)):
super().__init__()
self.num_layers = num_layers
self.dim_hidden = dim_hidden
self.w0 = w0
layers = []
for ind in range(num_layers-1):
is_first = ind == 0
layer_w0 = w0_initial if is_first else w0
layer_dim_in = dim_in if is_first else dim_hidden
layers.append(MetaSirenLayer(dim_in=layer_dim_in, dim_out=dim_hidden,
w0=layer_w0, is_first=is_first))
layers.append(MetaSirenLayer(dim_in=dim_hidden, dim_out=dim_out,
w0=w0, is_final=True))
self.layers = MetaSequential(*layers)
def forward(self, x, params=None):
return self.layers(x, params=self.get_subdict(params, 'layers')) + 0.5
class MetaSirenPenultimate(MetaModule):
"""
    SIREN as a meta-network that also exposes its penultimate features (used by GradNCP sampling).
"""
def __init__(self, dim_in, dim_hidden, dim_out, num_layers=4, w0=30., w0_initial=30.,
data_type='img', data_size=(3, 178, 178)):
super().__init__()
self.num_layers = num_layers
self.dim_hidden = dim_hidden
self.w0 = w0
layers = []
for ind in range(num_layers-1):
is_first = ind == 0
layer_w0 = w0_initial if is_first else w0
layer_dim_in = dim_in if is_first else dim_hidden
layers.append(MetaSirenLayer(dim_in=layer_dim_in, dim_out=dim_hidden,
w0=layer_w0, is_first=is_first))
self.layers = MetaSequential(*layers)
self.last_layer = MetaSirenLayer(dim_in=dim_hidden, dim_out=dim_out,
w0=w0, is_final=True)
def forward(self, x, params=None, get_features=False):
feature = self.layers(x, params=self.get_subdict(params, 'layers'))
out = self.last_layer(feature, params=self.get_subdict(params, 'last_layer')) + 0.5
if get_features:
return out, feature
else:
return out
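if __name__ == "__main__":
    # Quick shape check, illustrative only: a tiny SIREN queried on a batch of
    # coordinate sets, once with its own parameters and once with explicitly
    # batched per-task copies, as the meta-learning inner loop supplies them.
    from collections import OrderedDict
    net = MetaSiren(dim_in=2, dim_hidden=32, dim_out=3, num_layers=4)
    coords = torch.rand(2, 16, 2) * 2 - 1    # 2 tasks, 16 coordinates in [-1, 1]^2
    print(net(coords).shape)                 # torch.Size([2, 16, 3])
    params = OrderedDict(
        (name, p[None, ...].repeat((2,) + (1,) * p.dim()))
        for name, p in net.meta_named_parameters()
    )
    print(net(coords, params=params).shape)  # torch.Size([2, 16, 3])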
| 4,299 | 36.068966 | 91 | py |
GradNCP | GradNCP-main/models/metamodule/metamodule.py | import torch
import torch.nn as nn
import re
import warnings
from collections import OrderedDict
from einops import rearrange
class MetaModule(nn.Module):
"""
Base class for PyTorch meta-learning modules. These modules accept an
additional argument `params` in their `forward` method.
Notes
-----
Objects inherited from `MetaModule` are fully compatible with PyTorch
modules from `torch.nn.Module`. The argument `params` is a dictionary of
tensors, with full support of the computation graph (for differentiation).
"""
def __init__(self):
super(MetaModule, self).__init__()
self._children_modules_parameters_cache = dict()
def meta_named_parameters(self, prefix='', recurse=True):
gen = self._named_members(
lambda module: module._parameters.items()
if isinstance(module, MetaModule) else [],
prefix=prefix, recurse=recurse)
for elem in gen:
yield elem
def meta_parameters(self, recurse=True):
for name, param in self.meta_named_parameters(recurse=recurse):
yield param
def get_subdict(self, params, key=None):
if params is None:
return None
all_names = tuple(params.keys())
if (key, all_names) not in self._children_modules_parameters_cache:
if key is None:
self._children_modules_parameters_cache[(key, all_names)] = all_names
else:
key_escape = re.escape(key)
key_re = re.compile(r'^{0}\.(.+)'.format(key_escape))
self._children_modules_parameters_cache[(key, all_names)] = [
key_re.sub(r'\1', k) for k in all_names if key_re.match(k) is not None]
names = self._children_modules_parameters_cache[(key, all_names)]
if not names:
warnings.warn('Module `{0}` has no parameter corresponding to the '
'submodule named `{1}` in the dictionary `params` '
'provided as an argument to `forward()`. Using the '
'default parameters for this submodule. The list of '
'the parameters in `params`: [{2}].'.format(
self.__class__.__name__, key, ', '.join(all_names)),
stacklevel=2)
return None
return OrderedDict([(name, params[f'{key}.{name}']) for name in names])
class MetaSequential(nn.Sequential, MetaModule):
__doc__ = nn.Sequential.__doc__
def forward(self, input, params=None):
for name, module in self._modules.items():
if isinstance(module, MetaModule):
input = module(input, params=self.get_subdict(params, name))
elif isinstance(module, nn.Module):
input = module(input)
else:
raise TypeError('The module must be either a torch module '
'(inheriting from `nn.Module`), or a `MetaModule`. '
'Got type: `{0}`'.format(type(module)))
return input
class MetaBatchLinear(nn.Linear, MetaModule):
'''
A linear meta-layer that can deal with batched weight matrices and biases,
as for instance output by a hypernetwork.
inputs: [batch_size, num_grids, dim_in]
params: [batch_size, dim_out, dim_in]
'''
__doc__ = nn.Linear.__doc__
def forward(self, inputs, params=None):
        if params is None:
            params = OrderedDict(self.named_parameters())
            for name, param in params.items():
                params[name] = param[None, ...].repeat((inputs.size(0),) + (1,) * len(param.shape))
bias = params.get('bias', None)
weight = params['weight']
inputs = rearrange(inputs, 'b i o -> b o i')
output = torch.bmm(weight, inputs)
output = rearrange(output, 'b i o -> b o i')
if bias is not None:
output += bias.unsqueeze(-2)
return output
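if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: a MetaBatchLinear
    # applied once with its own parameters and once with an explicit per-task
    # parameter dict, as a hypernetwork or inner-loop update would provide.
    layer = MetaBatchLinear(2, 3)
    x = torch.randn(4, 5, 2)                 # 4 tasks, 5 coordinates, dim_in=2
    print(layer(x).shape)                    # torch.Size([4, 5, 3])
    params = OrderedDict(
        (name, p[None, ...].repeat((4,) + (1,) * p.dim()))
        for name, p in layer.named_parameters()
    )
    print(layer(x, params=params).shape)     # torch.Size([4, 5, 3])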
| 4,025 | 36.277778 | 99 | py |
GradNCP | GradNCP-main/evals/gradient_based/maml_full_evaluate.py | import torch
import torch.nn.functional as F
import lpips
from pytorch_msssim import ms_ssim, ssim
from train.gradient_based import inner_adapt, inner_adapt_test_scale
from utils import MetricLogger, psnr, get_meta_batch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def check(P):
filename_with_today_date = True
return filename_with_today_date
def test_model(P, wrapper, loader, steps, logger=None):
metric_logger = MetricLogger(delimiter=" ")
if logger is None:
log_ = print
else:
log_ = logger.log
# Switch to evaluate mode
mode = wrapper.training
wrapper.eval()
wrapper.coord_init()
lpips_score = lpips.LPIPS(net='alex').to(device)
kwargs = {}
if P.mode == 'maml_full_evaluate_gradscale':
adapt = inner_adapt_test_scale
kwargs['sample_type'] = P.sample_type
kwargs['scale_type'] = 'grad'
else:
adapt = inner_adapt
for n, task_data in enumerate(loader):
task_data = {k: v.to(device, non_blocking=True) for k, v in task_data.items()}
batch_size, context = get_meta_batch(P, task_data)
params = adapt(
wrapper,
context,
P.inner_lr,
P.inner_steps_test,
first_order=True,
**kwargs
)[0]
with torch.no_grad():
pred = wrapper(None, params).clamp(0, 1)
if P.data_type == 'img':
context = context[0]
lpips_result = lpips_score((pred * 2 - 1), (context * 2 - 1)).mean()
psnr_result = psnr(F.mse_loss(
                context.view(batch_size, -1), pred.view(batch_size, -1), reduction='none'
).mean(dim=1)).mean()
ms_ssim_result = ms_ssim(pred, context, data_range=1.0).mean()
log_ms_ssim_result = (-10. * torch.log10(1 - ms_ssim(pred, context, data_range=1.0) + 1e-24)).mean()
ssim_result = ssim(pred, context, data_range=1.0).mean()
log_ssim_result = (-10. * torch.log10(1 - ssim(pred, context, data_range=1.0) + 1e-24)).mean()
else:
raise NotImplementedError()
metric_logger.meters['lpips_result'].update(lpips_result.item(), n=batch_size)
metric_logger.meters['psnr_result'].update(psnr_result.item(), n=batch_size)
metric_logger.meters['ms_ssim_result'].update(ms_ssim_result.item(), n=batch_size)
metric_logger.meters['ssim_result'].update(ssim_result.item(), n=batch_size)
metric_logger.meters['log_ms_ssim_result'].update(log_ms_ssim_result.item(), n=batch_size)
metric_logger.meters['log_ssim_result'].update(log_ssim_result.item(), n=batch_size)
if n % 10 == 0:
# gather the stats from all processes
metric_logger.synchronize_between_processes()
log_(f' * [EVAL {n}] [PSNR %.3f] [LOG MS-SSIM %.3f] [LPIPS %.3f] '
'[SSIM %.3f] [MS-SSIM %.3f] [LOG SSIM %.3f] ' %
(metric_logger.psnr_result.global_avg, metric_logger.log_ms_ssim_result.global_avg,
metric_logger.lpips_result.global_avg, metric_logger.ssim_result.global_avg,
metric_logger.ms_ssim_result.global_avg, metric_logger.log_ssim_result.global_avg))
# gather the stats from all processes
metric_logger.synchronize_between_processes()
log_(' * [EVAL] [PSNR %.3f] [LOG MS-SSIM %.3f] [LPIPS %.3f] '
'[SSIM %.3f] [MS-SSIM %.3f] [LOG SSIM %.3f] ' %
(metric_logger.psnr_result.global_avg, metric_logger.log_ms_ssim_result.global_avg,
metric_logger.lpips_result.global_avg, metric_logger.ssim_result.global_avg,
metric_logger.ms_ssim_result.global_avg, metric_logger.log_ssim_result.global_avg))
wrapper.train(mode)
torch.cuda.empty_cache()
return metric_logger.psnr_result.global_avg
| 3,863 | 37.64 | 112 | py |
GradNCP | GradNCP-main/evals/gradient_based/maml.py | import torch
from train.gradient_based import inner_adapt
from utils import MetricLogger, psnr, get_meta_batch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def check(P):
filename_with_today_date = True
return filename_with_today_date
def test_model(P, wrapper, loader, steps, logger=None):
metric_logger = MetricLogger(delimiter=" ")
if logger is None:
log_ = print
else:
log_ = logger.log
# Switch to evaluate mode
mode = wrapper.training
wrapper.eval()
wrapper.coord_init()
for n, task_data in enumerate(loader):
task_data = {k: v.to(device, non_blocking=True) for k, v in task_data.items()}
batch_size, context = get_meta_batch(P, task_data)
params, loss_in = inner_adapt(
wrapper,
context,
P.inner_lr,
P.inner_steps_test,
first_order=True,
)
psnr_in = psnr(loss_in)
with torch.no_grad():
loss_out = wrapper(
context,
params=params
)
psnr_out = psnr(loss_out)
metric_logger.meters['loss_in'].update(loss_in.mean().item(), n=batch_size)
metric_logger.meters['loss_out'].update(loss_out.mean().item(), n=batch_size)
metric_logger.meters['psnr_in'].update(psnr_in.mean().item(), n=batch_size)
metric_logger.meters['psnr_out'].update(psnr_out.mean().item(), n=batch_size)
if n * P.test_batch_size > P.max_test_task:
break
# gather the stats from all processes
metric_logger.synchronize_between_processes()
log_(' * [EVAL] [LossIn %.3f] [LossOut %.3f] [PSNRIn %.3f] [PSNROut %.3f]' %
(metric_logger.loss_in.global_avg, metric_logger.loss_out.global_avg,
metric_logger.psnr_in.global_avg, metric_logger.psnr_out.global_avg))
if logger is not None:
logger.scalar_summary('eval/loss_in', metric_logger.loss_in.global_avg, steps)
logger.scalar_summary('eval/loss_out', metric_logger.loss_out.global_avg, steps)
logger.scalar_summary('eval/psnr_in', metric_logger.psnr_in.global_avg, steps)
logger.scalar_summary('eval/psnr_out', metric_logger.psnr_out.global_avg, steps)
wrapper.train(mode)
return metric_logger.psnr_out.global_avg
| 2,328 | 32.271429 | 88 | py |
GradNCP | GradNCP-main/evals/gradient_based/maml_scale.py | import torch
from train.gradient_based import inner_adapt, inner_adapt_test_scale
from utils import MetricLogger, psnr, get_meta_batch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def check(P):
filename_with_today_date = True
return filename_with_today_date
def test_model(P, wrapper, loader, steps, logger=None):
metric_logger = MetricLogger(delimiter=" ")
if logger is None:
log_ = print
else:
log_ = logger.log
# Switch to evaluate mode
mode = wrapper.training
wrapper.eval()
wrapper.coord_init()
for n, task_data in enumerate(loader):
if n * P.test_batch_size > P.max_test_task:
break
task_data = {k: v.to(device, non_blocking=True) for k, v in task_data.items()}
batch_size, context = get_meta_batch(P, task_data)
params, loss_in = inner_adapt(
wrapper,
context,
P.inner_lr,
P.inner_steps_test,
first_order=True,
)
psnr_in = psnr(loss_in)
params_tt_gradscale, loss_in_tt_gradscale = inner_adapt_test_scale(
wrapper,
context,
P.inner_lr,
P.inner_steps_test,
first_order=True,
sample_type=P.sample_type,
scale_type='grad'
)
psnr_in_tt_gradscale = psnr(loss_in_tt_gradscale)
""" outer loss_out aggregate """
with torch.no_grad():
loss_out = wrapper(
context,
params=params
)
psnr_out = psnr(loss_out)
loss_out_tt_gradscale = wrapper(
context,
params=params_tt_gradscale
)
psnr_out_tt_gradscale = psnr(loss_out_tt_gradscale)
metric_logger.meters['loss_in'].update(loss_in.mean().item(), n=batch_size)
metric_logger.meters['loss_out'].update(loss_out.mean().item(), n=batch_size)
metric_logger.meters['psnr_in'].update(psnr_in.mean().item(), n=batch_size)
metric_logger.meters['psnr_out'].update(psnr_out.mean().item(), n=batch_size)
metric_logger.meters['loss_in_tt_gradscale'].update(loss_in_tt_gradscale.mean().item(), n=batch_size)
metric_logger.meters['loss_out_tt_gradscale'].update(loss_out_tt_gradscale.mean().item(), n=batch_size)
metric_logger.meters['psnr_in_tt_gradscale'].update(psnr_in_tt_gradscale.mean().item(), n=batch_size)
metric_logger.meters['psnr_out_tt_gradscale'].update(psnr_out_tt_gradscale.mean().item(), n=batch_size)
if n * P.test_batch_size > P.max_test_task:
break
# gather the stats from all processes
metric_logger.synchronize_between_processes()
log_(' * [EVAL] [LossIn %.3f] [LossOut %.3f] [PSNRIn %.3f] [PSNROut %.3f]' %
(metric_logger.loss_in.global_avg, metric_logger.loss_out.global_avg,
metric_logger.psnr_in.global_avg, metric_logger.psnr_out.global_avg))
log_(' * [EVAL Gradscale TT] [LossInGSTT %.3f] [LossOutGSTT %.3f] [PSNRInGSTT %.3f] [PSNROutGSTT %.3f]' %
(metric_logger.loss_in_tt_gradscale.global_avg, metric_logger.loss_out_tt_gradscale.global_avg,
metric_logger.psnr_in_tt_gradscale.global_avg, metric_logger.psnr_out_tt_gradscale.global_avg))
if logger is not None:
logger.scalar_summary('eval/loss_in', metric_logger.loss_in.global_avg, steps)
logger.scalar_summary('eval/loss_out', metric_logger.loss_out.global_avg, steps)
logger.scalar_summary('eval/psnr_in', metric_logger.psnr_in.global_avg, steps)
logger.scalar_summary('eval/psnr_out', metric_logger.psnr_out.global_avg, steps)
logger.scalar_summary('eval/loss_in_tt_gradscale', metric_logger.loss_in_tt_gradscale.global_avg, steps)
logger.scalar_summary('eval/loss_out_tt_gradscale', metric_logger.loss_out_tt_gradscale.global_avg, steps)
logger.scalar_summary('eval/psnr_in_tt_gradscale', metric_logger.psnr_in_tt_gradscale.global_avg, steps)
logger.scalar_summary('eval/psnr_out_tt_gradscale', metric_logger.psnr_out_tt_gradscale.global_avg, steps)
wrapper.train(mode)
return max(metric_logger.psnr_out.global_avg, metric_logger.psnr_out_tt_gradscale.global_avg)
| 4,273 | 40.495146 | 114 | py |
GradNCP | GradNCP-main/train/__init__.py | import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def setup(mode, P):
fname = f'{P.dataset}_{P.decoder}_{mode}_bs{P.batch_size}_inner{P.inner_steps}'
if mode in ['fomaml', 'maml']:
from train.gradient_based.maml import train_step
from train.gradient_based.maml import check
elif mode in ['maml_bootstrap_param']:
from train.gradient_based.maml_boot import train_step
from train.gradient_based.maml_boot import check
fname += f'_ratio{P.data_ratio}_{P.sample_type}_L{P.inner_steps_boot}_lam{P.lam}'
if P.inner_lr_boot is None:
P.inner_lr_boot = P.inner_lr
else:
raise NotImplementedError()
today = check(P)
if P.no_date:
today = False
fname += f'_seed_{P.seed}'
if P.suffix is not None:
fname += f'_{P.suffix}'
return train_step, fname, today
| 904 | 27.28125 | 89 | py |
GradNCP | GradNCP-main/train/trainer.py | import time
import torch
from common.utils import is_resume
from utils import MetricLogger, save_checkpoint, save_checkpoint_step
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def meta_trainer(P, train_func, test_func, model, optimizer, train_loader, test_loader, logger):
kwargs = {}
kwargs_test = {}
metric_logger = MetricLogger(delimiter=" ")
""" resume option """
is_best, start_step, best, psnr = is_resume(P, model, optimizer)
""" training start """
logger.log_dirname(f"Start training")
for it, train_batch in enumerate(train_loader):
step = start_step + it + 1
if step > P.outer_steps:
break
model.iter = step # update iteration in the model for adaptive ray sampling
stime = time.time()
train_batch = {k: v.to(device, non_blocking=True) for k, v in train_batch.items()}
metric_logger.meters['data_time'].update(time.time() - stime)
train_func(P, step, model, optimizer, train_batch,
metric_logger=metric_logger, logger=logger, **kwargs)
""" evaluation & save the best model """
if step % P.eval_step == 0:
psnr = test_func(P, model, test_loader, step, logger=logger, **kwargs_test)
if best < psnr:
best = psnr
save_checkpoint(P, step, best, model, optimizer.state_dict(),
logger.logdir, is_best=True, data_parallel=P.data_parallel)
logger.scalar_summary('eval/best', best, step)
logger.log('[EVAL] [Step %3d] [PSNR %5.2f] [Best %5.2f]' % (step, psnr, best))
""" save model per save_step steps"""
if step % P.save_step == 0:
save_checkpoint_step(P, step, best, model, optimizer.state_dict(),
logger.logdir, data_parallel=P.data_parallel)
""" save last model"""
save_checkpoint(P, P.outer_steps, best, model, optimizer.state_dict(),
logger.logdir, data_parallel=P.data_parallel)
| 2,065 | 35.245614 | 96 | py |
GradNCP | GradNCP-main/train/gradient_based/maml.py | import time
import torch
from train.gradient_based import inner_adapt
from utils import psnr, get_meta_batch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def check(P):
filename_with_today_date = True
return filename_with_today_date
def train_step(P, steps, wrapper, optimizer, task_data, metric_logger, logger):
stime = time.time()
wrapper.train()
batch_size, context = get_meta_batch(P, task_data)
# Run inner loop
wrapper.support = True
params, loss_in = inner_adapt(
wrapper,
context,
P.inner_lr,
P.inner_steps,
first_order=P.mode == 'fomaml',
)
""" outer loss aggregate """
wrapper.support = False
loss = wrapper(
context,
params=params
)
""" outer gradient step """
optimizer.zero_grad()
loss.mean().backward()
torch.nn.utils.clip_grad_norm_(wrapper.decoder.parameters(), 1.0)
optimizer.step()
torch.cuda.synchronize()
""" track stat """
metric_logger.meters['batch_time'].update(time.time() - stime, n=batch_size)
metric_logger.meters['loss_in'].update(loss_in.mean().item(), n=batch_size)
metric_logger.meters['loss_out'].update(loss.mean().item(), n=batch_size)
metric_logger.meters['psnr_in'].update(psnr(loss_in).mean().item(), n=batch_size)
metric_logger.meters['psnr_out'].update(psnr(loss).mean().item(), n=batch_size)
metric_logger.synchronize_between_processes()
if steps % P.print_step == 0:
logger.log_dirname(f"Step {steps}")
logger.scalar_summary('train/loss_in',
metric_logger.loss_in.global_avg, steps)
logger.scalar_summary('train/loss_out',
metric_logger.loss_out.global_avg, steps)
logger.scalar_summary('train/psnr_in',
metric_logger.psnr_in.global_avg, steps)
logger.scalar_summary('train/psnr_out',
metric_logger.psnr_out.global_avg, steps)
logger.scalar_summary('train/batch_time',
metric_logger.batch_time.global_avg, steps)
logger.log('[TRAIN] [Step %3d] [Time %.3f] [Data %.3f] '
'[LossIn %f] [LossOut %f] [PSNRIn %.3f] [PSNROut %.3f]' %
(steps, metric_logger.batch_time.global_avg, metric_logger.data_time.global_avg,
metric_logger.loss_in.global_avg, metric_logger.loss_out.global_avg,
metric_logger.psnr_in.global_avg, metric_logger.psnr_out.global_avg))
metric_logger.reset()
| 2,608 | 33.786667 | 99 | py |
GradNCP | GradNCP-main/train/gradient_based/__init__.py | from collections import OrderedDict
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_grad_norm(grads, bs, detach=True):
grad_norm_list = []
for grad in grads:
if grad is None:
grad_norm = 0
else:
if detach:
grad_norm = torch.norm(
grad.data.view(bs, -1), p=2, dim=1, keepdim=True
)
else:
grad_norm = torch.norm(
grad.view(bs, -1), p=2, dim=1, keepdim=True
)
grad_norm_list.append(grad_norm)
return torch.norm(torch.cat(grad_norm_list, dim=1), p=2, dim=1)
def inner_adapt(wrapper, task_data, step_size=1e-2, num_steps=3,
                first_order=False, params=None, sample_type='none'):
    """Run `num_steps` inner-loop gradient updates on per-task parameter copies,
    optionally re-sampling coordinates (e.g. with GradNCP) before every step."""
    loss = 0.
batch_size = task_data[0].size(0)
params = wrapper.get_batch_params(params, batch_size)
""" inner gradient step """
for step_inner in range(num_steps):
if sample_type != 'none':
wrapper.sample(sample_type, task_data, params)
params, loss = inner_loop_step(
wrapper,
params,
task_data,
step_size,
first_order,
)
return params, loss
def inner_loop_step(
wrapper,
params,
task_data,
inner_lr=1e-2,
first_order=False,
):
"""Performs a single inner loop step."""
batch_size = len(task_data[0])
wrapper.decoder.zero_grad()
with torch.enable_grad():
loss = wrapper(task_data, params=params)
grads = torch.autograd.grad(
loss.mean() * batch_size,
params.values(),
create_graph=not first_order,
allow_unused=True
)
updated_params = OrderedDict()
for (name, param), grad in zip(params.items(), grads):
if grad is None:
grad = 0.
updated_params[name] = param - inner_lr * grad
return updated_params, loss
def inner_adapt_test_scale(wrapper, task_data, step_size=1e-2, num_steps=3,
first_order=False, params=None, sample_type='none',
scale_type='loss'):
loss = 0.
batch_size = task_data[0].size(0)
params = wrapper.get_batch_params(params, batch_size)
""" inner gradient step """
for step_inner in range(num_steps):
if sample_type != 'none':
wrapper.sample(sample_type, task_data, params)
params, loss = inner_test_gradscale_loop_step(
wrapper,
params,
task_data,
step_size,
first_order,
scale_type,
)
return params, loss
def inner_test_gradscale_loop_step(
wrapper,
params,
task_data,
inner_lr=1e-2,
first_order=False,
scale_type='grad',
):
"""Performs a single inner loop step."""
batch_size = len(task_data[0])
wrapper.decoder.zero_grad()
with torch.enable_grad():
subsample_loss = wrapper(task_data, params=params)
subsample_grad = torch.autograd.grad(
subsample_loss.mean() * batch_size,
params.values(),
create_graph=False,
allow_unused=True
)
wrapper.decoder.zero_grad()
wrapper.coord_init()
with torch.enable_grad():
loss = wrapper(task_data, params=params)
grads = torch.autograd.grad(
loss.mean() * batch_size,
params.values(),
create_graph=not first_order,
allow_unused=True
)
updated_params = OrderedDict()
if scale_type == 'grad':
subsample_grad_norm = get_grad_norm(subsample_grad, batch_size, detach=True)
grads_norm = get_grad_norm(grads, batch_size, detach=True)
grads_scale = subsample_grad_norm / (grads_norm + 1e-16)
else:
raise NotImplementedError()
for (name, param), grad in zip(params.items(), grads):
if grad is None:
grad = 0.
else:
grads_scale_ = grads_scale.view(
(batch_size,) + (1,) * (len(grad.shape) - 1)
).detach()
updated_params[name] = param - inner_lr * grads_scale_ * grad
return updated_params, loss
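if __name__ == "__main__":
    # Tiny illustration, not part of the original module: the per-task gradient
    # norm used at test time to rescale the full-context update (scale_type='grad').
    bs = 3
    fake_grads = [torch.randn(bs, 4, 4), torch.randn(bs, 8)]  # one entry per parameter tensor
    print(get_grad_norm(fake_grads, bs).shape)                # torch.Size([3])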
| 4,332 | 26.775641 | 88 | py |
GradNCP | GradNCP-main/train/gradient_based/maml_boot.py | import time
import torch
from train.gradient_based import inner_adapt
from utils import psnr, get_meta_batch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def check(P):
filename_with_today_date = True
return filename_with_today_date
def param_consistency(P, params, params_bootstrap, bs):
    """Per-task L2 distance between the current and bootstrapped (target) parameters."""
    param_norm = []
for (name, param) in params.items():
updated_param = params_bootstrap[name].detach() - param
updated_param = updated_param.view(bs, -1)
param_norm.append(torch.norm(updated_param, p=2, dim=1, keepdim=True))
return torch.norm(torch.cat(param_norm, dim=1), p=2, dim=1).mean()
def train_step(P, steps, wrapper, optimizer, task_data, metric_logger, logger):
stime = time.time()
wrapper.train()
batch_size, context = get_meta_batch(P, task_data)
wrapper.support = True
params, loss_in = inner_adapt(
wrapper,
context,
P.inner_lr,
P.inner_steps,
first_order=P.mode == 'fomaml',
sample_type=P.sample_type,
)
wrapper.coord_init()
wrapper.support = False
params_boot, loss_in_boot = inner_adapt(
wrapper,
context,
P.inner_lr_boot,
P.inner_steps_boot,
first_order=True,
params=params,
)
""" outer loss aggregate """
wrapper.coord_init()
loss_out = wrapper(
context,
params=params
)
loss_boot = P.lam * param_consistency(P, params, params_boot, batch_size)
loss = loss_out.mean() + loss_boot
""" outer gradient step """
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(wrapper.decoder.parameters(), 1.0)
optimizer.step()
torch.cuda.synchronize()
""" track stat """
metric_logger.meters['batch_time'].update(time.time() - stime, n=batch_size)
metric_logger.meters['loss_in'].update(loss_in.mean().item(), n=batch_size)
metric_logger.meters['loss_out'].update(loss_out.mean().item(), n=batch_size)
metric_logger.meters['psnr_in'].update(psnr(loss_in).mean().item(), n=batch_size)
metric_logger.meters['psnr_out'].update(psnr(loss_out).mean().item(), n=batch_size)
metric_logger.synchronize_between_processes()
if steps % P.print_step == 0:
logger.log_dirname(f"Step {steps}")
logger.scalar_summary('train/loss_in',
metric_logger.loss_in.global_avg, steps)
logger.scalar_summary('train/loss_out',
metric_logger.loss_out.global_avg, steps)
logger.scalar_summary('train/psnr_in',
metric_logger.psnr_in.global_avg, steps)
logger.scalar_summary('train/psnr_out',
metric_logger.psnr_out.global_avg, steps)
logger.scalar_summary('train/batch_time',
metric_logger.batch_time.global_avg, steps)
logger.log('[TRAIN] [Step %3d] [Time %.3f] [Data %.3f] '
'[LossIn %f] [LossOut %f] [PSNRIn %.3f] [PSNROut %.3f]' %
(steps, metric_logger.batch_time.global_avg, metric_logger.data_time.global_avg,
metric_logger.loss_in.global_avg, metric_logger.loss_out.global_avg,
metric_logger.psnr_in.global_avg, metric_logger.psnr_out.global_avg))
metric_logger.reset()
| 3,367 | 33.367347 | 99 | py |
GradNCP | GradNCP-main/data/dataset.py | import torchvision.transforms as T
from torchvision import datasets
from torch.utils.data import Dataset
DATA_PATH = '/data'
class ImgDataset(Dataset):
def __init__(self, data, sdf=False):
self.data = data
self.sdf = sdf
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
x = self.data[idx]
if not self.sdf:
x = x[0]
return {
'imgs': x,
}
def get_dataset(P, dataset, only_test=False):
"""
    Load train/test datasets for an image dataset, center-cropped to a fixed resolution,
    and record the data metadata (type, size, dims) on P.
"""
val_set = None
P.data_size = None
if dataset == 'celeba':
T_base = T.Compose([
T.Resize(178),
T.CenterCrop(178),
T.ToTensor()
])
train_set = ImgDataset(
datasets.CelebA(DATA_PATH, split='train',
target_type='attr', transform=T_base)
)
test_set = ImgDataset(
datasets.CelebA(DATA_PATH, split='test',
target_type='attr', transform=T_base)
)
P.data_type = 'img'
P.dim_in, P.dim_out = 2, 3
P.data_size = (3, 178, 178)
else:
raise NotImplementedError()
P.train_set = train_set
if only_test:
return test_set
val_set = test_set if val_set is None else val_set
return train_set, val_set
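if __name__ == "__main__":
    # Illustrative only: requires CelebA to be present under DATA_PATH (torchvision
    # will look for it there). `P` is a stand-in namespace, not the project parser.
    from argparse import Namespace
    P = Namespace()
    train_set, val_set = get_dataset(P, 'celeba')
    print(P.data_type, P.data_size, len(train_set))
    print(train_set[0]['imgs'].shape)  # torch.Size([3, 178, 178])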
| 1,413 | 22.566667 | 74 | py |
torch2trt | torch2trt-master/setup.py | import sys
import tensorrt
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension
from packaging import version
def trt_inc_dir():
return "/usr/include/aarch64-linux-gnu"
def trt_lib_dir():
return "/usr/lib/aarch64-linux-gnu"
ext_modules = []
exclude_dir = ["torch2trt/contrib","torch2trt/contrib.*"]
compile_args_cxx = []
if version.parse(torch.__version__) < version.parse('1.5'):
compile_args_cxx.append('-DUSE_DEPRECATED_INTLIST')
if version.parse(tensorrt.__version__) < version.parse('8'):
compile_args_cxx.append('-DPRE_TRT8')
plugins_ext_module = CUDAExtension(
name='plugins',
sources=[
'torch2trt/plugins/plugins.cpp'
],
include_dirs=[
trt_inc_dir()
],
library_dirs=[
trt_lib_dir()
],
libraries=[
'nvinfer'
],
extra_compile_args={
'cxx': compile_args_cxx,
'nvcc': []
}
)
if '--plugins' in sys.argv:
ext_modules.append(plugins_ext_module)
sys.argv.remove('--plugins')
if '--contrib' in sys.argv:
exclude_dir=[]
sys.argv.remove('--contrib')
setup(
name='torch2trt',
version='0.4.0',
description='An easy to use PyTorch to TensorRT converter',
packages=find_packages(exclude=exclude_dir),
ext_package='torch2trt',
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension}
)
| 1,504 | 23.672131 | 81 | py |
torch2trt | torch2trt-master/build.py | import imp
import subprocess
import os
from string import Template
PLUGINS = [
'interpolate',
'group_norm',
]
BASE_FOLDER = 'torch2trt/converters'
NINJA_TEMPLATE = Template((
"rule link\n"
" command = g++ -shared -o $$out $$in -L$torch_dir/lib -L$cuda_dir/lib64 -L$trt_lib_dir -lc10 -lc10_cuda -ltorch -lcudart -lprotobuf -lprotobuf-lite -pthread -lpthread -lnvinfer\n"
"rule protoc\n"
" command = protoc $$in --cpp_out=. --python_out=.\n"
"rule cxx\n"
" command = g++ -c -fPIC $$in -I$cuda_dir/include -I$torch_dir/include -I$torch_dir/include/torch/csrc/api/include -I. -std=c++11 -I$trt_inc_dir\n"
))
PLUGIN_TEMPLATE = Template((
"build $plugin_dir/$plugin.pb.h $plugin_dir/$plugin.pb.cc $plugin_dir/${plugin}_pb2.py: protoc $plugin_dir/$plugin.proto\n"
"build $plugin.pb.o: cxx $plugin_dir/$plugin.pb.cc\n"
"build $plugin.o: cxx $plugin_dir/$plugin.cpp\n"
))
def build(cuda_dir="/usr/local/cuda",
torch_dir=imp.find_module('torch')[1],
trt_inc_dir="/usr/include/aarch64-linux-gnu",
trt_lib_dir="/usr/lib/aarch64-linux-gnu"):
global PLUGINS, BASE_FOLDER, NINJA_TEMPLATE, PLUGIN_TEMPLATE
NINJA_STR = NINJA_TEMPLATE.substitute({
'torch_dir': torch_dir,
'cuda_dir': cuda_dir,
'trt_inc_dir': trt_inc_dir,
'trt_lib_dir': trt_lib_dir,
})
plugin_o_files = []
for plugin in PLUGINS:
NINJA_STR += \
PLUGIN_TEMPLATE.substitute({
'plugin': plugin,
'plugin_dir': os.path.join(BASE_FOLDER, plugin),
})
plugin_o_files += [plugin + '.pb.o', plugin + '.o']
NINJA_STR += Template((
"build torch2trt/libtorch2trt.so: link $o_files\n"
)).substitute({'o_files': ' '.join(plugin_o_files)})
with open('build.ninja', 'w') as f:
f.write(NINJA_STR)
subprocess.call(['ninja'])
if __name__ == '__main__':
build()
| 1,941 | 28.876923 | 185 | py |
torch2trt | torch2trt-master/examples/easyocr/generate_data.py | from argparse import ArgumentParser
import cv2
import torch
import glob
from easyocr import Reader
from torch2trt.dataset import FolderDataset
from torch2trt import torch2trt, TRTModule
import math
import os
parser = ArgumentParser()
parser.add_argument('--images', type=str, default='images')
parser.add_argument('--detector_data', type=str, default='detector_data')
parser.add_argument('--recognizer_data', type=str, default='recognizer_data')
parser.add_argument('--max_image_area', type=int, default=1280*720)
parser.add_argument('--recognizer_batch_size', type=int, default=1)
args = parser.parse_args()
reader = Reader(['en'])
detector_dataset = FolderDataset(args.detector_data)
recognizer_dataset = FolderDataset(args.recognizer_data)
def shrink_to_area(image, area):
height = image.shape[0]
width = image.shape[1]
if height * width > area:
ar = width / height
new_height = math.sqrt(area / ar)
new_width = ar * new_height
new_height = math.floor(new_height)
new_width = math.floor(new_width)
print(f'Resizing {width}x{height} to {new_width}x{new_height}')
image = cv2.resize(image, (new_width, new_height))
return image
with detector_dataset.record(reader.detector.module):
with recognizer_dataset.record(reader.recognizer.module):
for path in glob.glob(os.path.join(args.images, '*.jpg')):
print(path)
image = cv2.imread(path)
image = shrink_to_area(image, args.max_image_area)
reader.readtext(image, batch_size=args.recognizer_batch_size)
| 1,605 | 28.740741 | 77 | py |
torch2trt | torch2trt-master/examples/easyocr/optimize_recognizer.py | from argparse import ArgumentParser
from torch2trt.dataset import FolderDataset
from torch2trt import torch2trt, TRTModule
from easyocr import Reader
import tensorrt as trt
import torch
import time
from tempfile import mkdtemp
parser = ArgumentParser()
parser.add_argument('--detector_data', type=str, default='detector_data')
parser.add_argument('--recognizer_data', type=str, default='recognizer_data')
parser.add_argument('--output', type=str, default='recognizer_trt.pth')
parser.add_argument('--int8', action='store_true')
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--max_workspace_size', type=int, default=1<<28)
args = parser.parse_args()
detector_dataset = FolderDataset(args.detector_data)
recognizer_dataset = FolderDataset(args.recognizer_data)
if len(detector_dataset) == 0:
raise ValueError('Detector dataset is empty, make sure to run generate_data.py first.')
if len(recognizer_dataset) == 0:
raise ValueError('Recognizer dataset is empty, make sure to run generate_data.py first.')
if args.int8:
num_calib = 200
calib_dataset = FolderDataset(mkdtemp())
for i in range(num_calib):
calib_dataset.insert(tuple([t.float() + 0.2 * torch.randn_like(t.float()) for t in recognizer_dataset[i % len(recognizer_dataset)]]))
reader = Reader(['en'])
module_torch = reader.detector.module
max_shapes = list(recognizer_dataset.max_shapes())
# override default max shape to use full image width
max_shapes[0] = torch.Size((
recognizer_dataset.max_shapes()[0][0],
recognizer_dataset.max_shapes()[0][1],
recognizer_dataset.max_shapes()[0][2],
detector_dataset.max_shapes()[0][3]
))
max_shapes = tuple(max_shapes)
class PoolFix(torch.nn.Module):
def forward(self, x):
return torch.mean(x, dim=-1, keepdim=True)
if isinstance(reader.recognizer.module.AdaptiveAvgPool, torch.nn.AdaptiveAvgPool2d):
reader.recognizer.module.AdaptiveAvgPool = PoolFix()
recognizer_torch = reader.recognizer.module
print('Running torch2trt...')
recognizer_trt = torch2trt(
reader.recognizer.module,
recognizer_dataset,
max_shapes=max_shapes,
use_onnx=True, # LSTM currently only implemented in ONNX workflow
fp16_mode=args.fp16,
int8_mode=args.int8,
max_workspace_size=args.max_workspace_size,
log_level=trt.Logger.VERBOSE
)
# recognizer_trt.ignore_inputs = [1]
torch.save(recognizer_trt.state_dict(), args.output)
def profile_module(module, dataset, count=None):
if count is None:
count = len(dataset)
output = module(*dataset[0]) # warmup
torch.cuda.current_stream().synchronize()
t0 = time.monotonic()
for i in range(count):
output = module(*dataset[i % len(dataset)])
torch.cuda.current_stream().synchronize()
t1 = time.monotonic()
return count / (t1 - t0)
print('Profiling PyTorch...')
fps_torch = profile_module(recognizer_torch, recognizer_dataset, 50)
print(f'FPS Torch: {fps_torch}')
print('Profiling TensorRT')
fps_trt = profile_module(recognizer_trt, recognizer_dataset, 30)
print(f'FPS TensorRT: {fps_trt}') | 3,095 | 30.917526 | 141 | py |
torch2trt | torch2trt-master/examples/easyocr/run_end2end.py | from argparse import ArgumentParser
import cv2
import torch
import glob
from easyocr import Reader
from torch2trt.dataset import FolderDataset
from torch2trt import torch2trt, TRTModule
import math
import time
import os
parser = ArgumentParser()
parser.add_argument('--images', type=str, default='images')
parser.add_argument('--detector_trt', type=str, default='detector_trt.pth')
parser.add_argument('--recognizer_trt', type=str, default='recognizer_trt.pth')
parser.add_argument('--max_image_area', type=int, default=1280*720)
parser.add_argument('--count', type=int, default=None)
parser.add_argument('--recognizer_batch_size', type=int, default=1)
args = parser.parse_args()
def shrink_to_area(image, area):
height = image.shape[0]
width = image.shape[1]
if height * width > area:
ar = width / height
new_height = math.sqrt(area / ar)
new_width = ar * new_height
new_height = math.floor(new_height)
new_width = math.floor(new_width)
print(f'Resizing {width}x{height} to {new_width}x{new_height}')
image = cv2.resize(image, (new_width, new_height))
return image
image_paths = glob.glob(os.path.join(args.images, '*.jpg'))
def profile_reader(reader):
cumulative_execution_time = 0
if args.count is None:
count = len(image_paths)
else:
count = args.count
for i in range(count):
path = image_paths[i % len(image_paths)]
image = cv2.imread(path)
image = shrink_to_area(image, args.max_image_area)
t0 = time.monotonic()
reader.readtext(image, batch_size=args.recognizer_batch_size)
t1 = time.monotonic()
cumulative_execution_time += (t1 - t0)
return count / cumulative_execution_time
reader = Reader(['en'])
detector_trt = TRTModule()
detector_trt.load_state_dict(torch.load(args.detector_trt))
recognizer_trt = TRTModule()
recognizer_trt.load_state_dict(torch.load(args.recognizer_trt))
test_image = shrink_to_area(cv2.imread(image_paths[0]), args.max_image_area)
print('Dumping torch output...')
print(reader.readtext(test_image, batch_size=args.recognizer_batch_size))
print('Profiling torch...')
fps_torch = profile_reader(reader)
reader.detector.module = detector_trt
reader.recognizer.module = recognizer_trt
print('Dumping TensorRT output...')
print(reader.readtext(test_image, batch_size=args.recognizer_batch_size))
print('Profiling TensorRT...')
fps_trt = profile_reader(reader)
print(f'FPS Torch: {fps_torch}')
print(f'FPS TensorRT: {fps_trt}') | 2,555 | 26.782609 | 79 | py |
torch2trt | torch2trt-master/examples/easyocr/optimize_detector.py | from argparse import ArgumentParser
from torch2trt.dataset import FolderDataset, ListDataset
from torch2trt import torch2trt, TRTModule
from easyocr import Reader
import tensorrt as trt
import torch
import time
from tempfile import mkdtemp
parser = ArgumentParser()
parser.add_argument('--detector_data', type=str, default='detector_data')
parser.add_argument('--output', type=str, default='detector_trt.pth')
parser.add_argument('--int8', action='store_true')
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--dla', action='store_true')
parser.add_argument('--dla_core', type=int, default=0)
args = parser.parse_args()
detector_dataset = FolderDataset(args.detector_data)
if len(detector_dataset) == 0:
raise ValueError('Detector dataset is empty, make sure to run generate_data.py first.')
reader = Reader(['en'])
detector_torch = reader.detector.module
if args.int8:
num_calib = 5
calib_dataset = FolderDataset(mkdtemp())
for i in range(num_calib):
calib_dataset.insert(tuple([t + 0.2 * torch.randn_like(t) for t in detector_dataset[i % len(detector_dataset)]]))
print('Running torch2trt...')
detector_trt = torch2trt(
detector_torch,
detector_dataset,
int8_mode=args.int8,
fp16_mode=args.fp16,
default_device_type=trt.DeviceType.DLA if args.dla else trt.DeviceType.GPU,
max_workspace_size=1 << 26,
log_level=trt.Logger.VERBOSE,
int8_calib_dataset=calib_dataset if args.int8 else None,
int8_calib_algorithm=trt.CalibrationAlgoType.MINMAX_CALIBRATION,
use_onnx=True
)
torch.save(detector_trt.state_dict(), args.output)
def profile_module(module, dataset, count=None):
if count is None:
count = len(dataset)
output = module(*dataset[0]) # warmup
torch.cuda.current_stream().synchronize()
t0 = time.monotonic()
for i in range(count):
output = module(*dataset[i % len(dataset)])
torch.cuda.current_stream().synchronize()
t1 = time.monotonic()
return count / (t1 - t0)
print('Profiling PyTorch...')
fps_torch = profile_module(detector_torch, detector_dataset, 30)
print(f'FPS Torch: {fps_torch}')
print('Profiling TensorRT')
fps_trt = profile_module(detector_trt, detector_dataset, 30)
print(f'FPS TensorRT: {fps_trt}') | 2,271 | 31 | 121 | py |
torch2trt | torch2trt-master/examples/contrib/quantization_aware_training/parser.py | import argparse
def parse_args():
"""
"""
parser = argparse.ArgumentParser(description='PyTorch QAT')
parser.add_argument('--tl','--transfer_learning',action='store_true',help='used to map weights correctly')
parser.add_argument('--iter',default=300, type=int, help='no of iterations')
parser.add_argument('--m','--model_name',default=None,help="Name of the model")
    parser.add_argument('--b', '--batch_size', default=128, type=int, help='mini-batch size (default: 128)')
parser.add_argument('--optimizer', default='Adam', type=str,help='type of optimizer (default=Adam)')
parser.add_argument( '--wd','--weight-decay', default=1e-5, type=float, help='weight decay (default: 1e-5)')
parser.add_argument('--start_epoch','--s_ep', default=0, type=int, help='starting epoch')
parser.add_argument('--num_epochs',default=30,type=int, help='no of epochs')
parser.add_argument('--no_cuda', action='store_true',help='disables cuda training')
parser.add_argument('--seed', type=int, default=12345,help='random seed for experiments. [default: 12345]')
parser.add_argument('--lr', '--learning_rate', default=1e-3, type=float, help='initial learning rate')
parser.add_argument('--lrdt', '--learning_rate_decay_interval', default=30, type=int, help='initial learning rate decay after n epochs')
parser.add_argument('--od','--output_dir', default='/tmp/',help='output path')
parser.add_argument('--en','--exp_name', default='pytorch_exp',help = 'experiment name to create output dir')
parser.add_argument('--load_ckpt', default = None, help = "path to ckpt")
parser.add_argument('--netqat',action='store_true',help = 'quantize model using custom layer')
parser.add_argument('--partial_ckpt',action='store_true',help = 'load_partial checkpoint')
parser.add_argument('--v','--verbose',action='store_true')
parser.add_argument('--FP16',action='store_true',help='run TRT engine at FP16')
parser.add_argument('--test_trt',action='store_true',help='gather metrics using trt')
parser.add_argument('--INT8PTC',action='store_true',help='run TRT engine at INT8 with Post Training Cal')
parser.add_argument('--INT8QAT',action='store_true',help='run TRT engine at INT8 with QAT')
args = parser.parse_args()
return args
| 2,300 | 70.90625 | 140 | py |
torch2trt | torch2trt-master/examples/contrib/quantization_aware_training/infer.py | import timeit
import torch
import torch.nn as nn
import numpy as np
import torchvision
import argparse
import os,sys
from datasets.cifar10 import Cifar10Loaders
from utils.utilities import calculate_accuracy, timeGraph, printStats, mapping_names_resnets
from models.resnet import resnet18,resnet34
from parser import parse_args
from torch2trt import torch2trt
import tensorrt as trt
torch.set_printoptions(precision=5)
def main():
args = parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(78543)
if args.cuda:
torch.backends.cudnn.benchmark = True
torch.cuda.manual_seed(args.seed)
loaders = Cifar10Loaders()
train_loader = loaders.train_loader()
test_loader = loaders.test_loader()
if args.m == "resnet18":
if args.netqat:
model=resnet18(qat_mode=True,infer=True)
else:
model=resnet18()
elif args.m == "resnet34":
if args.netqat:
model=resnet34(qat_mode=True,infer=True)
else:
model=resnet34()
else:
raise NotImplementedError("{} model not found".format(args.m))
model = model.cuda().eval()
if args.load_ckpt:
checkpoint = torch.load(args.load_ckpt)
if not args.netqat:
checkpoint = mapping_names_resnets(checkpoint)
model.load_state_dict(checkpoint['model_state_dict'],strict=True)
print("===>>> Checkpoint loaded successfully from {} ".format(args.load_ckpt))
test_accuracy = calculate_accuracy(model,test_loader)
print(" Test accuracy for Pytorch model: {0} ".format(test_accuracy))
rand_in = torch.randn([128,3,32,32],dtype=torch.float32).cuda()
#Converting the model to TRT
if args.FP16:
trt_model_fp16 = torch2trt(model,[rand_in],log_level=trt.Logger.INFO,fp16_mode=True,max_batch_size=128)
test_accuracy = calculate_accuracy(trt_model_fp16,test_loader)
print(" TRT test accuracy at FP16: {0}".format(test_accuracy))
if args.INT8QAT:
trt_model_int8 = torch2trt(model,[rand_in],log_level=trt.Logger.INFO,fp16_mode=True,int8_mode=True,max_batch_size=128,qat_mode=True)
test_accuracy = calculate_accuracy(trt_model_int8,test_loader)
print(" TRT test accuracy at INT8 QAT: {0}".format(test_accuracy))
if args.INT8PTC:
##preparing calib dataset
calib_dataset = list()
for i, sam in enumerate(test_loader):
calib_dataset.extend(sam[0])
if i ==5:
break
trt_model_calib_int8 = torch2trt(model,[rand_in],log_level=trt.Logger.INFO,fp16_mode=True,int8_calib_dataset=calib_dataset,int8_mode=True,max_batch_size=128)
test_accuracy = calculate_accuracy(trt_model_calib_int8,test_loader)
print(" TRT test accuracy at INT8 PTC: {0}".format(test_accuracy))
if __name__ == "__main__":
main()
| 2,897 | 34.341463 | 165 | py |
torch2trt | torch2trt-master/examples/contrib/quantization_aware_training/train.py | import torch
import torch.nn as nn
import numpy as np
import torchvision
import argparse
import os,sys
import torch.optim as optim
from datasets.cifar10 import Cifar10Loaders
from models.models import vanilla_cnn
from models.resnet import resnet18 , resnet34
from utils.utilities import calculate_accuracy , add_missing_keys, transfer_learning_resnet18,transfer_learning_resnet34, mapping_names
from parser import parse_args
import time
from torch2trt import torch2trt
import tensorrt as trt
def main():
args = parse_args()
## Create an output dir
output_dir_path = args.od + args.en
if not os.path.exists(output_dir_path):
os.makedirs(output_dir_path)
dir_name=output_dir_path
else:
counter=1
dir_name = output_dir_path
new_dir_name = dir_name
while os.path.exists(new_dir_name):
new_dir_name = dir_name + "_" + str(counter)
counter +=1
os.makedirs(new_dir_name)
dir_name=new_dir_name
print("===>> Output folder = {}".format(dir_name))
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.backends.cudnn.benchmark = True
torch.cuda.manual_seed(args.seed)
loaders = Cifar10Loaders()
train_loader = loaders.train_loader()
test_loader = loaders.test_loader()
if args.m =="resnet18":
if args.netqat:
model=resnet18(qat_mode=True)
else:
model=resnet18()
elif args.m =="resnet34":
if args.netqat:
model=resnet34(qat_mode=True)
else:
model=resnet34()
elif args.m == 'resnet34-tl':
model = transfer_learning_resnet34()
elif args.m == "resnet18-tl": ## resnet18 transfer learning
model=transfer_learning_resnet18()
else:
raise NotImplementedError("model {} is not defined".format(args.m))
if args.cuda:
model = model.cuda()
best_test_accuracy=0
if args.v:
print("======>>> keys present in state dict at model creation")
for k,_ in model.state_dict().items():
print(k)
if args.load_ckpt:
model.eval()
checkpoint = torch.load(args.load_ckpt)
if args.partial_ckpt:
model_state = checkpoint['model_state_dict']
if args.v:
print("====>>>>> keys present in the ckpt state dict")
for k,_ in model_state.items():
print(k)
if args.tl:
model_state = mapping_names(model_state)
new_state_dict = add_missing_keys(model.state_dict(),model_state)
model.load_state_dict(new_state_dict,strict=True)
else:
model.load_state_dict(checkpoint['model_state_dict'],strict=True)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=0.9)
if args.load_ckpt:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
print("===>>> Checkpoint loaded successfully from {} at epoch {} ".format(args.load_ckpt,epoch))
print("===>> Training started")
for epoch in range(args.start_epoch, args.start_epoch + args.num_epochs):
running_loss=0.0
start=time.time()
model.train()
for i, data in enumerate(train_loader,0):
inputs, labels = data
if args.cuda:
inputs = inputs.cuda()
labels = labels.cuda()
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs,labels)
loss.backward()
optimizer.step()
running_loss +=loss.item()
if epoch > 0 and epoch % args.lrdt == 0:
print("===>> decaying learning rate at epoch {}".format(epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * 0.94
running_loss /= len(train_loader)
end = time.time()
test_accuracy = calculate_accuracy(model,test_loader)
print("Epoch: {0} | Loss: {1} | Test accuracy: {2}| Time Taken (sec): {3} ".format(epoch+1, np.around(running_loss,6), test_accuracy, np.around((end-start),4)))
##Save the best checkpoint
if test_accuracy > best_test_accuracy:
best_ckpt_filename = dir_name + "/ckpt_" + str(epoch)
best_test_accuracy = test_accuracy
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': running_loss,
}, best_ckpt_filename)
print("Training finished")
## Running metrics
if args.test_trt:
if args.m == 'resnet34-tl' or args.m == 'resnet34':
model = transfer_learning_resnet34(pretrained=False)
elif args.m == 'resnet18-tl' or args.m == 'resnet18':
model= transfer_learning_resnet18(pretrained=False)
else:
raise NotImplementedError("model {} is not defined".format(args.m))
model=model.cuda().eval()
checkpoint = torch.load(best_ckpt_filename)
model.load_state_dict(checkpoint['model_state_dict'],strict=True)
pytorch_test_accuracy = calculate_accuracy(model,test_loader)
rand_in = torch.randn([128,3,32,32],dtype=torch.float32).cuda()
if args.FP16:
trt_model_fp16 = torch2trt(model,[rand_in],log_level=trt.Logger.INFO,fp16_mode=True,max_batch_size=128)
trtfp16_test_accuracy = calculate_accuracy(trt_model_fp16,test_loader)
if args.INT8PTC:
##preparing calib dataset
calib_dataset = list()
for i, sam in enumerate(test_loader):
calib_dataset.extend(sam[0])
if i ==5:
break
trt_model_calib_int8 = torch2trt(model,[rand_in],log_level=trt.Logger.INFO,fp16_mode=True,int8_calib_dataset=calib_dataset,int8_mode=True,max_batch_size=128)
int8_test_accuracy = calculate_accuracy(trt_model_calib_int8,test_loader)
print("Test Accuracy")
print("Pytorch model :",pytorch_test_accuracy)
print("TRT FP16 model :",trtfp16_test_accuracy)
print("TRT INT8 PTC model :",int8_test_accuracy)
if __name__ == "__main__":
main()
| 6,565 | 35.276243 | 169 | py |
torch2trt | torch2trt-master/examples/contrib/quantization_aware_training/models/resnet.py | """
ResNet implementation adapted from torchvision, with optional quantization-aware (QAT) conv/ReLU wrappers.
"""
import torch
import torch.nn as nn
from utils.utilities import qrelu,qconv2d
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1,norm=False,act=False,qat_mode=False,infer=False):
"""3x3 convolution with padding"""
return qconv2d(in_channels=in_planes,out_channels=out_planes,kernel_size=3,stride=stride,groups=groups,padding=dilation,dilation=dilation,bias=False,act=act,norm=norm,qat=qat_mode,infer=infer)
def conv1x1(in_planes, out_planes, stride=1,norm=False,act=False,qat_mode=False,infer=False):
"""1x1 convolution"""
return qconv2d(in_channels=in_planes,out_channels=out_planes,kernel_size=1,stride=stride,bias=False,act=act,norm=norm,qat=qat_mode,infer=infer)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None,qat_mode=False,infer=False):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride,norm=True,qat_mode=qat_mode,infer=infer)
#self.bn1 = norm_layer(planes)
self.relu1 = qrelu(inplace=True,qat=qat_mode,infer=infer)
self.conv2 = conv3x3(planes, planes,norm=True,qat_mode=qat_mode,infer=infer)
#self.bn2 = norm_layer(planes)
self.relu2 = qrelu(inplace=True,qat=qat_mode,infer=infer)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
#out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
#out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu2(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None,qat_mode=False):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width,qat_mode=qat_mode)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation,qat_mode=qat_mode)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion,qat_mode=qat_mode)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=10, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None,qat_mode=False,infer=False):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = qconv2d(in_channels=3, out_channels=self.inplanes, kernel_size=7, stride=2,padding=3,bias=False,norm=True,act=False,qat=qat_mode,infer=infer)
#self.bn1 = norm_layer(self.inplanes)
self.relu = qrelu(inplace=True,qat=qat_mode,infer=infer)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0],qat_mode=qat_mode,infer=infer)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0],qat_mode=qat_mode,infer=infer)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1],qat_mode=qat_mode,infer=infer)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2],qat_mode=qat_mode,infer=infer)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock) and hasattr(m, 'bn2'):
# BasicBlock folds BN into its qconv layers, so a separate bn2 may not exist
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False,qat_mode=False,infer=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion,stride= stride,norm=True,qat_mode=qat_mode,infer=infer),
#norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer,qat_mode,infer=infer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer,qat_mode=qat_mode,infer=infer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
#x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
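# Note (illustrative, not from the original torchvision code): the factory functions in
# this file also forward the QAT-specific keyword arguments of this ResNet variant, so a
# quantization-aware model can be requested directly, e.g.
#   model = resnet18(pretrained=False, num_classes=10, qat_mode=True, infer=False)
# `pretrained=True` loads the torchvision checkpoint names, which do not match the
# qconv/qrelu parameter layout; see mapping_names in utils/utilities.py for a remapping helper.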
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except that the number of channels in the bottleneck
is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except that the number of channels in the bottleneck
is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| 14,399 | 40.618497 | 196 | py |
torch2trt | torch2trt-master/examples/contrib/quantization_aware_training/models/models.py | '''
Contains basic model definitions
'''
import torch
import torch.nn as nn
from utils.utilities import qrelu,qconv2d
class vanilla_cnn(nn.Module):
def __init__(self,qat_mode=False,infer=False):
super().__init__()
self.qat = qat_mode
self.layer1=qconv2d(3,32,padding=1,qat=qat_mode,infer=infer)
self.layer2=qconv2d(32,64,padding=1,qat=qat_mode,infer=infer)
self.layer3=qconv2d(64,128,padding=1,qat=qat_mode,infer=infer)
self.layer4=qconv2d(128,256,padding=1,qat=qat_mode,infer=infer)
self.layer5 = nn.MaxPool2d(kernel_size=2,stride=8)
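# Shape note: for a 3x32x32 CIFAR-10 input, the four stride-1/padding-1 conv blocks keep
# the 32x32 spatial size (256 channels after layer4), and the kernel-2/stride-8 max pool
# reduces it to 4x4, so the flattened feature entering the linear layers below has
# 256 * 4 * 4 = 4096 elements.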
self.fcs = nn.Sequential(
nn.Linear(4096,1024),
nn.ReLU(),
nn.Linear(1024,512),
nn.ReLU(),
nn.Linear(512,10))
def forward(self,x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = x.view(x.size(0),-1)
x = self.fcs(x)
return x
| 1,035 | 27 | 71 | py |
torch2trt | torch2trt-master/examples/contrib/quantization_aware_training/datasets/cifar10.py | import torch
import torchvision
import torchvision.transforms as transforms
class Cifar10Loaders:
"""
Data loaders for cifar 10 dataset
"""
def __init__(self, data_dir='/tmp/cifar10', download=True, batch_size=128, pin_memory=True, num_workers=4):
self.data_dir = data_dir
self.download = download
self.batch_size= batch_size
self.pin_memory = pin_memory
self.num_workers = num_workers
self.train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
self.test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
def train_loader(self,shuffle=True):
trainset = torchvision.datasets.CIFAR10(root=self.data_dir, train=True, download=self.download, transform=self.train_transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory)
return trainloader
def test_loader(self,shuffle=False):
testset = torchvision.datasets.CIFAR10(root=self.data_dir, train=False, download=self.download, transform=self.test_transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=self.batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=self.pin_memory)
return testloader
| 1,641 | 41.102564 | 162 | py |
torch2trt | torch2trt-master/examples/contrib/quantization_aware_training/utils/utilities.py | import torch
import torch.nn as nn
import numpy as np
import collections
from pytorch_quantization import tensor_quant
from torch2trt.contrib.qat.layers.quant_conv import QuantConvBN2d,QuantConv2d,IQuantConv2d, IQuantConvBN2d
from torch2trt.contrib.qat.layers.quant_activation import QuantReLU, IQuantReLU
import torchvision.models as models
import re
import timeit
def transfer_learning_resnet18(pretrained=True):
resnet18 = models.resnet18(pretrained=pretrained)
num_ftrs = resnet18.fc.in_features
resnet18.fc = nn.Linear(num_ftrs, 10)
return resnet18
def transfer_learning_resnet34(pretrained=True):
resnet34 = models.resnet34(pretrained=pretrained)
num_ftrs = resnet34.fc.in_features
resnet34.fc = nn.Linear(num_ftrs,10)
return resnet34
def mapping_names(state_dict):
'''
Remap torchvision-style ResNet parameter names to the QAT layer layout used here,
e.g. 'layer1.0.conv1.weight' -> 'layer1.0.conv1.qconv.0.weight' and
'layer1.0.bn1.running_mean' -> 'layer1.0.conv1.qconv.0.bn.running_mean'.
'''
new_list = collections.OrderedDict()
for k,v in state_dict.items():
if re.search(r'conv\d.weight',k):
item = re.sub('weight','qconv.0.weight',k)
print("replacing {} to {}".format(k,item))
new_list[item]=v
elif re.search(r'bn\d.\w+',k):
m = re.search(r'bn\d.\w+',k).group(0)
word=m.split(".")[-1]
num = re.search(r'\d',m).group(0)
new_name = "conv"+num+".qconv.0.bn."+word
item = re.sub(r'bn\d.\w+',new_name,k)
print("replacing {} to {}".format(k,item))
new_list[item]=v
elif re.search(r'downsample.0.weight',k):
item = re.sub('weight','qconv.0.weight',k)
print("replacing {} to {}".format(k,item))
new_list[item]=v
elif re.search(r'downsample.1.\w+',k):
m = re.search(r'downsample.1.\w+',k).group(0)
word = m.split(".")[-1]
new_name = "downsample.0.qconv.0.bn."+word
item = re.sub(r'downsample.1.\w+',new_name,k)
print("replacing {} to {}".format(k,item))
new_list[item]=v
else:
print("adding {} to the new list".format(k))
new_list[k]=v
return new_list
def add_missing_keys(model_state,model_state_dict):
"""
Add entries that exist in the target model state but are missing from the checkpoint:
folded BN weights/biases default to ones, and quantizer _amax values default to 127.
"""
for k,v in model_state.items():
if k not in model_state_dict.keys():
if re.search(r'folded_weight',k):
item = re.sub("folded_weight","weight",k)
tensor_size = model_state[item].size()
model_state_dict[k] = torch.ones(tensor_size)
print("adding {} with shape {} to the model state dict".format(k,tensor_size))
elif re.search(r'folded_bias',k):
item = re.sub("folded_bias","weight",k)
tensor_size = model_state[item].size()
model_state_dict[k] = torch.ones(tensor_size[0])
print("adding {} with shape {} to the model state dict".format(k,tensor_size[0]))
else:
print("adding {} to the model state dict".format(k))
model_state_dict[k]= torch.tensor(127)
return model_state_dict
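# Usage sketch (illustrative, not part of the original utilities): one way to seed a QAT
# model from an ordinary torchvision checkpoint is to remap the parameter names first and
# then fill in the QAT-only entries, e.g.
#   remapped = mapping_names(torchvision_model.state_dict())
#   full_state = add_missing_keys(qat_model.state_dict(), remapped)
#   qat_model.load_state_dict(full_state)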
## QAT wrapper for ReLU layer: toggles between training and inference
class qrelu(torch.nn.Module):
def __init__(self,inplace=False,qat=False,infer=False):
super().__init__()
if qat:
if infer:
self.relu = IQuantReLU(inplace)
else:
self.relu = QuantReLU(inplace)
else:
self.relu = nn.ReLU(inplace)
def forward(self,input):
return self.relu(input)
'''
Wrapper for a conv2d + BN + ReLU layer.
Toggles QAT mode on and off, and, within QAT mode, between training and inference.
In QAT mode the layer computes:
conv(quantized_weight) + BN + ReLU + quantized op.
A short usage sketch follows the class definition below.
'''
class qconv2d(torch.nn.Module):
"""
common layer for qat and non qat mode
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int=3,
stride: int=1,
padding: int=0,
groups: int=1,
dilation: int=1,
bias = None,
padding_mode: str='zeros',
eps: float=1e-5,
momentum: float=0.1,
freeze_bn = False,
act: bool= True,
norm: bool=True,
qat: bool=False,
infer: bool=False):
super().__init__()
if qat:
if infer:
if norm:
layer_list = [IQuantConvBN2d(in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
dilation=dilation,
bias=bias,
padding_mode=padding_mode)]
else:
layer_list = [IQuantConv2d(in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
dilation=dilation,
bias=bias,
padding_mode=padding_mode)]
else:
if norm:
layer_list=[QuantConvBN2d(in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
dilation=dilation,
bias=bias,
padding_mode=padding_mode,
quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_PER_TENSOR)]
else:
layer_list = [QuantConv2d(in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
dilation=dilation,
bias=bias,
padding_mode=padding_mode,
quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_PER_TENSOR)]
if act:
if infer:
layer_list.append(IQuantReLU())
else:
layer_list.append(QuantReLU())
self.qconv = nn.Sequential(*layer_list)
else:
layer_list=[
nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
groups=groups)]
if norm:
layer_list.append(nn.BatchNorm2d(out_channels))
if act:
layer_list.append(nn.ReLU())
self.qconv = nn.Sequential(*layer_list)
def forward(self,inputs):
return self.qconv(inputs)
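# Usage sketch (illustrative, not part of the original file): with the default arguments
# qconv2d builds a plain conv + BN + ReLU block; qat=True swaps in the quantization-aware
# layers and infer=True selects their inference-time variants, e.g.
#   block = qconv2d(3, 32, padding=1, qat=True)   # QAT conv + BN + ReLU
#   y = block(torch.randn(1, 3, 32, 32))          # -> shape (1, 32, 32, 32)
# qrelu plays the same role for standalone activations.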
def calculate_accuracy(model,data_loader, is_cuda=True):
correct=0
total=0
model.eval()
with torch.no_grad():
for data in data_loader:
images , labels = data
if is_cuda:
images = images.cuda()
labels = labels.cuda()
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted==labels).sum().item()
acc = correct * 100 / total
return acc
def timeGraph(model, input_t, num_loops):
print("Warm up ...")
with torch.no_grad():
for _ in range(20):
features = model(input_t)
torch.cuda.synchronize()
print("Start timing ...")
timings = []
with torch.no_grad():
for i in range(num_loops):
start_time = timeit.default_timer()
features = model(input_t)
torch.cuda.synchronize()
end_time = timeit.default_timer()
timings.append(end_time - start_time)
#print("Iteration {}: {:.6f} s".format(i, end_time - start_time))
print("Input shape:", input_t.size())
print("Output features size:", features.size())
return timings
def printStats(graphName, timings, batch_size):
times = np.array(timings)
steps = len(times)
speeds = batch_size / times
time_mean = np.mean(times)
time_med = np.median(times)
time_99th = np.percentile(times, 99)
time_std = np.std(times, ddof=0)
speed_mean = np.mean(speeds)
speed_med = np.median(speeds)
msg = ("\n%s =================================\n"
"batch size=%d, num iterations=%d\n"
" Median FPS: %.1f, mean: %.1f\n"
" Median latency: %.6f, mean: %.6f, 99th_p: %.6f, std_dev: %.6f\n"
) % (graphName,
batch_size, steps,
speed_med, speed_mean,
time_med, time_mean, time_99th, time_std)
print(msg)
| 9,171 | 32.845018 | 106 | py |
torch2trt | torch2trt-master/scripts/dump_converters.py | import argparse
import sys
import subprocess
import os
from importlib.machinery import SourceFileLoader
torch2trt = SourceFileLoader("torch2trt", "torch2trt/__init__.py").load_module() # to load relative to root
HEADER = """
# Converters
This table contains a list of supported PyTorch methods and their associated converters.
If your model is not converting, a good start in debugging would be to see if it contains a method not listed
in this table. You may also find these a useful reference when writing your own converters.
| Method | Converter |
|--------|-----------|"""
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--github',
type=str,
default='https://github.com/NVIDIA-AI-IOT/torch2trt')
parser.add_argument('--tag', type=str, default='master')
args = parser.parse_args()
print(HEADER)
for method, entry in torch2trt.CONVERTERS.items():
if not entry['is_real']:
continue
converter = entry['converter']
# get commit hash
# p = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# commit, err = p.communicate()
# commit = commit.decode('utf-8').strip('\n')
# get github URL
url = '{github}/blob/{commit}/{relpath}#L{lineno}'.format(
github=args.github,
commit=args.tag,
relpath=os.path.relpath(converter.__code__.co_filename,
os.path.abspath('.')),
lineno=converter.__code__.co_firstlineno)
print('| ``{method}`` | [``{converter}``]({url}) |'.format(
method=method, converter=converter.__name__, url=url))
| 1,834 | 32.363636 | 109 | py |
torch2trt | torch2trt-master/scripts/profile_timm.py | import os
import timm
import torch
import time
import json
from torch2trt import torch2trt, TRTModule, trt
from dataclasses import dataclass, asdict
from argparse_dataclass import ArgumentParser
from typing import Literal
from enum import Enum
from contextlib import redirect_stderr, redirect_stdout
import io
class Status:
STARTING = "STARTING"
PROFILING = "PROFILING"
FINISHED = "FINISHED"
def profile_qps(model, data, num_warmup, num_profile):
for _ in range(num_warmup):
out = model(data)
torch.cuda.current_stream().synchronize()
t0 = time.perf_counter()
for _ in range(num_profile):
out = model(data)
torch.cuda.current_stream().synchronize()
t1 = time.perf_counter()
return num_profile / (t1 - t0)
def profile_latency(model, data, num_warmup, num_profile):
for _ in range(num_warmup):
out = model(data)
torch.cuda.current_stream().synchronize()
t0 = time.perf_counter()
for _ in range(num_profile):
out = model(data)
torch.cuda.current_stream().synchronize()
t1 = time.perf_counter()
return (t1 - t0) / num_profile
def log_level_to_trt(log_level: str):
if log_level == "verbose":
return trt.Logger.VERBOSE
elif log_level == "info":
return trt.Logger.INFO
elif log_level == "error":
return trt.Logger.ERROR
elif log_level == "warning":
return trt.Logger.WARNING
elif log_level == "internal_error":
return trt.Logger.INTERNAL_ERROR
else:
raise ValueError(f"Unknown log level: {log_level}")
@dataclass
class Args:
model: str
output_dir: str = "data/timm"
batch_size: int = 1
fp16_mode: bool = False
int8_mode: bool = False
size: int = 224
pretrained: bool = False
save_engine: bool = False
num_warmup: int = 10
num_profile: int = 100
use_cached_engine: bool = False
log_level: Literal["verbose", "info", "error", "warning", "internal_error"] = "error"
use_onnx: bool = False
def id(self):
fn = self.model
if self.pretrained:
fn += "-pre"
fn += "-trt"
fn += f"-{self.batch_size}"
fn += f"-{self.size}"
if self.fp16_mode:
fn += "-fp16"
if self.int8_mode:
fn += "-int8"
if self.use_onnx:
fn += "-onnx"
return fn
def engine_filepath(self):
return os.path.join(self.full_output_dir(), self.id() + ".pth")
def metadata_filepath(self):
return os.path.join(self.full_output_dir(), self.id() + ".json")
def write_output(self, output):
with open(self.metadata_filepath(), 'w') as f:
json.dump(output, f, indent=2)
def stderr_filepath(self):
return os.path.join(self.full_output_dir(), self.id() + ".stderr")
def stdout_filepath(self):
return os.path.join(self.full_output_dir(), self.id() + ".stdout")
def full_output_dir(self):
return os.path.join(self.output_dir, self.id())
def run(self):
if not os.path.exists(self.full_output_dir()):
os.makedirs(self.full_output_dir())
with open(self.stderr_filepath(), 'w') as ferr:
with open(self.stdout_filepath(), 'w') as fout:
with redirect_stderr(ferr):
with redirect_stdout(fout):
with torch.no_grad():
model = timm.create_model(self.model, pretrained=self.pretrained)
model = model.cuda().eval()
data = torch.randn(self.batch_size, 3, self.size, self.size).cuda()
output = {}
output['args'] = asdict(self)
output['status'] = str(Status.STARTING)
self.write_output(output)
if self.use_cached_engine and os.path.exists(self.engine_filepath()):
print("Loading cached engine...")
model_trt = TRTModule()
model_trt.load_state_dict(torch.load(self.engine_filepath()))
else:
print("Building engine...")
model_trt = torch2trt(
model,
[data],
fp16_mode=self.fp16_mode,
int8_mode=self.int8_mode,
log_level=log_level_to_trt(self.log_level),
use_onnx=self.use_onnx
)
if self.save_engine:
print(f"Saving engine to {self.engine_filepath()}...")
torch.save(model_trt.state_dict(), self.engine_filepath())
output['status'] = str(Status.PROFILING)
self.write_output(output)
data = torch.randn(self.batch_size, 3, self.size, self.size).cuda()
print(f"Profiling PyTorch...")
qps_torch = profile_qps(model, data, self.num_warmup, self.num_profile)
latency_torch = profile_latency(model, data, self.num_warmup, self.num_profile)
fps_torch = qps_torch * self.batch_size
print(f"Profiling TensorRT...")
qps_trt = profile_qps(model_trt, data, self.num_warmup, self.num_profile)
latency_trt = profile_latency(model_trt, data, self.num_warmup, self.num_profile)
fps_trt = qps_trt * self.batch_size
data = torch.randn(self.batch_size, 3, self.size, self.size).cuda()
dout = model(data)
dout_trt = model_trt(data)
max_abs_error = float(torch.max(torch.abs(dout - dout_trt)))
result = {}
result['latency_torch'] = latency_torch
result['latency_trt'] = latency_trt
result['fps_torch'] = fps_torch
result['fps_trt'] = fps_trt
result['max_abs_error'] = max_abs_error
output['results'] = result
output['status'] = str(Status.FINISHED)
self.write_output(output)
if __name__ == "__main__":
parser = ArgumentParser(Args)
args = parser.parse_args()
print(json.dumps(asdict(args), indent=2))
args.run() | 6,845 | 36.823204 | 109 | py |
torch2trt | torch2trt-master/torch2trt/flattener.py | import copy
import torch
def _default_condition(x):
return isinstance(x, torch.Tensor) and (x.dtype is torch.half or x.dtype is torch.float or x.dtype == torch.bool)
def _make_schema_from_value(value, condition=_default_condition, size=0):
if condition(value):
return size, size + 1
elif isinstance(value, list) or isinstance(value, tuple):
schema = []
for child_value in value:
child_schema, size = _make_schema_from_value(child_value, condition, size)
schema.append(child_schema)
if isinstance(value, tuple):
schema = tuple(schema)
return schema, size
elif isinstance(value, dict):
schema = {}
for child_key in sorted(value.keys()):
child_value = value[child_key]
child_schema, size = _make_schema_from_value(child_value, condition, size)
schema[child_key] = child_schema
return schema, size
else:
return None, size
class Flattener(object):
def __init__(self, schema, size):
self._schema = schema
self._size = size
@staticmethod
def from_value(value, condition=_default_condition):
return Flattener(*_make_schema_from_value(value, condition))
@staticmethod
def from_dict(x):
return Flattener(x['schema'], x['size'])
def dict(self):
return {'schema': self.schema, 'size': self.size}
@property
def schema(self):
return self._schema
@property
def size(self):
return self._size
def __len__(self):
return self._size
def _flatten(self, value, result):
if isinstance(self._schema, int):
result[self._schema] = value
elif isinstance(self._schema, list) or isinstance(self._schema, tuple):
for child_value, child_schema in zip(value, self._schema):
Flattener(child_schema, self.size)._flatten(child_value, result)
elif isinstance(self._schema, dict):
for key in sorted(self._schema.keys()):
child_value = value[key]
child_schema = self._schema[key]
Flattener(child_schema, self.size)._flatten(child_value, result)
def flatten(self, value):
result = [None for i in range(self.size)]
self._flatten(value, result)
return result
def unflatten(self, flattened):
if isinstance(self._schema, int):
return flattened[self._schema]
elif isinstance(self._schema, list) or isinstance(self._schema, tuple):
result = []
for child_schema in self._schema:
result.append(Flattener(child_schema, self.size).unflatten(flattened))
if isinstance(self._schema, tuple):
result = tuple(result)
return result
elif isinstance(self._schema, dict):
result = {}
for child_key in sorted(self._schema.keys()):
child_schema = self._schema[child_key]
result[child_key] = Flattener(child_schema, self.size).unflatten(flattened)
return result
else:
return None | 3,178 | 33.182796 | 117 | py |
torch2trt | torch2trt-master/torch2trt/module_test.py | import torch
import torchvision
class ModuleTest(object):
def __init__(self, module_fn, dtype, device, input_shapes, **torch2trt_kwargs):
self.module_fn = module_fn
self.dtype = dtype
self.device = device
self.input_shapes = input_shapes
self.torch2trt_kwargs = torch2trt_kwargs
def module_name(self):
return self.module_fn.__module__ + '.' + self.module_fn.__name__
MODULE_TESTS = [
]
def add_module_test(dtype, device, input_shapes, enabled=True, **torch2trt_kwargs):
def register_module_test(module):
global MODULE_TESTS
MODULE_TESTS += [ModuleTest(module, dtype, device, input_shapes, **torch2trt_kwargs)]
return module
def pass_module_test(module):
return module
if enabled:
return register_module_test
else:
return pass_module_test
return register_module_test
| 908 | 24.25 | 93 | py |
torch2trt | torch2trt-master/torch2trt/test.py | from torch2trt import *
from .module_test import ModuleTest, MODULE_TESTS
import time
import argparse
import re
import runpy
import traceback
from termcolor import colored
import math
import tempfile
import numpy as np
def pSNR(model_op,trt_op):
#model_op = model_op.cpu().detach().numpy().flatten()
#trt_op = trt_op.cpu().detach().numpy().flatten()
# Calculating Mean Squared Error
mse = np.sum(np.square(model_op - trt_op)) / len(model_op)
# Calcuating peak signal to noise ratio
try:
psnr_db = 20 * math.log10(np.max(abs(model_op))) - 10 * math.log10(mse)
except:
psnr_db = np.nan
return mse,psnr_db
def run(self, serialize=False):
# create module
module = self.module_fn()
module = module.to(self.device)
module = module.type(self.dtype)
module = module.eval()
# create inputs for conversion
inputs_conversion = ()
for shape in self.input_shapes:
inputs_conversion += (torch.zeros(shape).to(self.device).type(self.dtype), )
# convert module
module_trt = torch2trt(module, inputs_conversion, max_workspace_size=1 << 20, **self.torch2trt_kwargs)
if serialize:
with tempfile.TemporaryFile() as f:
torch.save(module_trt.state_dict(), f)
f.seek(0)
module_trt = TRTModule()
module_trt.load_state_dict(torch.load(f))
# create inputs for torch/trt.. copy of inputs to handle inplace ops
inputs = ()
for shape in self.input_shapes:
inputs += (torch.randn(shape).to(self.device).type(self.dtype), )
inputs_trt = tuple([tensor.clone() for tensor in inputs])
# test output against original
outputs = module(*inputs)
outputs_trt = module_trt(*inputs_trt)
if not isinstance(outputs, tuple):
outputs = (outputs, )
if not isinstance(outputs_trt, tuple):
outputs_trt = (outputs_trt,)
# compute max error
max_error = 0
for i in range(len(outputs)):
max_error_i = 0
if outputs[i].dtype == torch.bool:
max_error_i = torch.sum(outputs[i] ^ outputs_trt[i])
else:
max_error_i = torch.max(torch.abs(outputs[i] - outputs_trt[i]))
if max_error_i > max_error:
max_error = max_error_i
## calculate peak signal to noise ratio
assert(len(outputs) == len(outputs_trt))
## Check if output is boolean
# if yes, then don't calculate PSNR
if outputs[0].dtype == torch.bool:
mse = np.nan
psnr_db = np.nan
else:
model_op = []
trt_op = []
for i in range(len(outputs)):
model_op.extend(outputs[i].detach().cpu().numpy().flatten())
trt_op.extend(outputs_trt[i].detach().cpu().numpy().flatten())
model_op = np.array(model_op)
trt_op = np.array(trt_op)
mse,psnr_db = pSNR(model_op,trt_op)
# benchmark pytorch throughput
torch.cuda.current_stream().synchronize()
t0 = time.time()
for i in range(50):
outputs = module(*inputs)
torch.cuda.current_stream().synchronize()
t1 = time.time()
fps = 50.0 / (t1 - t0)
# benchmark tensorrt throughput
torch.cuda.current_stream().synchronize()
t0 = time.time()
for i in range(50):
outputs = module_trt(*inputs)
torch.cuda.current_stream().synchronize()
t1 = time.time()
fps_trt = 50.0 / (t1 - t0)
# benchmark pytorch latency
torch.cuda.current_stream().synchronize()
t0 = time.time()
for i in range(50):
outputs = module(*inputs)
torch.cuda.current_stream().synchronize()
t1 = time.time()
ms = 1000.0 * (t1 - t0) / 50.0
# benchmark tensorrt latency
torch.cuda.current_stream().synchronize()
t0 = time.time()
for i in range(50):
outputs = module_trt(*inputs)
torch.cuda.current_stream().synchronize()
t1 = time.time()
ms_trt = 1000.0 * (t1 - t0) / 50.0
return max_error,psnr_db,mse, fps, fps_trt, ms, ms_trt
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--output', '-o', help='Test output file path', type=str, default='torch2trt_test.md')
parser.add_argument('--name', help='Regular expression to filter modules to test by name', type=str, default='.*')
parser.add_argument('--tolerance', help='Maximum error to print warning for entry', type=float, default='-1')
parser.add_argument('--include', help='Additional python module to include, defining additional tests', action='append', default=[])
parser.add_argument('--use_onnx', help='Whether to test using ONNX or torch2trt tracing', action='store_true')
parser.add_argument('--serialize', help='Whether to use serialization / deserialization of TRT modules before test', action='store_true')
args = parser.parse_args()
for include in args.include:
runpy.run_module(include)
num_tests, num_success, num_tolerance, num_error, num_tolerance_psnr = 0, 0, 0, 0, 0
for test in MODULE_TESTS:
# filter by module name
name = test.module_name()
if not re.search(args.name, name):
continue
num_tests += 1
# run test
try:
if args.use_onnx:
test.torch2trt_kwargs.update({'use_onnx': True})
max_error,psnr_db,mse, fps, fps_trt, ms, ms_trt = run(test, serialize=args.serialize)
# write entry
line = '| %70s | %s | %25s | %s | %.2E | %.2f | %.2E | %.3g | %.3g | %.3g | %.3g |' % (name, test.dtype.__repr__().split('.')[-1], str(test.input_shapes), str(test.torch2trt_kwargs), max_error,psnr_db,mse, fps, fps_trt, ms, ms_trt)
if args.tolerance >= 0 and max_error > args.tolerance:
print(colored(line, 'yellow'))
num_tolerance += 1
elif psnr_db < 100:
print(colored(line, 'magenta'))
num_tolerance_psnr +=1
else:
print(line)
num_success += 1
except:
line = '| %s | %s | %s | %s | N/A | N/A | N/A | N/A | N/A | N/A | N/A |' % (name, test.dtype.__repr__().split('.')[-1], str(test.input_shapes), str(test.torch2trt_kwargs))
print(colored(line, 'red'))
num_error += 1
tb = traceback.format_exc()
print(tb)
with open(args.output, 'a+') as f:
f.write(line + '\n')
print('NUM_TESTS: %d' % num_tests)
print('NUM_SUCCESSFUL_CONVERSION: %d' % num_success)
print('NUM_FAILED_CONVERSION: %d' % num_error)
print('NUM_ABOVE_TOLERANCE: %d' % num_tolerance)
print('NUM_pSNR_TOLERANCE: %d' %num_tolerance_psnr)
| 6,819 | 33.619289 | 243 | py |
torch2trt | torch2trt-master/torch2trt/dataset_test.py | import pytest
import torch
import torch.nn as nn
from torch2trt.dataset import (
TensorBatchDataset,
ListDataset,
FolderDataset
)
from tempfile import mkdtemp
def test_dataset_shapes():
dataset = ListDataset()
dataset.insert((torch.randn(1, 3, 32, 32), torch.randn(1, 4)))
dataset.insert((torch.randn(1, 3, 64, 64), torch.randn(1, 8)))
dataset.insert((torch.randn(1, 3, 48, 48), torch.randn(1, 6)))
shapes = dataset.shapes()
assert(shapes[0][0] == (1, 3, 32, 32))
assert(shapes[0][1] == (1, 3, 64, 64))
assert(shapes[1][0] == (1, 4))
assert(shapes[1][1] == (1, 8))
assert(dataset.min_shapes()[0] == (1, 3, 32, 32))
assert(dataset.min_shapes()[1] == (1, 4))
assert(dataset.max_shapes()[0] == (1, 3, 64, 64))
assert(dataset.max_shapes()[1] == (1, 8))
assert(dataset.median_numel_shapes()[0] == (1, 3, 48, 48))
assert(dataset.median_numel_shapes()[1] == (1, 6))
def test_dataset_infer_dynamic_axes():
dataset = ListDataset()
dataset.insert((torch.randn(1, 3, 32, 32), torch.randn(1, 4)))
dataset.insert((torch.randn(1, 3, 64, 64), torch.randn(1, 8)))
dataset.insert((torch.randn(1, 3, 48, 48), torch.randn(1, 6)))
dynamic_axes = dataset.infer_dynamic_axes()
assert(dynamic_axes[0] == [2, 3])
assert(dynamic_axes[1] == [1])
def test_tensor_batch_dataset_record():
dataset = TensorBatchDataset()
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 6, kernel_size=3, stride=1, padding=1).cuda().eval()
def forward(self, x, y):
a = self.conv(x)
b = self.conv(y)
return torch.cat([a, b], dim=0)
inputs = [
torch.randn(1, 3, 32, 32).cuda(),
torch.randn(1, 3, 32, 32).cuda()
]
module = TestModule().cuda().eval()
with dataset.record(module):
for i in range(5):
module(*inputs)
assert(len(dataset) == 5)
assert(len(dataset[0]) == 2)
assert(dataset[0][0].shape == (1, 3, 32, 32))
assert(dataset[0][1].shape == (1, 3, 32, 32))
def test_list_dataset_record():
dataset = ListDataset()
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 6, kernel_size=3, stride=1, padding=1).cuda().eval()
def forward(self, x, y):
a = self.conv(x)
b = self.conv(y)
return torch.cat([a, b], dim=0)
inputs = [
torch.randn(1, 3, 32, 32).cuda(),
torch.randn(1, 3, 32, 32).cuda()
]
module = TestModule().cuda().eval()
with dataset.record(module):
for i in range(5):
module(*inputs)
assert(len(dataset) == 5)
assert(len(dataset[0]) == 2)
assert(dataset[0][0].shape == (1, 3, 32, 32))
assert(dataset[0][1].shape == (1, 3, 32, 32))
def test_folder_dataset_record():
dataset = FolderDataset(mkdtemp())
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 6, kernel_size=3, stride=1, padding=1).cuda().eval()
def forward(self, x, y):
a = self.conv(x)
b = self.conv(y)
return torch.cat([a, b], dim=0)
device = torch.device('cuda:0')
inputs = [
torch.randn(1, 3, 32, 32, device=device),
torch.randn(1, 3, 32, 32, device=device)
]
module = TestModule().to(device).eval()
with dataset.record(module):
for i in range(5):
module(*inputs)
assert(len(dataset) == 5)
assert(len(dataset[0]) == 2)
assert(dataset[0][0].shape == (1, 3, 32, 32))
assert(dataset[0][1].shape == (1, 3, 32, 32))
assert(dataset[0][0].device == device) | 3,805 | 26.185714 | 89 | py |
torch2trt | torch2trt-master/torch2trt/dataset.py | import os
import torch
import glob
from uuid import uuid1
from torch2trt.flattener import Flattener
__all__ = [
'DatasetRecorder',
'Dataset',
'ListDataset',
'TensorBatchDataset'
]
class DatasetRecorder(object):
def __init__(self, dataset, module):
self.dataset = dataset
self.module = module
self.handle = None
def __enter__(self, *args, **kwargs):
if self.handle is not None:
raise RuntimeError('DatasetRecorder is already active.')
self.handle = self.module.register_forward_pre_hook(self._callback)
return self
def __exit__(self, *args, **kwargs):
if self.handle is not None:
self.handle.remove()
self.handle = None
def _callback(self, module, input):
self.dataset.insert(input)
class Dataset(object):
def __len__(self):
raise NotImplementedError
def __getitem__(self, index):
raise NotImplementedError
def insert(self, item):
raise NotImplementedError
def record(self, module):
return DatasetRecorder(self, module)
def num_inputs(self):
return len(self.getitem_flat(0))
@property
def flattener(self):
if not hasattr(self, '_flattener') or self._flattener is None:
assert len(self) > 0, 'Cannot create default flattener without input data.'
value = self[0]
self._flattener = Flattener.from_value(value)
return self._flattener
def getitem_flat(self, index):
return self.flattener.flatten(self[index])
def shapes_for_index(self, index, flat=False):
shapes = [None for i in range(self.num_inputs())]
tensors = self.getitem_flat(index)
for j in range(len(tensors)):
shapes[j] = torch.Size(tuple(tensors[j].shape))
if flat:
return shapes
else:
return self.flattener.unflatten(shapes)
def shapes(self, flat=False):
shapes = [[] for i in range(self.num_inputs())]
for i in range(len(self)):
tensors = self.getitem_flat(i)
for j in range(len(tensors)):
shapes[j].append(torch.Size(tuple(tensors[j].shape)))
if flat:
return shapes
else:
return self.flattener.unflatten(shapes)
def _shape_stats(self, stat_fn, flat=False):
shapes = []
for s in self.shapes(flat=True):
shape_tensor = []
for si in s:
shape_tensor.append(tuple(si))
shape_tensor = torch.LongTensor(shape_tensor)
shapes.append(shape_tensor)
stat_shapes = []
for shape in shapes:
stat_shape = torch.Size(stat_fn(shape))
stat_shapes.append(stat_shape)
if flat:
return stat_shapes
else:
return self.flattener.unflatten(stat_shapes)
def min_shapes(self, flat=False):
return self._shape_stats(lambda x: torch.min(x, dim=0)[0], flat)
def max_shapes(self, flat=False):
return self._shape_stats(lambda x: torch.max(x, dim=0)[0], flat)
def item_numel(self, index):
tensors = self.getitem_flat(index)
return sum([t.numel() for t in tensors])
def median_numel_shapes(self, flat=False):
numels = torch.LongTensor([self.item_numel(i) for i in range(len(self))])
median_index = int(torch.argsort(numels)[len(numels) // 2])
return self.shapes_for_index(median_index, flat=flat)
def infer_dynamic_axes(self, flat=False):
min_shapes = self.min_shapes(flat=True)
max_shapes = self.max_shapes(flat=True)
dynamic_axes = [[] for i in range(self.num_inputs())]
for i, (mins, maxs) in enumerate(zip(min_shapes, max_shapes)):
for j, (mins_i, maxs_i) in enumerate(zip(mins, maxs)):
if mins_i != maxs_i:
dynamic_axes[i].append(j)
if flat:
return dynamic_axes
else:
return self.flattener.unflatten(dynamic_axes)
class ListDataset(Dataset):
def __init__(self, items=None):
if items is None:
items = []
self.items = [t for t in items]
def __len__(self):
return len(self.items)
def __getitem__(self, index):
return self.items[index]
def insert(self, item):
self.items.append(item)
class TensorBatchDataset(Dataset):
def __init__(self, tensors=None):
if tensors is not None:
self._flattener = Flattener.from_value(tensors)
self.tensors = self._flattener.flatten(tensors)
else:
self._flattener = None
self.tensors = None
def __len__(self):
if self.tensors is None:
return 0
else:
return len(self.tensors[0])
def __getitem__(self, idx):
if self.tensors is None:
raise IndexError('Dataset is empty.')
return self.flattener.unflatten([t[idx:idx+1] for t in self.tensors])
def insert(self, tensors):
if self._flattener is None:
self._flattener = Flattener.from_value(tensors)
tensors = self.flattener.flatten(tensors)
if self.tensors is None:
self.tensors = tensors
else:
if len(self.tensors) != len(tensors):
raise ValueError('Number of inserted tensors does not match the number of tensors in the current dataset.')
self.tensors = tuple([
torch.cat((self.tensors[index], tensors[index]), dim=0)
for index in range(len(tensors))
])
class FolderDataset(Dataset):
def __init__(self, folder):
super().__init__()
if not os.path.exists(folder):
os.makedirs(folder)
self.folder = folder
def file_paths(self):
return sorted(glob.glob(os.path.join(self.folder, '*.pth')))
def __len__(self):
return len(self.file_paths())
def __getitem__(self, index):
return torch.load(self.file_paths()[index])
def insert(self, tensors):
i = 0
file_paths = [os.path.basename(path) for path in self.file_paths()]
while ('input_%d.pth' % i) in file_paths:
i += 1
torch.save(tensors, os.path.join(self.folder, 'input_%d.pth' % i)) | 6,391 | 28.456221 | 123 | py |
torch2trt | torch2trt-master/torch2trt/dataset_calibrator_test.py | import pytest
import tensorrt as trt
import torch
import torch.nn as nn
from torch2trt.dataset import (
TensorBatchDataset,
ListDataset
)
from torch2trt import torch2trt
def test_dataset_calibrator_batch_dataset():
torch.manual_seed(0)
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 6, kernel_size=3, stride=1, padding=1).cuda().eval()
def forward(self, x, y):
a = self.conv(x)
b = self.conv(y)
return torch.cat([a, b], dim=0)
inputs = [
torch.randn(1, 3, 32, 32).cuda(),
torch.randn(1, 3, 32, 32).cuda()
]
module = TestModule().cuda().eval()
dataset = TensorBatchDataset()
with dataset.record(module):
for i in range(50):
module(*inputs)
module_trt = torch2trt(
module,
dataset[0],
int8_mode=True,
int8_calib_dataset=dataset,
log_level=trt.Logger.INFO
)
inputs = [
torch.randn(1, 3, 32, 32).cuda(),
torch.randn(1, 3, 32, 32).cuda()
]
output = module(*inputs)
output_trt = module_trt(*inputs)
assert(torch.allclose(output, output_trt, rtol=1e-3, atol=1e-3))
def test_dataset_calibrator_list_dataset():
torch.manual_seed(0)
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 6, kernel_size=3, stride=1, padding=1).cuda().eval()
def forward(self, x, y):
a = self.conv(x)
b = self.conv(y)
return torch.cat([a, b], dim=0)
inputs = [
torch.randn(1, 3, 32, 32).cuda(),
torch.randn(1, 3, 32, 32).cuda()
]
module = TestModule().cuda().eval()
dataset = ListDataset()
with dataset.record(module):
for i in range(50):
module(*inputs)
module_trt = torch2trt(
module,
dataset[0],
int8_mode=True,
int8_calib_dataset=dataset,
log_level=trt.Logger.INFO
)
inputs = [
torch.randn(1, 3, 32, 32).cuda(),
torch.randn(1, 3, 32, 32).cuda()
]
output = module(*inputs)
output_trt = module_trt(*inputs)
assert(torch.allclose(output, output_trt, rtol=1e-3, atol=1e-3))
if __name__ == '__main__':
test_dataset_calibrator_list_dataset() | 2,391 | 21.780952 | 89 | py |
torch2trt | torch2trt-master/torch2trt/flatten_module.py | import torch
import torch.nn as nn
from .flattener import Flattener
class Unflatten(nn.Module):
def __init__(self, module, input_flattener=None, output_flattener=None):
super().__init__()
self.module = module
self.input_flattener = input_flattener
self.output_flattener = output_flattener
def forward(self, *args):
if self.input_flattener is not None:
args = self.input_flattener.flatten(args)
output = self.module(*args)
if self.output_flattener is not None:
output = self.output_flattener.unflatten(output)
return output
class Flatten(nn.Module):
def __init__(self, module, input_flattener=None, output_flattener=None):
super().__init__()
self.module = module
self.input_flattener = input_flattener
self.output_flattener = output_flattener
def forward(self, *args):
if self.input_flattener is not None:
args = self.input_flattener.unflatten(args)
output = self.module(*args)
if self.output_flattener is not None:
output = self.output_flattener.flatten(output)
return output | 1,176 | 30.810811 | 76 | py |
torch2trt | torch2trt-master/torch2trt/dataset_calibrator.py | import torch
import tensorrt as trt
import os
from .flattener import Flattener
__all__ = [
'DEFAULT_CALIBRATION_ALGORITHM',
'DatasetCalibrator'
]
if trt.__version__ >= '5.1':
DEFAULT_CALIBRATION_ALGORITHM = trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2
else:
DEFAULT_CALIBRATION_ALGORITHM = trt.CalibrationAlgoType.ENTROPY_CALIBRATION
class DatasetCalibrator(trt.IInt8Calibrator):
def __init__(self, dataset, algorithm=DEFAULT_CALIBRATION_ALGORITHM, cache_file=None, flattener=None):
super(DatasetCalibrator, self).__init__()
self.dataset = dataset
self.algorithm = algorithm
self.count = 0
self.cache_file = cache_file
if flattener is None:
flattener = Flattener.from_value(dataset[0])
self.flattener = flattener
def get_batch(self, *args, **kwargs):
if self.count < len(self.dataset):
tensors = self.flattener.flatten(self.dataset[self.count])
bindings = [int(t.data_ptr()) for t in tensors]
self.count += 1
return bindings
else:
return []
def get_algorithm(self):
return self.algorithm
def get_batch_size(self):
return 1
def read_calibration_cache(self, *args, **kwargs):
if (self.cache_file is not None) and os.path.exists(self.cache_file):
with open(self.cache_file, 'rb') as f:
return f.read()
def write_calibration_cache(self, cache, *args, **kwargs):
if self.cache_file is not None:
with open(self.cache_file, 'wb') as f:
f.write(cache)
| 1,644 | 29.462963 | 106 | py |
torch2trt | torch2trt-master/torch2trt/__init__.py | from .torch2trt import *
from .converters import *
import tensorrt as trt
def load_plugins():
import torch2trt.torch_plugins
registry = trt.get_plugin_registry()
torch2trt_creators = [c for c in registry.plugin_creator_list if c.plugin_namespace == 'torch2trt']
for c in torch2trt_creators:
registry.register_creator(c, 'torch2trt')
try:
load_plugins()
except:
pass
| 400 | 24.0625 | 103 | py |
torch2trt | torch2trt-master/torch2trt/dynamic_shape_test.py | import pytest
import torch
import torch.nn as nn
import tensorrt as trt
from torch2trt import torch2trt
from torch2trt.dataset import ListDataset
def test_dynamic_shape_conv2d():
torch.manual_seed(0)
module = nn.Conv2d(3, 6, kernel_size=3, stride=1, padding=1).cuda().eval()
dataset = ListDataset()
dataset.insert((torch.randn(1, 3, 224, 224).cuda(),))
dataset.insert((torch.randn(1, 3, 64, 64).cuda(),))
dataset.insert((torch.randn(1, 3, 128, 128).cuda(),))
dataset.insert((torch.randn(4, 3, 32, 32).cuda(),))
module_trt = torch2trt(
module,
dataset,
log_level=trt.Logger.INFO
)
inputs = dataset[0]
assert(torch.allclose(module(*inputs), module_trt(*inputs), rtol=1e-3, atol=1e-3))
inputs = dataset[1]
assert(torch.allclose(module(*inputs), module_trt(*inputs), rtol=1e-3, atol=1e-3))
inputs = dataset[2]
assert(torch.allclose(module(*inputs), module_trt(*inputs), rtol=1e-3, atol=1e-3))
inputs = dataset[3]
assert(torch.allclose(module(*inputs), module_trt(*inputs), rtol=1e-3, atol=1e-3))
def test_dynamic_shape_conv2d_onnx():
torch.manual_seed(0)
module = nn.Conv2d(3, 6, kernel_size=3, stride=1, padding=1).cuda().eval()
dataset = ListDataset()
dataset.insert((torch.randn(1, 3, 224, 224).cuda(),))
dataset.insert((torch.randn(1, 3, 64, 64).cuda(),))
dataset.insert((torch.randn(1, 3, 128, 128).cuda(),))
dataset.insert((torch.randn(4, 3, 32, 32).cuda(),))
module_trt = torch2trt(
module,
dataset,
use_onnx=True,
log_level=trt.Logger.INFO
)
inputs = dataset[0]
assert(torch.allclose(module(*inputs), module_trt(*inputs), rtol=1e-3, atol=1e-3))
inputs = dataset[1]
assert(torch.allclose(module(*inputs), module_trt(*inputs), rtol=1e-3, atol=1e-3))
inputs = dataset[2]
assert(torch.allclose(module(*inputs), module_trt(*inputs), rtol=1e-3, atol=1e-3))
inputs = dataset[3]
assert(torch.allclose(module(*inputs), module_trt(*inputs), rtol=1e-3, atol=1e-3))
if __name__ == '__main__':
test_dynamic_shape_conv2d() | 2,125 | 30.264706 | 86 | py |
torch2trt | torch2trt-master/torch2trt/flatten_module_test.py | import torch
import torch.nn as nn
from torch2trt import torch2trt
def test_flatten_nested_tuple_args():
class TestModule(nn.Module):
def forward(self, x, yz):
return torch.cat([x, yz[0], yz[1]], dim=-1)
module = TestModule().cuda().eval()
data = (
torch.randn(1, 3, 32, 32).cuda(),
(
torch.randn(1, 3, 32, 32).cuda(),
torch.randn(1, 3, 32, 32).cuda()
)
)
module_trt = torch2trt(module, data)
out = module(*data)
out_trt = module_trt(*data)
assert(torch.allclose(out, out_trt, atol=1e-3, rtol=1e-3))
| 610 | 19.366667 | 62 | py |
torch2trt | torch2trt-master/torch2trt/flattener_test.py | import pytest
import torch
from torch2trt.flattener import Flattener
def test_flattener_from_value():
x = (torch.ones(3), torch.ones(3))
flattener = Flattener.from_value(x)
assert(isinstance(flattener.schema, tuple))
assert(flattener.schema[0] == 0)
assert(flattener.schema[1] == 1)
def test_flattener_tuple():
x = (torch.ones(3), torch.ones(3))
flattener = Flattener.from_value(x)
y = flattener.flatten(x)
assert(len(y) == len(x))
assert(y[0] is x[0])
assert(y[1] is x[1])
z = flattener.unflatten(y)
assert(isinstance(z, tuple))
assert(z[0] is x[0])
assert(z[1] is x[1])
def test_flattener_list():
x = [torch.ones(3), torch.ones(3)]
flattener = Flattener.from_value(x)
y = flattener.flatten(x)
assert(len(y) == len(x))
assert(y[0] is x[0])
assert(y[1] is x[1])
z = flattener.unflatten(y)
assert(isinstance(z, list))
assert(z[0] is x[0])
assert(z[1] is x[1])
def test_flattener_dict():
x = {'a': torch.ones(3), 'b': torch.ones(3)}
flattener = Flattener.from_value(x)
y = flattener.flatten(x)
assert(len(y) == len(x))
assert((y[0] is x['a'] and y[1] is x['b']) or (y[1] is x['a'] and y[0] is x['b']))
z = flattener.unflatten(y)
assert(isinstance(z, dict))
assert(z['a'] is x['a'])
assert(z['b'] is x['b'])
def test_flattener_nested_tuple():
x = (torch.ones(1), (torch.ones(2), torch.ones(3)))
flattener = Flattener.from_value(x)
y = flattener.flatten(x)
assert(len(y) == 3)
z = flattener.unflatten(y)
assert(isinstance(z, tuple))
assert(isinstance(z[1], tuple))
assert(z[0] is x[0])
assert(z[1][0] is x[1][0])
assert(z[1][1] is x[1][1])
def test_flattener_nested_list():
x = [torch.ones(1), [torch.ones(2), torch.ones(3)]]
flattener = Flattener.from_value(x)
y = flattener.flatten(x)
assert(len(y) == 3)
z = flattener.unflatten(y)
assert(isinstance(z, list))
assert(isinstance(z[1], list))
assert(z[0] is x[0])
assert(z[1][0] is x[1][0])
assert(z[1][1] is x[1][1])
def test_flattener_nested_dict():
x = {'a': torch.ones(1), 'b': {'a': torch.ones(2), 'b': torch.ones(3)}}
flattener = Flattener.from_value(x)
y = flattener.flatten(x)
assert(len(y) == 3)
z = flattener.unflatten(y)
assert(isinstance(z, dict))
assert(isinstance(z['b'], dict))
assert(z['a'] is x['a'])
assert(z['b']['a'] is x['b']['a'])
assert(z['b']['b'] is x['b']['b'])
def test_flattener_heterogeneous():
x = {
'a': (torch.ones(1), {'a': torch.ones(2)}),
'b': [torch.ones(3), torch.ones(4), (torch.ones(5), {'a': torch.ones(6)})]
}
flattener = Flattener.from_value(x)
y = flattener.flatten(x)
assert(len(y) == 6)
z = flattener.unflatten(y)
assert(isinstance(z, dict))
assert(isinstance(z['a'], tuple))
assert(z['a'][0] is x['a'][0])
assert(isinstance(z['a'][1], dict))
assert(z['a'][1]['a'] is x['a'][1]['a'])
assert(isinstance(z['b'], list))
assert(z['b'][0] is x['b'][0])
assert(z['b'][1] is x['b'][1])
assert(isinstance(z['b'][2], tuple))
assert(z['b'][2][0] is x['b'][2][0])
assert(isinstance(z['b'][2][1], dict))
assert(z['b'][2][1]['a'] is x['b'][2][1]['a']) | 3,441 | 20.647799 | 86 | py |
torch2trt | torch2trt-master/torch2trt/torch2trt.py | import torch
import tensorrt as trt
import copy
import numpy as np
import io
from collections import defaultdict
import importlib
from .dataset_calibrator import (
DatasetCalibrator,
DEFAULT_CALIBRATION_ALGORITHM,
)
from .dataset import (
Dataset,
TensorBatchDataset,
ListDataset
)
from .flattener import Flattener
from .flatten_module import Flatten, Unflatten
# UTILITY FUNCTIONS
def trt_version():
return trt.__version__
def torch_version():
return torch.__version__
def torch_dtype_to_trt(dtype):
if trt_version() >= '7.0' and dtype == torch.bool:
return trt.bool
elif dtype == torch.int8:
return trt.int8
elif dtype == torch.int32:
return trt.int32
elif dtype == torch.float16:
return trt.float16
elif dtype == torch.float32:
return trt.float32
else:
raise TypeError("%s is not supported by tensorrt" % dtype)
def torch_dtype_from_trt(dtype):
if dtype == trt.int8:
return torch.int8
elif trt_version() >= '7.0' and dtype == trt.bool:
return torch.bool
elif dtype == trt.int32:
return torch.int32
elif dtype == trt.float16:
return torch.float16
elif dtype == trt.float32:
return torch.float32
else:
raise TypeError("%s is not supported by torch" % dtype)
def torch_device_to_trt(device):
if device.type == torch.device("cuda").type:
return trt.TensorLocation.DEVICE
elif device.type == torch.device("cpu").type:
return trt.TensorLocation.HOST
else:
raise TypeError("%s is not supported by tensorrt" % device)
def torch_device_from_trt(device):
if device == trt.TensorLocation.DEVICE:
return torch.device("cuda")
elif device == trt.TensorLocation.HOST:
return torch.device("cpu")
else:
raise TypeError("%s is not supported by torch" % device)
def trt_num_inputs(engine):
count = 0
for i in range(engine.num_bindings):
if engine.binding_is_input(i):
count += 1
return count
def trt_num_outputs(engine):
count = 0
for i in range(engine.num_bindings):
if not engine.binding_is_input(i):
count += 1
return count
def torch_dim_resolve_negative(dim, ndim):
if not isinstance(dim, tuple):
dim = (dim,)
pos = []
for d in dim:
if d < 0:
d = ndim + d
pos.append(d)
return tuple(pos)
def torch_dim_to_trt_axes(dim):
"""Converts torch dim, or tuple of dims to a tensorrt axes bitmask"""
if not isinstance(dim, tuple):
dim = (dim,)
# create axes bitmask for reduce layer
axes = 0
for d in dim:
axes |= 1 << d
return axes
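# Worked example: torch_dim_to_trt_axes((1, 3)) == (1 << 1) | (1 << 3) == 10.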
def add_trt_constant(network, tensor):
shape = tuple(tensor.shape)
array = tensor[0].detach().cpu().numpy()
layer = network.add_constant(shape, array)
return layer.get_output(0)
def check_torch_dtype(*tensors):
dtype = None
for t in tensors:
if isinstance(t, torch.Tensor):
if dtype is None:
dtype = t.dtype
else:
assert dtype == t.dtype, 'Tensor data types must match'
assert dtype is not None, 'Data type could not be inferred from any item in list'
return dtype
def add_missing_trt_tensors(network, tensors):
"""Creates missing TensorRT tensors as constants and attaches them to the Torch Tensors"""
with use_shape_wrapping(False):
trt_tensors = [None] * len(tensors)
dtype = check_torch_dtype(*tensors)
for i, t in enumerate(tensors):
trt_tensor = None
# GET TRT TENSOR (OR CREATE TRT CONSTANT)
# get tensor w/ _trt
# or... add constant for scalar primitive
if hasattr(t, "_trt") or isinstance(t, IntWrapper):
trt_tensor = t._trt
elif isinstance(t, float) or isinstance(t, int):
shape = (1,)
scalar = t * torch.ones(shape, dtype=dtype).cpu().numpy()
trt_tensor = network.add_constant(shape, scalar).get_output(0)
# or... add constant for leaf tensor w/o _trt
else:
# remove all preceding ones, these can be re-inserted later when broadcasting
num_preceding_ones = 0
for j in range(len(t.shape)):
if int(t.shape[j]) == 1:
num_preceding_ones += 1
else:
break
shape = tuple(t.shape[num_preceding_ones:])
weight = t.detach().cpu().numpy()
t._trt = network.add_constant(shape, weight).get_output(0)
trt_tensor = t._trt
assert trt_tensor is not None
trt_tensors[i] = trt_tensor
return trt_tensors
def broadcast_trt_tensors(network, trt_tensors, broadcast_ndim):
"""Broadcast TensorRT tensors to the specified dimension by pre-padding shape 1 dims"""
with use_shape_wrapping(False):
broadcasted_trt_tensors = [None] * len(trt_tensors)
for i, t in enumerate(trt_tensors):
if len(t.shape) < broadcast_ndim:
# append 1 size dims to front
diff = broadcast_ndim - len(t.shape)
shape = tuple([1] * diff + list(t.shape))
layer = network.add_shuffle(t)
layer.reshape_dims = shape
trt_tensor = layer.get_output(0)
else:
trt_tensor = t
broadcasted_trt_tensors[i] = trt_tensor
return broadcasted_trt_tensors
def trt_(network, *tensors):
"""Creates missing TensorRT tensors and adds shuffle layers to make tensors broadcastable"""
with use_shape_wrapping(False):
trt_tensors = [None] * len(tensors)
dtype = check_torch_dtype(*tensors)
# get broadcast dimension
broadcast_num_dim = 0
for t in tensors:
if isinstance(t, torch.Tensor):
if not hasattr(t, "_trt"):
num_dim = len(t.shape) # don't exclude batch for constants
else:
num_dim = len(
t._trt.shape
) # non-leaf tensors must already have _trt, get shape from that
if num_dim > broadcast_num_dim:
broadcast_num_dim = num_dim
for i, t in enumerate(tensors):
trt_tensor = None
# GET TRT TENSOR (OR CREATE TRT CONSTANT)
# get tensor w/ _trt
if (isinstance(t, torch.Tensor) and hasattr(t, "_trt")) or isinstance(t, IntWrapper):
trt_tensor = t._trt
# or... add constant for leaf tensor w/o _trt
elif isinstance(t, torch.Tensor) and not hasattr(t, "_trt"):
# add leaf tensor
shape = tuple(t.shape) # don't exclude batch when adding constants...?
weight = t.detach().cpu().numpy()
t._trt = network.add_constant(shape, weight).get_output(0)
trt_tensor = t._trt
# or... add constant for scalar primitive
elif isinstance(t, float) or isinstance(t, int):
shape = (1,) * broadcast_num_dim
scalar = t * torch.ones(shape, dtype=dtype).cpu().numpy()
trt_tensor = network.add_constant(shape, scalar).get_output(0)
assert trt_tensor is not None
# MAKE TRT TENSOR BROADCASTABLE IF IT IS NOT ALREADY
if len(trt_tensor.shape) < broadcast_num_dim:
# append 1 size dims to front
diff = broadcast_num_dim - len(trt_tensor.shape)
shape = tuple([1] * diff + list(trt_tensor.shape))
layer = network.add_shuffle(trt_tensor)
layer.reshape_dims = shape
trt_tensor = layer.get_output(0)
trt_tensors[i] = trt_tensor
if len(trt_tensors) == 1:
return trt_tensors[0]
else:
return tuple(trt_tensors)
# CONVERSION REGISTRY AND HOOKS
CONVERTERS = {}
def get_arg(ctx, name, pos, default):
if name in ctx.method_kwargs:
return ctx.method_kwargs[name]
elif len(ctx.method_args) > pos:
return ctx.method_args[pos]
else:
return default
def attach_converter(ctx, method, converter, method_str):
"""Gets a function that executes PyTorch method and TensorRT converter"""
global DUMMY_CONVERTERS
def wrapper(*args, **kwargs):
skip = True
# check if another (parent) converter has lock
if not ctx.lock:
if converter["is_real"]:
ctx.lock = True # only real converters can acquire lock
skip = False
# run original method
outputs = method(*args, **kwargs)
if not skip:
ctx.method_args = args
ctx.method_kwargs = kwargs
ctx.method_return = outputs
ctx.method_str = method_str
# print('%s' % (converter.__name__,))
converter["converter"](ctx)
# allow overwriting output, for things like shape converter
outputs = ctx.method_return
# convert to None so conversion will fail for unsupported layers
ctx.method_args = None
ctx.method_kwargs = None
ctx.method_return = None
ctx.lock = False
return outputs
return wrapper
class ConversionHook(object):
"""Attaches TensorRT converter to PyTorch method call"""
def __init__(self, ctx, key, converter):
self.ctx = ctx
self.key = key
self.converter = converter
def _set_method(self, method):
module = self.converter['module']
exec('module.%s = method' % self.converter['qual_name'])
def __enter__(self):
self._set_method(
attach_converter(
self.ctx, self.converter['method_impl'], self.converter, self.converter['method_str']
)
)
def __exit__(self, type, val, tb):
self._set_method(self.converter['method_impl'])
def default_input_names(num_inputs):
return ["input_%d" % i for i in range(num_inputs)]
def default_output_names(num_outputs):
return ["output_%d" % i for i in range(num_outputs)]
def device_type_str(device_type):
if device_type == trt.DeviceType.GPU:
return 'GPU'
elif device_type == trt.DeviceType.DLA:
return 'DLA'
class NetworkWrapper(object):
def __init__(self, ctx, network):
self._ctx = ctx
self._network = network
self._layer_counts = defaultdict(lambda: 0)
def _configure_layer(self, layer):
with use_shape_wrapping(False):
# set layer device type
device_type = self._ctx.current_device_type()
self._ctx.builder_config.set_device_type(layer, device_type)
orig_device_type = device_type
if not self._ctx.builder_config.can_run_on_DLA(layer) and device_type == trt.DeviceType.DLA:
if self._ctx.torch2trt_kwargs['gpu_fallback']:
device_type = trt.DeviceType.GPU # layer will fall back to GPU
# set layer name
def arg_str(arg):
if isinstance(arg, torch.Tensor):
return "tensor(shape=%s, dtype=%s)" % (str(list(arg.shape)), str(arg.dtype))
return str(arg)
scope_name = self._ctx.current_module_name()# + ':' + layer.type.name
self._layer_counts[scope_name] += 1
args = [arg_str(arg) for arg in self._ctx.method_args]
kwargs = ["%s=%s" % (key, arg_str(arg)) for key, arg in self._ctx.method_kwargs.items()]
layer.name = scope_name + ':' + str(self._layer_counts[scope_name] - 1) + ':' + layer.type.name + ':' + device_type_str(device_type)
if orig_device_type != device_type:
layer.name = layer.name + '(' + device_type_str(orig_device_type) + ')'
# "%s [%s #%d, %s] %s(%s)" % (self._ctx.current_module_name(), layer.type.name, self._layer_counts[layer.type.name], device_type_str(device_type),
# self._ctx.method_str, ", ".join(args + kwargs))
def __getattr__(self, name):
attr = getattr(self._network, name)
if callable(attr):
def wrapper(*args, **kwargs):
ret = attr(*args, **kwargs)
if isinstance(ret, trt.ILayer):
self._configure_layer(ret)
return ret
return wrapper
else:
return attr
_ACTIVE_CONVERSION_CONTEXT = None
def get_conversion_context():
return _ACTIVE_CONVERSION_CONTEXT
class ConversionContext(object):
def __init__(self, network, converters=CONVERTERS, torch2trt_kwargs=None, builder_config=None, logger=None):
self.network = NetworkWrapper(self, network)
self.lock = False
self.method_args = None
self.method_kwargs = None
self.method_return = None
self.torch2trt_kwargs = torch2trt_kwargs
self.builder_config = builder_config
self.hooks = [
ConversionHook(self, key, converter)
for key, converter in converters.items()
]
self.module_stack = []
self.module_handles = []
self.device_type_stack = []
self.module_name_map = {}
for name, module in torch2trt_kwargs['module'].named_modules():
self.module_name_map[module] = name
self.logger = logger
def current_module_name(self):
return self.get_module_name(self.current_module())
def current_module(self):
return self.module_stack[-1]
def get_module_name(self, module):
return self.module_name_map[module]
def _module_pre_hook(self, module, input):
# TODO(@jwelsh): add logging to show module entry / exit
self.module_stack.append(module)
        # hook attached to the module via register_forward_pre_hook; called before the module executes
if module in self.torch2trt_kwargs['device_types']:
device_type = self.torch2trt_kwargs['device_types'][module]
self.device_type_stack.append((module, device_type))
def _module_post_hook(self, module, input, output):
# if module was used to set the current device type, pop device type from stack
if self.current_device_type_module() == module:
self.device_type_stack.pop()
self.module_stack.pop()
def current_device_type(self):
"""Returns the current device type"""
if len(self.device_type_stack) > 0:
return self.device_type_stack[-1][1]
else:
return self.torch2trt_kwargs['default_device_type']
def current_device_type_module(self):
"""Returns the module which controls the current device type"""
if len(self.device_type_stack) > 0:
return self.device_type_stack[-1][0]
else:
return None
def __enter__(self):
global _ACTIVE_CONVERSION_CONTEXT
# attach hooks which add converters to methods
for hook in self.hooks:
hook.__enter__()
# attach hooks which control the current device type
for name, module in self.torch2trt_kwargs['module'].named_modules():
pre_hook_handle = module.register_forward_pre_hook(self._module_pre_hook)
post_hook_handle = module.register_forward_hook(self._module_post_hook)
self.module_handles.append(pre_hook_handle)
self.module_handles.append(post_hook_handle)
_ACTIVE_CONVERSION_CONTEXT = self
torch.Tensor.size = _size_wrapper
torch.Tensor.__getattribute__ = _new_getattr
return self
def __exit__(self, type, val, tb):
global _ACTIVE_CONVERSION_CONTEXT
for hook in self.hooks:
hook.__exit__(type, val, tb)
for handle in self.module_handles:
handle.remove()
_ACTIVE_CONVERSION_CONTEXT = None
torch.Tensor.size = _original_size
torch.Tensor.__getattribute__ = _old_getattr
def add_inputs(self, torch_inputs, names=None, dynamic_axes=None):
if names is None:
names = default_input_names(len(torch_inputs))
self.input_names = names
for i, torch_input in enumerate(torch_inputs):
if not hasattr(torch_input, "_trt"):
shape = list(torch_input.shape)
if dynamic_axes is not None:
for dim in dynamic_axes[i]:
shape[dim] = -1
shape = tuple(shape)
trt_tensor = self.network.add_input(
name=names[i],
shape=shape,
dtype=torch_dtype_to_trt(torch_input.dtype),
)
trt_tensor.location = torch_device_to_trt(torch_input.device)
torch_input._trt = trt_tensor
def mark_outputs(self, torch_outputs, names=None):
if names is None:
names = default_output_names(len(torch_outputs))
self.output_names = names
for i, torch_output in enumerate(torch_outputs):
trt_tensor = torch_output._trt
trt_tensor.name = names[i]
trt_tensor.location = torch_device_to_trt(torch_output.device)
trt_tensor.dtype = torch_dtype_to_trt(torch_output.dtype)
self.network.mark_output(trt_tensor)
class TRTModule(torch.nn.Module):
def __init__(self, engine=None, input_names=None, output_names=None, input_flattener=None, output_flattener=None):
super(TRTModule, self).__init__()
self._register_state_dict_hook(TRTModule._on_state_dict)
self.engine = engine
if self.engine is not None:
self.context = self.engine.create_execution_context()
self.input_names = input_names
self.output_names = output_names
self.input_flattener = input_flattener
self.output_flattener = output_flattener
def _on_state_dict(self, state_dict, prefix, local_metadata):
state_dict[prefix + "engine"] = bytearray(self.engine.serialize())
state_dict[prefix + "input_names"] = self.input_names
state_dict[prefix + "output_names"] = self.output_names
state_dict[prefix + "input_flattener"] = self.input_flattener.dict()
state_dict[prefix + "output_flattener"] = self.output_flattener.dict()
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
engine_bytes = state_dict[prefix + "engine"]
with trt.Logger() as logger, trt.Runtime(logger) as runtime:
self.engine = runtime.deserialize_cuda_engine(engine_bytes)
self.context = self.engine.create_execution_context()
self.input_names = state_dict[prefix + "input_names"]
self.output_names = state_dict[prefix + "output_names"]
        if prefix + 'input_flattener' in state_dict:
            self.input_flattener = Flattener.from_dict(state_dict[prefix + 'input_flattener'])
        else:
            self.input_flattener = None
        if prefix + 'output_flattener' in state_dict:
            self.output_flattener = Flattener.from_dict(state_dict[prefix + 'output_flattener'])
        else:
            self.output_flattener = None
def forward(self, *inputs):
bindings = [None] * (len(self.input_names) + len(self.output_names))
if self.input_flattener is not None:
inputs = self.input_flattener.flatten(inputs)
for i, input_name in enumerate(self.input_names):
idx = self.engine.get_binding_index(input_name)
shape = tuple(inputs[i].shape)
bindings[idx] = inputs[i].contiguous().data_ptr()
self.context.set_binding_shape(idx, shape)
# create output tensors
outputs = [None] * len(self.output_names)
for i, output_name in enumerate(self.output_names):
idx = self.engine.get_binding_index(output_name)
dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx))
shape = tuple(self.context.get_binding_shape(idx))
device = torch_device_from_trt(self.engine.get_location(idx))
output = torch.empty(size=shape, dtype=dtype, device=device)
outputs[i] = output
bindings[idx] = output.data_ptr()
self.context.execute_async_v2(
bindings, torch.cuda.current_stream().cuda_stream
)
if self.output_flattener is not None:
outputs = self.output_flattener.unflatten(outputs)
else:
outputs = tuple(outputs)
if len(outputs) == 1:
outputs = outputs[0]
return outputs
def enable_profiling(self):
if not self.context.profiler:
self.context.profiler = trt.Profiler()
def infer_dynamic_axes(min_shapes_flat, max_shapes_flat):
dynamic_axes = [[] for i in range(len(min_shapes_flat))]
for i, (mins, maxs) in enumerate(zip(min_shapes_flat, max_shapes_flat)):
for j, (mins_i, maxs_i) in enumerate(zip(mins, maxs)):
if mins_i != maxs_i:
dynamic_axes[i].append(j)
return dynamic_axes
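# Illustrative note (not part of the original source): given flattened shape bounds such as
#   min_shapes_flat = [(1, 3, 224, 224)] and max_shapes_flat = [(4, 3, 224, 224)]
# infer_dynamic_axes returns [[0]], i.e. only the batch axis of input 0 is treated as dynamic.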
def torch2trt(module,
inputs,
input_names=None,
output_names=None,
log_level=trt.Logger.ERROR,
fp16_mode=False,
max_workspace_size=1<<25,
strict_type_constraints=False,
keep_network=True,
int8_mode=False,
int8_calib_dataset=None,
int8_calib_algorithm=DEFAULT_CALIBRATION_ALGORITHM,
use_onnx=False,
default_device_type=trt.DeviceType.GPU,
dla_core=0,
gpu_fallback=True,
device_types={},
min_shapes=None,
max_shapes=None,
opt_shapes=None,
onnx_opset=None,
max_batch_size=None,
avg_timing_iterations=None,
**kwargs):
# capture arguments to provide to context
kwargs.update(locals())
kwargs.pop('kwargs')
# handle inputs as dataset of list of tensors
if issubclass(inputs.__class__, Dataset):
dataset = inputs
if len(dataset) == 0:
raise ValueError('Dataset must have at least one element to use for inference.')
inputs = dataset[0]
else:
dataset = ListDataset()
dataset.insert(inputs)
inputs = dataset[0]
outputs = module(*inputs)
input_flattener = Flattener.from_value(inputs)
output_flattener = Flattener.from_value(outputs)
# infer default parameters from dataset
    if min_shapes is None:
min_shapes_flat = [tuple(t) for t in dataset.min_shapes(flat=True)]
else:
min_shapes_flat = input_flattener.flatten(min_shapes)
    if max_shapes is None:
max_shapes_flat = [tuple(t) for t in dataset.max_shapes(flat=True)]
else:
max_shapes_flat = input_flattener.flatten(max_shapes)
    if opt_shapes is None:
opt_shapes_flat = [tuple(t) for t in dataset.median_numel_shapes(flat=True)]
else:
opt_shapes_flat = input_flattener.flatten(opt_shapes)
# handle legacy max_batch_size
if max_batch_size is not None:
min_shapes_flat = [(1,) + s[1:] for s in min_shapes_flat]
max_shapes_flat = [(max_batch_size,) + s[1:] for s in max_shapes_flat]
dynamic_axes_flat = infer_dynamic_axes(min_shapes_flat, max_shapes_flat)
if default_device_type == trt.DeviceType.DLA:
for value in dynamic_axes_flat:
if len(value) > 0:
raise ValueError('Dataset cannot have multiple shapes when using DLA')
logger = trt.Logger(log_level)
builder = trt.Builder(logger)
config = builder.create_builder_config()
if input_names is None:
input_names = default_input_names(input_flattener.size)
if output_names is None:
output_names = default_output_names(output_flattener.size)
if use_onnx:
import onnx_graphsurgeon as gs
import onnx
module_flat = Flatten(module, input_flattener, output_flattener)
inputs_flat = input_flattener.flatten(inputs)
f = io.BytesIO()
torch.onnx.export(
module_flat,
inputs_flat,
f,
input_names=input_names,
output_names=output_names,
dynamic_axes={
name: {int(axis): 'axis_%d' % axis for axis in dynamic_axes_flat[index]}
for index, name in enumerate(input_names)
},
opset_version=onnx_opset
)
f.seek(0)
onnx_graph = gs.import_onnx(onnx.load(f))
onnx_graph.fold_constants().cleanup()
f = io.BytesIO()
onnx.save(gs.export_onnx(onnx_graph), f)
f.seek(0)
onnx_bytes = f.read()
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
parser = trt.OnnxParser(network, logger)
parser.parse(onnx_bytes)
else:
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
with ConversionContext(network, torch2trt_kwargs=kwargs, builder_config=config, logger=logger) as ctx:
inputs_flat = input_flattener.flatten(inputs)
ctx.add_inputs(inputs_flat, input_names, dynamic_axes=dynamic_axes_flat)
outputs = module(*inputs)
outputs_flat = output_flattener.flatten(outputs)
ctx.mark_outputs(outputs_flat, output_names)
# set max workspace size
config.max_workspace_size = max_workspace_size
# set number of avg timing itrs.
if avg_timing_iterations is not None:
config.avg_timing_iterations = avg_timing_iterations
if fp16_mode:
config.set_flag(trt.BuilderFlag.FP16)
config.default_device_type = default_device_type
if gpu_fallback:
config.set_flag(trt.BuilderFlag.GPU_FALLBACK)
config.DLA_core = dla_core
if strict_type_constraints:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if int8_mode:
# default to use input tensors for calibration
if int8_calib_dataset is None:
int8_calib_dataset = dataset
config.set_flag(trt.BuilderFlag.INT8)
#Making sure not to run calibration with QAT mode on
        if 'qat_mode' not in kwargs:
calibrator = DatasetCalibrator(
int8_calib_dataset, algorithm=int8_calib_algorithm
)
config.int8_calibrator = calibrator
# OPTIMIZATION PROFILE
profile = builder.create_optimization_profile()
for index, name in enumerate(input_names):
profile.set_shape(
name,
min_shapes_flat[index],
opt_shapes_flat[index],
max_shapes_flat[index]
)
config.add_optimization_profile(profile)
if int8_mode:
config.set_calibration_profile(profile)
# BUILD ENGINE
engine = builder.build_engine(network, config)
module_trt = TRTModule(engine, input_names, output_names, input_flattener=input_flattener, output_flattener=output_flattener)
if keep_network:
module_trt.network = network
return module_trt
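# Minimal usage sketch (illustrative only, not part of the original source; assumes a CUDA
# device and torchvision are available):
#
#   import torch, torchvision
#   from torch2trt import torch2trt, TRTModule
#   model = torchvision.models.resnet18().cuda().eval()
#   data = torch.randn(1, 3, 224, 224).cuda()
#   model_trt = torch2trt(model, [data], fp16_mode=True, max_batch_size=4)
#   torch.save(model_trt.state_dict(), 'resnet18_trt.pth')   # reload later via TRTModule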
# DEFINE ALL CONVERSION FUNCTIONS
def get_module_qualname(name):
s = name.split('.')
for i in range(len(s)):
idx = len(s) - i - 1
modulename, qualname = ".".join(s[:idx]), ".".join(s[idx:])
try:
module = importlib.import_module(modulename)
return module, modulename, qualname
except:
pass
raise RuntimeError("Could not import module")
def tensorrt_converter(method, is_real=True, enabled=True, imports=[]):
if isinstance(method, str):
module, module_name, qual_name = get_module_qualname(method)
else:
module, module_name, qual_name = importlib.import_module(method.__module__), method.__module__, method.__qualname__
try:
method_impl = eval('copy.deepcopy(module.%s)' % qual_name)
except:
enabled = False
def register_converter(converter):
CONVERTERS[method] = {
"converter": converter,
"is_real": is_real,
"module": module,
"module_name": module_name,
"qual_name": qual_name,
"method_str": module_name + '.' + qual_name,
"method_impl": method_impl
}
return converter
def pass_converter(converter):
return converter
if enabled:
return register_converter
else:
return pass_converter
return register_converter
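# Illustrative sketch (not part of the original source) of how the decorator is typically
# used: the converter body reads its arguments from ctx.method_args / ctx.method_kwargs
# (or via get_arg) and attaches the TensorRT output to the returned torch.Tensor.
#
#   @tensorrt_converter('torch.nn.ReLU.forward')
#   def convert_relu(ctx):
#       input = ctx.method_args[1]
#       output = ctx.method_return
#       layer = ctx.network.add_activation(input=input._trt, type=trt.ActivationType.RELU)
#       output._trt = layer.get_output(0)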
def set_layer_precision(ctx, layer):
# Supported TRT precisions as given by torch2trt_kwargs.
INT8_MODE = "int8_mode"
FP16_MODE = "fp16_mode"
# Check that args exist as expected in torch2trt_kwargs.
trt_kwargs = ctx.torch2trt_kwargs
assert INT8_MODE in trt_kwargs
assert FP16_MODE in trt_kwargs
is_int8 = trt_kwargs.get(INT8_MODE, False)
is_fp16 = trt_kwargs.get(FP16_MODE, False)
if is_int8:
layer.precision = trt.int8
layer.set_output_type(0, trt.int8)
elif is_fp16:
layer.precision = trt.float16
layer.set_output_type(0, trt.float16)
# from torch2trt.torch2trt import (
# torch2trt,
# trt,
# tensorrt_converter,
# get_conversion_context,
# get_arg
# )
# SHAPE WRAPPING
_int = int
_tuple = tuple
_int_mul = int.__mul__
_int_add = int.__add__
_int_sub = int.__sub__
_int_floordiv = int.__floordiv__
class IntWrapper(int):
@property
def _trt(self):
if not hasattr(self, '_raw_trt'):
ctx = get_conversion_context()
self._raw_trt = ctx.network._network.add_constant([1], np.array([_int(self)], dtype=np.int32)).get_output(0)
return self._raw_trt
# lhs ops
def __mul__(self, x):
if not isinstance(x, IntWrapper):
x = IntWrapper(x)
ctx = get_conversion_context()
result = IntWrapper(_int_mul(self, x))
result._raw_trt = ctx.network._network.add_elementwise(self._trt, x._trt, trt.ElementWiseOperation.PROD).get_output(0)
return result
def __add__(self, x):
if not isinstance(x, IntWrapper):
x = IntWrapper(x)
ctx = get_conversion_context()
result = IntWrapper(_int_add(self, x))
result._raw_trt = ctx.network._network.add_elementwise(self._trt, x._trt, trt.ElementWiseOperation.SUM).get_output(0)
return result
def __sub__(self, x):
if not isinstance(x, IntWrapper):
x = IntWrapper(x)
ctx = get_conversion_context()
result = IntWrapper(_int_sub(self, x))
result._raw_trt = ctx.network._network.add_elementwise(self._trt, x._trt, trt.ElementWiseOperation.SUB).get_output(0)
return result
def __floordiv__(self, x):
if not isinstance(x, IntWrapper):
x = IntWrapper(x)
ctx = get_conversion_context()
result = IntWrapper(_int_floordiv(self, x))
result._raw_trt = ctx.network._network.add_elementwise(self._trt, x._trt, trt.ElementWiseOperation.FLOOR_DIV).get_output(0)
return result
# rhs ops
def __rmul__(self, x):
if not isinstance(x, IntWrapper):
x = IntWrapper(x)
ctx = get_conversion_context()
result = IntWrapper(_int_mul(x, self))
result._raw_trt = ctx.network._network.add_elementwise(x._trt, self._trt, trt.ElementWiseOperation.PROD).get_output(0)
return result
def __radd__(self, x):
if not isinstance(x, IntWrapper):
x = IntWrapper(x)
ctx = get_conversion_context()
result = IntWrapper(_int_add(x, self))
result._raw_trt = ctx.network._network.add_elementwise(x._trt, self._trt, trt.ElementWiseOperation.SUM).get_output(0)
return result
def __rsub__(self, x):
if not isinstance(x, IntWrapper):
x = IntWrapper(x)
ctx = get_conversion_context()
result = IntWrapper(_int_sub(x, self))
result._raw_trt = ctx.network._network.add_elementwise(x._trt, self._trt, trt.ElementWiseOperation.SUB).get_output(0)
return result
def __rfloordiv__(self, x):
if not isinstance(x, IntWrapper):
x = IntWrapper(x)
ctx = get_conversion_context()
result = IntWrapper(_int_floordiv(x, self))
result._raw_trt = ctx.network._network.add_elementwise(x._trt, self._trt, trt.ElementWiseOperation.FLOOR_DIV).get_output(0)
return result
def __int__(self):
return self
def make_int_wrapper(x):
if isinstance(x, IntWrapper):
return x
else:
return IntWrapper(x)
class SizeWrapper(tuple):
@property
def _trt(self):
if not hasattr(self, '_raw_trt'):
ctx = get_conversion_context()
self._raw_trt = ctx.network._network.add_concatenation([d._trt for d in self]).get_output(0)
return self._raw_trt
def __tuple__(self):
return self
def wrap_ints(x):
for y in x:
yield make_int_wrapper(y)
def make_size_wrapper(args):
return SizeWrapper(wrap_ints(args))
_original_size = torch.Tensor.size
_original_getattr = torch.Tensor.__getattribute__
def _size_wrapper(input, dim=None):
if not hasattr(input, '_trt'):
if dim is not None:
return _original_size(input, dim)
else:
return _original_size(input)
ctx = get_conversion_context()
output = _original_size(input)
output = make_size_wrapper(output)
shape_trt = ctx.network._network.add_shape(input._trt).get_output(0)
for i, d in enumerate(output):
d._raw_trt = ctx.network._network.add_slice(shape_trt, [i], [1], [1]).get_output(0)
if dim is not None:
output = output[dim]
return output
_old_getattr = torch.Tensor.__getattribute__
def _new_getattr(self, name):
if name == 'shape' and use_shape_wrapping.stack[0]:
return _size_wrapper(self)
else:
return _old_getattr(self, name)
class use_shape_wrapping:
stack = [True] # default true
def __init__(self, value: bool):
self._value = value
def __enter__(self, *args, **kwargs):
self.stack.insert(0, self._value)
def __exit__(self, *args, **kwargs):
self.stack.pop(0)
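# Illustrative note (not part of the original source): code that needs plain Python shapes
# can temporarily disable the IntWrapper/SizeWrapper machinery, as the helpers above do:
#
#   with use_shape_wrapping(False):
#       ndim = len(tensor.shape)   # ordinary ints; no TRT shape tensors are recorded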
| 35,112 | 31.243343 | 158 | py |
torch2trt | torch2trt-master/torch2trt/tests/test_contiguous.py | import torch
from torch2trt import torch2trt
def test_contiguous():
torch.manual_seed(0)
net = torch.nn.Conv2d(3, 10, kernel_size=3)
net.eval().cuda()
test_tensor = torch.randn((1, 25, 25, 3)).cuda().permute((0, 3, 1, 2))
with torch.no_grad():
test_out = net(test_tensor)
with torch.no_grad():
trt_net = torch2trt(net, [test_tensor])
test_trt_out = trt_net(test_tensor)
delta = torch.max((test_out.contiguous() - test_trt_out.contiguous()).abs())
assert delta < 1e-3, f"Delta: {delta}"
| 551 | 22 | 80 | py |
torch2trt | torch2trt-master/torch2trt/tests/test_flatten_dynamic.py | import pytest
from torch2trt import torch2trt, trt
import torch
class FlattenModule(torch.nn.Module):
def __init__(self, start_dim, end_dim):
super().__init__()
self.start_dim = start_dim
self.end_dim = end_dim
def forward(self, x):
return torch.flatten(x, self.start_dim, self.end_dim)
def test_flatten_dynamic_0_n1():
# 0, -1
module = FlattenModule(start_dim=0, end_dim=-1).cuda().eval()
x = torch.randn(1, 4, 5).cuda()
module_trt = torch2trt(module, [x], max_batch_size=4, log_level=trt.Logger.VERBOSE)
x = torch.randn(1, 4, 5).cuda()
assert(torch.allclose(module(x), module_trt(x), atol=1e-2, rtol=1e-2))
x = torch.randn(4, 4, 5).cuda()
assert(torch.allclose(module(x), module_trt(x), atol=1e-2, rtol=1e-2))
def test_flatten_dynamic_1_n1():
# 1, -1
module = FlattenModule(start_dim=1, end_dim=-1).cuda().eval()
x = torch.randn(1, 4, 5).cuda()
module_trt = torch2trt(module, [x], max_batch_size=4, log_level=trt.Logger.VERBOSE)
x = torch.randn(1, 4, 5).cuda()
assert(torch.allclose(module(x), module_trt(x), atol=1e-2, rtol=1e-2))
x = torch.randn(4, 4, 5).cuda()
assert(torch.allclose(module(x), module_trt(x), atol=1e-2, rtol=1e-2))
def test_flatten_dynamic_0_1():
# 0, 1
module = FlattenModule(start_dim=0, end_dim=1).cuda().eval()
x = torch.randn(1, 4, 5).cuda()
module_trt = torch2trt(module, [x], max_batch_size=4, log_level=trt.Logger.VERBOSE)
x = torch.randn(1, 4, 5).cuda()
assert(torch.allclose(module(x), module_trt(x), atol=1e-2, rtol=1e-2))
x = torch.randn(4, 4, 5).cuda()
assert(torch.allclose(module(x), module_trt(x), atol=1e-2, rtol=1e-2))
if __name__ == '__main__':
test_flatten_dynamic_0_1() | 1,781 | 26.84375 | 87 | py |
torch2trt | torch2trt-master/torch2trt/tests/test_tensor_shape_div_batch.py | import pytest
import torch
from torch2trt import torch2trt, trt
def test_div_constant_batch():
class DivConstantBatch(torch.nn.Module):
def __init__(self):
super(DivConstantBatch, self).__init__()
self.register_buffer('y', torch.ones((1, 3, 10, 10)))
def forward(self, x):
return x / self.y
module = DivConstantBatch().cuda().eval()
x = torch.randn(1, 3, 10, 10).cuda()
module_trt = torch2trt(module, [x], log_level=trt.Logger.VERBOSE)
assert torch.allclose(module_trt(x), module(x), atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
test_div_constant_batch()
| 648 | 23.961538 | 73 | py |
torch2trt | torch2trt-master/torch2trt/tests/test_tensor_ne.py | import pytest
import torch
from torch2trt import torch2trt, trt
def test_tensor_ne():
class NotEqual(torch.nn.Module):
def __init__(self):
super(NotEqual, self).__init__()
def forward(self, x, y):
return x != y
module = NotEqual().cuda().eval()
x = torch.randn(1, 3, 40, 20).cuda()
y = torch.randn(1, 3, 1, 20).cuda()
module_trt = torch2trt(module, [x, y], log_level=trt.Logger.VERBOSE)
assert torch.all(module_trt(x, y) == module(x, y))
if __name__ == "__main__":
test_tensor_ne() | 562 | 21.52 | 72 | py |
torch2trt | torch2trt-master/torch2trt/tests/test_interpolate_dynamic.py | import pytest
import torch
import torch.nn.functional as F
from torch2trt import (
torch2trt,
trt
)
def test_interpolate_dynamic_size():
class TestModule(torch.nn.Module):
def forward(self, x):
size = x.size()
return F.interpolate(x, size=(size[2]*2, size[3]*3))
module = TestModule().cuda().eval()
x = torch.randn(1, 3, 32, 32).cuda()
module_trt = torch2trt(module, [x], log_level=trt.Logger.VERBOSE, min_shapes=[(1, 3, 32, 32)], max_shapes=[(4, 3, 64, 64)], opt_shapes=[(1, 3, 32, 32)])
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
x = torch.randn(1, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
x = torch.randn(4, 3, 64, 64).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
def test_interpolate_dynamic_shape():
class TestModule(torch.nn.Module):
def forward(self, x):
size = x.shape
return F.interpolate(x, size=(size[2]*2, size[3]*3))
module = TestModule().cuda().eval()
x = torch.randn(1, 3, 32, 32).cuda()
module_trt = torch2trt(module, [x], log_level=trt.Logger.VERBOSE, min_shapes=[(1, 3, 32, 32)], max_shapes=[(4, 3, 64, 64)], opt_shapes=[(1, 3, 32, 32)])
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
x = torch.randn(1, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
x = torch.randn(4, 3, 64, 64).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
| 1,622 | 30.211538 | 156 | py |
torch2trt | torch2trt-master/torch2trt/tests/test_tensor_shape.py | import pytest
import torch
import torch.nn.functional as F
from torch2trt import (
torch2trt,
trt,
SizeWrapper,
tensorrt_converter
)
def test_tensor_shape_view_trivial():
class TestModule(torch.nn.Module):
def forward(self, x):
size = x.size()
return x.view(size)
module = TestModule().cuda().eval()
x = torch.randn(1, 3, 32, 32).cuda()
module_trt = torch2trt(module, [x], log_level=trt.Logger.VERBOSE, max_batch_size=4)
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
x = torch.randn(1, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
x = torch.randn(4, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
def test_tensor_shape_view_mul():
class TestModule(torch.nn.Module):
def forward(self, x):
size = x.size()
return x.view(size[0] * size[1], size[2] * size[3])
module = TestModule().cuda().eval()
x = torch.randn(1, 3, 32, 32).cuda()
module_trt = torch2trt(module, [x], log_level=trt.Logger.VERBOSE, max_batch_size=4)
x = torch.randn(1, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
x = torch.randn(4, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
def test_tensor_shape_view_mul_cast():
class TestModule(torch.nn.Module):
def forward(self, x):
size = x.size()
return x.view(size[0] * int(size[1]), int(size[2] * size[3]))
module = TestModule().cuda().eval()
x = torch.randn(1, 3, 32, 32).cuda()
module_trt = torch2trt(module, [x], log_level=trt.Logger.VERBOSE, max_batch_size=4)
x = torch.randn(1, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
x = torch.randn(4, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
def test_tensor_shape_view_mul_const_lhs():
class TestModule(torch.nn.Module):
def forward(self, x):
size = x.size()
return x.view(size[0] * 1, size[1], size[2] * size[3])
module = TestModule().cuda().eval()
x = torch.randn(1, 3, 32, 32).cuda()
module_trt = torch2trt(module, [x], log_level=trt.Logger.VERBOSE, max_batch_size=4)
x = torch.randn(1, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
x = torch.randn(4, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
def test_tensor_shape_view_mul_const_rhs():
class TestModule(torch.nn.Module):
def forward(self, x):
size = x.size()
return x.view(1 * size[0], size[1], size[2] * size[3])
module = TestModule().cuda().eval()
x = torch.randn(1, 3, 32, 32).cuda()
module_trt = torch2trt(module, [x], log_level=trt.Logger.VERBOSE, max_batch_size=4)
x = torch.randn(1, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
x = torch.randn(4, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
def test_tensor_shape_view_static():
class TestModule(torch.nn.Module):
def forward(self, x):
size = x.size()
return x.view(1, 3, 32, 32)
module = TestModule().cuda().eval()
x = torch.randn(1, 3, 32, 32).cuda()
module_trt = torch2trt(module, [x], log_level=trt.Logger.VERBOSE, max_batch_size=4)
x = torch.randn(1, 3, 32, 32).cuda()
assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
# x = torch.randn(4, 3, 32, 32).cuda()
# assert(torch.allclose(module_trt(x), module(x), atol=1e-2, rtol=1e-2))
if __name__ == '__main__':
test_tensor_shape_view_mul() | 4,515 | 27.948718 | 87 | py |
torch2trt | torch2trt-master/torch2trt/tests/test_legacy_max_batch_size.py | import torch.nn as nn
import torch
from torch2trt import torch2trt
def test_legacy_max_batch_size():
model = nn.Conv2d(3, 6, kernel_size=1).cuda().eval()
data = torch.randn(1, 3, 32, 32).cuda()
model_trt = torch2trt(model, [data], max_batch_size=4)
data = torch.randn(1, 3, 32, 32).cuda()
out = model(data)
out_trt = model_trt(data)
assert(torch.allclose(out, out_trt, atol=1e-3, rtol=1e-3))
data = torch.randn(4, 3, 32, 32).cuda()
out = model(data)
out_trt = model_trt(data)
assert(torch.allclose(out, out_trt, atol=1e-3, rtol=1e-3))
def test_legacy_max_batch_size_conv1d():
model = nn.Conv1d(10, 20, kernel_size=1).cuda().eval()
data = torch.randn(1, 10, 32).cuda()
model_trt = torch2trt(model, [data], max_batch_size=4, use_onnx=False)
data = torch.randn(1, 10, 32).cuda()
out = model(data)
out_trt = model_trt(data)
assert(torch.allclose(out, out_trt, atol=1e-3, rtol=1e-3))
data = torch.randn(4, 10, 32).cuda()
out = model(data)
out_trt = model_trt(data)
assert(torch.allclose(out, out_trt, atol=1e-3, rtol=1e-3))
if __name__ == '__main__':
test_legacy_max_batch_size_conv1d() | 1,195 | 22.45098 | 74 | py |
torch2trt | torch2trt-master/torch2trt/tests/timm/test_maxvit.py | import pytest
from torch2trt import torch2trt, trt
from timm.models.maxxvit import (
    maxvit_tiny_rw_224,
    maxvit_tiny_224,
maxvit_rmlp_pico_rw_256,
maxvit_rmlp_small_rw_224
)
import torch
def _cross_validate_module(model, shape=(224, 224)):
data = torch.randn(1, 3, *shape).cuda()
model_trt = torch2trt(model, [data])
out = model(data)
out_trt = model_trt(data)
assert torch.allclose(out, out_trt, rtol=1e-2, atol=1e-2)
def test_maxvit_tiny_rw_224():
_cross_validate_module(maxvit_tiny_rw_224().cuda().eval(), (224, 224))
def test_maxvit_tiny_224():
_cross_validate_module(maxvit_tiny_224().cuda().eval(), (224, 224))
def test_maxvit_rmlp_small_rw_224():
_cross_validate_module(maxvit_rmlp_small_rw_224().cuda().eval(), (224, 224))
if __name__ == "__main__":
test_maxvit_tiny_rw_224() | 842 | 24.545455 | 80 | py |
torch2trt | torch2trt-master/torch2trt/tests/torchvision/classification.py | import torch
import torchvision
from torch2trt.module_test import add_module_test
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def alexnet():
return torchvision.models.alexnet(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def squeezenet1_0():
return torchvision.models.squeezenet1_0(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def squeezenet1_1():
return torchvision.models.squeezenet1_1(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def resnet18():
return torchvision.models.resnet18(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def resnet34():
return torchvision.models.resnet34(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def resnet50():
return torchvision.models.resnet50(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def resnet101():
return torchvision.models.resnet101(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def resnet152():
return torchvision.models.resnet152(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def densenet121():
return torchvision.models.densenet121(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def densenet169():
return torchvision.models.densenet169(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def densenet201():
return torchvision.models.densenet201(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def densenet161():
return torchvision.models.densenet161(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg11():
return torchvision.models.vgg11(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg13():
return torchvision.models.vgg13(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg16():
return torchvision.models.vgg16(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg19():
return torchvision.models.vgg19(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg11_bn():
return torchvision.models.vgg11_bn(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg13_bn():
return torchvision.models.vgg13_bn(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg16_bn():
return torchvision.models.vgg16_bn(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def vgg19_bn():
return torchvision.models.vgg19_bn(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def mobilenet_v2():
return torchvision.models.mobilenet_v2(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def shufflenet_v2_x0_5():
return torchvision.models.shufflenet_v2_x0_5(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def shufflenet_v2_x1_0():
return torchvision.models.shufflenet_v2_x1_0(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def shufflenet_v2_x1_5():
return torchvision.models.shufflenet_v2_x1_5(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def shufflenet_v2_x2_0():
return torchvision.models.shufflenet_v2_x2_0(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def mnasnet0_5():
return torchvision.models.mnasnet0_5(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def mnasnet0_75():
return torchvision.models.mnasnet0_75(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def mnasnet1_0():
return torchvision.models.mnasnet1_0(pretrained=False)
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def mnasnet1_3():
return torchvision.models.mnasnet1_3(pretrained=False) | 4,998 | 32.777027 | 89 | py |
torch2trt | torch2trt-master/torch2trt/tests/torchvision/segmentation.py | import torch
import torchvision
from torch2trt.module_test import add_module_test
class ModelWrapper(torch.nn.Module):
def __init__(self, model):
super(ModelWrapper, self).__init__()
self.model = model
def forward(self, x):
return self.model(x)['out']
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def deeplabv3_resnet50():
bb = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=False)
model = ModelWrapper(bb)
return model
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def deeplabv3_resnet101():
bb = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=False)
model = ModelWrapper(bb)
return model
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def fcn_resnet50():
bb = torchvision.models.segmentation.fcn_resnet50(pretrained=False)
model = ModelWrapper(bb)
return model
@add_module_test(torch.float16, torch.device('cuda'), [(1, 3, 224, 224)], fp16_mode=True)
def fcn_resnet101():
bb = torchvision.models.segmentation.fcn_resnet101(pretrained=False)
model = ModelWrapper(bb)
return model | 1,239 | 30.794872 | 91 | py |
torch2trt | torch2trt-master/torch2trt/tests/torchvision/save_load.py | from torch2trt import *
import torchvision
import torch
from .segmentation import deeplabv3_resnet50
if __name__ == '__main__':
model = deeplabv3_resnet50().cuda().eval().half()
data = torch.randn((1, 3, 224, 224)).cuda().half()
print('Running torch2trt...')
model_trt = torch2trt(model, [data], fp16_mode=True, max_workspace_size=1<<25)
print('Saving model...')
torch.save(model_trt.state_dict(), '.test_model.pth')
print('Loading model...')
model_trt_2 = TRTModule()
model_trt_2.load_state_dict(torch.load('.test_model.pth'))
assert(model_trt_2.engine is not None)
print(torch.max(torch.abs(model_trt_2(data) - model(data))))
print(torch.max(torch.abs(model_trt_2(data) - model_trt(data)))) | 755 | 30.5 | 82 | py |
torch2trt | torch2trt-master/torch2trt/contrib/qat/layers/_utils.py | import torch
import copy
import inspect
from absl import logging
from torch import nn
from pytorch_quantization.nn import TensorQuantizer as TQ
from pytorch_quantization.tensor_quant import QuantDescriptor, QUANT_DESC_8BIT_PER_TENSOR
'''
Currently, NVIDIA's quantization library quantizes the input of the conv layer rather than the output of the ReLU.
The utility classes and functions below help map INT8 layers correctly to TensorRT layers.
'''
class QuantWeightMixin():
"""Mixin class for adding basic quantization logic to quantized modules"""
default_quant_desc_weight = QUANT_DESC_8BIT_PER_TENSOR
@classmethod
def set_default_quant_desc_input(cls, value):
"""
Args:
value: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`
"""
if not isinstance(value, QuantDescriptor):
raise ValueError("{} is not an instance of QuantDescriptor!")
cls.default_quant_desc_weight = copy.deepcopy(value)
def init_quantizer(self, quant_desc_weight):
"""Helper function for __init__ of simple quantized module
Create weight quantizer based on quant_desc passed by kwargs, or default of the class.
Args:
quant_desc_weight: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`
"""
if not inspect.stack()[1].function == "__init__":
raise TypeError("{} should be only called by __init__ of quantized module.".format(__name__))
self._fake_quant = True
if not quant_desc_weight.fake_quant:
raise ValueError("Only fake quantization is supported!")
logging.info("Input is %squantized to %d bits in %s with axis %s!", ""
if not quant_desc_weight.fake_quant else "fake ",
quant_desc_weight.num_bits, self.__class__.__name__, quant_desc_weight.axis)
self._weight_quantizer = TQ(quant_desc_weight)
# pylint:disable=missing-docstring
@property
def weight_quantizer(self):
return self._weight_quantizer
# pylint:enable=missing-docstring
def pop_quant_desc_in_kwargs(quant_cls, input_only=False,weight_only=False, **kwargs):
"""Pop quant descriptors in kwargs
If there is no descriptor in kwargs, the default one in quant_cls will be used
Arguments:
quant_cls: A class that has default quantization descriptors
input_only: A boolean. If True, pop quant_desc_input only, not quant_desc_weight. Default false.
Keyword Arguments:
quant_desc_input: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of input.
quant_desc_weight: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of weight.
    Note: Original function doesn't pop quant_desc_weight
"""
if input_only:
quant_desc_input = kwargs.pop('quant_desc_input', quant_cls.default_quant_desc_input)
elif weight_only:
quant_desc_weight = kwargs.pop('quant_desc_weight', quant_cls.default_quant_desc_weight)
else:
quant_desc_input = kwargs.pop('quant_desc_input', quant_cls.default_quant_desc_input)
quant_desc_weight = kwargs.pop('quant_desc_weight', quant_cls.default_quant_desc_weight)
# Check if anything is left in **kwargs
if kwargs:
raise TypeError("Unused keys: {}".format(kwargs.keys()))
if input_only:
return quant_desc_input
if weight_only:
return quant_desc_weight
return quant_desc_input, quant_desc_weight
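# Illustrative sketch (not part of the original source): a caller can override the class
# default by passing a descriptor through **kwargs, e.g. for per-channel weight scaling:
#
#   from pytorch_quantization.tensor_quant import QuantDescriptor
#   desc = QuantDescriptor(num_bits=8, axis=(0,))
#   conv = QuantConv2d(3, 16, kernel_size=3, quant_desc_weight=desc)   # QuantConv2d from quant_conv.py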
'''
Inference layers: at inference time we don't need to carry the entire QAT library. We only need the dynamic
range so that layers can be mapped to TensorRT layers in INT8 precision.
'''
class TensorQuantizer(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('learned_amax',torch.tensor(1.0))
class QuantMixin():
def init_quantizer(self):
self._input_quantizer = TensorQuantizer()
self._weight_quantizer = TensorQuantizer()
@property
def input_quantizer(self):
return self._input_quantizer
@property
def weight_quantizer(self):
return self._weight_quantizer
class QuantMixinInput():
def init_quantizer(self):
self._input_quantizer = TensorQuantizer()
@property
def input_quantizer(self):
return self._input_quantizer
class QuantMixinWeight():
def init_quantizer(self):
self._weight_quantizer = TensorQuantizer()
@property
def weight_quantizer(self):
return self._weight_quantizer
| 4,740 | 33.107914 | 122 | py |
torch2trt | torch2trt-master/torch2trt/contrib/qat/layers/quant_conv.py | """
Original source code taken from nvidia quantization library.
Changes made to correctly map quantized pytorch layers to TensorRT layers at INT8
Original source: tools/pytorch_quantization/pytorch_quantization/nn/modules/quant_conv.py under
https://github.com/NVIDIA/TensorRT.git
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.modules.conv import _ConvTransposeNd
from pytorch_quantization import tensor_quant
from . import _utils
class _QuantConvNd(torch.nn.modules.conv._ConvNd, _utils.QuantWeightMixin):
"""base class of quantized Conv inherited from _ConvNd
Comments of original arguments can be found in torch.nn.modules.conv
Arguments:
quant_desc_weight: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of weight.
Raises:
ValueError: If unsupported arguments are passed in.
Readonly properties:
- weight_quantizer:
Static methods:
- set_default_quant_desc_weight: Set default_quant_desc_weight
"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding,
groups, bias, padding_mode, quant_desc_weight):
super(_QuantConvNd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation,
transposed, output_padding, groups, bias, padding_mode)
self.init_quantizer(quant_desc_weight)
def _quant(self, input):
"""WARNING: Originally Applying quantization on input and weight
Currently , quantization is applied to weights only.
Function called by the classes lower in the hierarchy, which actually performs the quantization before forward
in the derivate class the particular Function.
Arguments:
input: in_features to quantize
Returns:
A tuple: quant_weight
"""
quant_weight = self._weight_quantizer(self.weight)
return quant_weight
class QuantConv2d(_QuantConvNd):
"""Quantized 2D conv"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
**kwargs):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__,weight_only=True, **kwargs)
super(QuantConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
_pair(0), groups, bias, padding_mode,quant_desc_weight=quant_desc_weight)
def forward(self, input):
# the actual quantization happens in the next level of the class hierarchy
quant_weight = self._weight_quantizer(self.weight)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
(self.padding[0] + 1) // 2, self.padding[0] // 2)
output = F.conv2d(F.pad(input, expanded_padding, mode='circular'),
quant_weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
else:
output = F.conv2d(input, quant_weight, self.bias, self.stride, self.padding, self.dilation,self.groups)
return output
class QuantConvBN2d(_QuantConvNd):
"""Quantized 2D conv + BN"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
**kwargs):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__,weight_only=True, **kwargs)
super(QuantConvBN2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
_pair(0), groups, bias, padding_mode,quant_desc_weight=quant_desc_weight)
eps = kwargs.pop('eps',1e-5)
momentum = kwargs.pop('momentum',0.1)
affine=kwargs.pop('affine',True)
track_running_stats=kwargs.pop('track_running_stats',True)
self.bn = nn.BatchNorm2d(num_features=out_channels,eps=eps,momentum=momentum,affine=affine,track_running_stats=track_running_stats)
self.register_buffer('folded_weight',torch.ones_like(self.weight))
self.register_buffer('folded_bias',torch.ones_like(self.bn.running_mean))
def _fold_BN(self,conv_w,conv_b,bn_rm,bn_rv,eps,bn_w,bn_b):
'''
conv_w, conv_b = conv weight and bias
bn_rm,bn_rv = batch norm running mean and variance
bn_w , bn_b = batch norm weight and bias
eps = epsilon
'''
if conv_b is None:
conv_b = torch.zeros_like(bn_rm)
if bn_w is None:
bn_w = torch.ones_like(bn_rm)
if bn_b is None:
bn_b = torch.zeros_like(bn_rm)
bn_var_rsqrt = torch.rsqrt(bn_rv + eps)
scale_factor= bn_w * bn_var_rsqrt
self.folded_weight = conv_w * (scale_factor).reshape([-1] + [1] * (len(conv_w.shape) - 1))
self.folded_bias = (conv_b - bn_rm) * scale_factor + bn_b
return self.folded_weight,self.folded_bias
def forward(self, input):
# the actual quantization happens in the next level of the class hierarchy
folded_weight , folded_bias = self._fold_BN(self.weight,self.bias,self.bn.running_mean,self.bn.running_var,self.bn.eps,self.bn.weight,self.bn.bias)
quant_weight = self._weight_quantizer(folded_weight)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
(self.padding[0] + 1) // 2, self.padding[0] // 2)
output = F.conv2d(F.pad(input, expanded_padding, mode='circular'),
quant_weight, folded_bias, self.stride,
_pair(0), self.dilation, self.groups)
else:
output = F.conv2d(input, quant_weight, folded_bias, self.stride, self.padding, self.dilation,self.groups)
return output
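# Illustrative note (not part of the original source): QuantConvBN2d._fold_BN performs the
# standard conv+BN fusion with s = bn_w / sqrt(bn_rv + eps):
#   folded_weight = conv_w * s   (broadcast per output channel)
#   folded_bias   = (conv_b - bn_rm) * s + bn_b
# so the weight quantizer sees the same weights TensorRT uses after folding.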
## Inference class for quantized convbn2d
class IQuantConvBN2d(torch.nn.Conv2d,_utils.QuantMixinWeight):
'''
    Inference-side counterpart of QuantConvBN2d:
    no input quantizer, only a weight quantizer.
'''
def __init__(self,in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
**kwargs):
super().__init__(in_channels,out_channels,kernel_size,stride=stride,padding=padding,dilation=dilation,groups=groups,bias=bias,padding_mode=padding_mode)
self.init_quantizer()
eps = kwargs.pop('eps',1e-5)
momentum = kwargs.pop('momentum',0.1)
affine=kwargs.pop('affine',True)
track_running_stats=kwargs.pop('track_running_stats',True)
self.bn = nn.BatchNorm2d(num_features=out_channels,eps=eps,momentum=momentum,affine=affine,track_running_stats=track_running_stats)
self.register_buffer('folded_weight',torch.ones_like(self.weight))
self.register_buffer('folded_bias',torch.ones_like(self.bn.running_mean))
def __repr__(self):
s = super().__repr__()
s = "(" + s + "dynamic_range amax {0:.4f})".format(self._weight_quantizer.learned_amax)
return s
def forward(self,inputs):
output = F.conv2d(inputs,self.folded_weight,self.folded_bias,self.stride,self.padding,self.dilation,self.groups)
return output
## Inference class for quantized conv2d
class IQuantConv2d(torch.nn.Conv2d,_utils.QuantMixinWeight):
'''
    Inference-side counterpart of QuantConv2d:
    no input quantizer, only a weight quantizer.
'''
def __init__(self,in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros'):
super().__init__(in_channels,out_channels,kernel_size,stride=stride,padding=padding,dilation=dilation,groups=groups,bias=bias,padding_mode=padding_mode)
self.init_quantizer()
def forward(self,inputs):
return super(IQuantConv2d, self).forward(inputs)
#class QuantConv2d(torch.nn.Conv2d,_utils.QuantMixin):
# '''
# mimicking inference side of things
# '''
# def __init__(self,in_channels,
# out_channels,
# kernel_size,
# stride=1,
# padding=0,
# dilation=1,
# groups=1,
# bias=True,
# padding_mode='zeros'):
# super().__init__(in_channels,out_channels,kernel_size,stride=stride,padding=padding,dilation=dilation,groups=groups,bias=bias,padding_mode=padding_mode)
# self.init_quantizer()
#
#
| 9,912 | 38.494024 | 161 | py |
torch2trt | torch2trt-master/torch2trt/contrib/qat/layers/quant_activation.py | import torch
from . import _utils
from pytorch_quantization import tensor_quant
from pytorch_quantization.nn.modules import _utils as utils
class QuantReLU(torch.nn.ReLU,utils.QuantInputMixin):
"""
    Quantized ReLU. The output of the ReLU needs to be quantized for it to map correctly to a TensorRT layer.
"""
default_quant_desc_input = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
def __init__(self,inplace=False,**kwargs):
super(QuantReLU,self).__init__(inplace)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self,input):
output = super(QuantReLU,self).forward(input)
## Although o/p of relu is being quantized, terminology still says input quantizer, will change later
output = self._input_quantizer(output)
return output
## Inference class for quantized relu
class IQuantReLU(torch.nn.ReLU,_utils.QuantMixinInput):
'''
Mimicking inference side for relu followed by a quantized layer
'''
def __init__(self,inplace=False):
super().__init__(inplace)
self.init_quantizer()
def __repr__(self):
s = super().__repr__()
s = "(" + s + "dynamic_range amax {0:.4f})".format(self._input_quantizer.learned_amax)
return s
def forward(self,inputs):
return super(IQuantReLU,self).forward(inputs)
| 1,438 | 34.097561 | 109 | py |
torch2trt | torch2trt-master/torch2trt/contrib/qat/converters/QuantRelu.py | from torch2trt.torch2trt import *
import tensorrt as trt
@tensorrt_converter('torch2trt.contrib.qat.layers.quant_activation.IQuantReLU.forward',enabled=trt_version() >= '7.0')
def convert_QuantReLU(ctx):
module = ctx.method_args[0]
input = ctx.method_args[1]
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
layer = ctx.network.add_activation(
input=input_trt, type=trt.ActivationType.RELU)
## int 8 precision
if 'qat_mode' in ctx.torch2trt_kwargs:
amax = module._input_quantizer.learned_amax
layer.precision = trt.int8
layer.set_output_type(0,trt.int8)
out = layer.get_output(0)
out.dynamic_range=(-amax,amax)
output._trt = layer.get_output(0)
| 771 | 34.090909 | 118 | py |
torch2trt | torch2trt-master/torch2trt/contrib/qat/converters/QuantConv.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import tensorrt as trt
from torch2trt.contrib.qat.layers.quant_conv import IQuantConv2d
@tensorrt_converter('torch2trt.contrib.qat.layers.quant_conv.IQuantConv2d.forward', enabled=trt_version() >= '7.0')
def convert_QuantConv(ctx):
module = ctx.method_args[0]
input = ctx.method_args[1]
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
input_dim = input.dim() - 2
kernel_size = module.kernel_size
if not isinstance(kernel_size, tuple):
kernel_size = (kernel_size, ) * input_dim
stride = module.stride
if not isinstance(stride, tuple):
stride = (stride, ) * input_dim
padding = module.padding
if not isinstance(padding, tuple):
padding = (padding, ) * input_dim
dilation = module.dilation
if not isinstance(dilation, tuple):
dilation = (dilation, ) * input_dim
kernel = module.weight.detach().cpu().numpy()
bias = None #trt.Weights(torch_dtype_to_trt(module.weight.dtype))
if module.bias is not None:
bias = module.bias.detach().cpu().numpy()
layer = ctx.network.add_convolution_nd(
input=input_trt,
num_output_maps=module.out_channels,
kernel_shape=kernel_size,
kernel=kernel,
bias=bias)
layer.stride_nd = stride
layer.padding_nd = padding
layer.dilation_nd = dilation
if module.groups is not None:
layer.num_groups = module.groups
if 'qat_mode' in ctx.torch2trt_kwargs:
#Setting dynamic range for conv
w_quant_amax = module._weight_quantizer.learned_amax
layer.precision = trt.int8
layer.set_output_type(0,trt.int8)
conv_out = layer.get_output(0)
conv_out.dynamic_range=(-w_quant_amax,w_quant_amax)
output._trt = layer.get_output(0)
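# In QAT mode the convolution output is forced to int8 and its dynamic range is
# taken from the amax learned by the module's weight quantizer.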
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_basic_trt7():
return IQuantConv2d(10, 5, kernel_size=1, stride=1, padding=0)
'''
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_stride2_trt7():
return torch.nn.Conv2d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_kernel3_trt7():
return torch.nn.Conv2d(10, 5, kernel_size=3, stride=2, padding=1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_dilation2_trt7():
return torch.nn.Conv2d(10, 5, kernel_size=3, stride=1, padding=1, dilation=2)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_basic_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=1, stride=1, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_stride2_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_kernel3_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=3, stride=2, padding=1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_dilation2_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=3, stride=1, padding=1, dilation=2)
'''
| 3,568 | 33.990196 | 116 | py |
torch2trt | torch2trt-master/torch2trt/contrib/qat/converters/QuantConvBN.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import tensorrt as trt
from torch2trt.contrib.qat.layers.quant_conv import IQuantConv2d
@tensorrt_converter('torch2trt.contrib.qat.layers.quant_conv.IQuantConvBN2d.forward', enabled=trt_version() >= '7.0')
def convert_QuantConvBN(ctx):
module = ctx.method_args[0]
input = ctx.method_args[1]
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
input_dim = input.dim() - 2
kernel_size = module.kernel_size
if not isinstance(kernel_size, tuple):
kernel_size = (kernel_size, ) * input_dim
stride = module.stride
if not isinstance(stride, tuple):
stride = (stride, ) * input_dim
padding = module.padding
if not isinstance(padding, tuple):
padding = (padding, ) * input_dim
dilation = module.dilation
if not isinstance(dilation, tuple):
dilation = (dilation, ) * input_dim
kernel = module.folded_weight.detach().cpu().numpy()
bias = None #trt.Weights(torch_dtype_to_trt(module.weight.dtype))
if hasattr(module,'folded_bias'):
bias = module.folded_bias.detach().cpu().numpy()
layer = ctx.network.add_convolution_nd(
input=input_trt,
num_output_maps=module.out_channels,
kernel_shape=kernel_size,
kernel=kernel,
bias=bias)
layer.stride_nd = stride
layer.padding_nd = padding
layer.dilation_nd = dilation
if module.groups is not None:
layer.num_groups = module.groups
if 'qat_mode' in ctx.torch2trt_kwargs:
#Setting dynamic range for conv
w_quant_amax = module._weight_quantizer.learned_amax
layer.precision = trt.int8
layer.set_output_type(0,trt.int8)
conv_out = layer.get_output(0)
conv_out.dynamic_range=(-w_quant_amax,w_quant_amax)
output._trt = layer.get_output(0)
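# Unlike the plain conv converter, the TensorRT convolution here is built from
# the batch-norm-folded parameters (folded_weight / folded_bias) exposed by
# IQuantConvBN2d, so no separate scale layer is emitted for the BN.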
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_basic_trt7():
return IQuantConv2d(10, 5, kernel_size=1, stride=1, padding=0)
'''
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_stride2_trt7():
return torch.nn.Conv2d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_kernel3_trt7():
return torch.nn.Conv2d(10, 5, kernel_size=3, stride=2, padding=1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_dilation2_trt7():
return torch.nn.Conv2d(10, 5, kernel_size=3, stride=1, padding=1, dilation=2)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_basic_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=1, stride=1, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_stride2_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_kernel3_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=3, stride=2, padding=1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_dilation2_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=3, stride=1, padding=1, dilation=2)
'''
| 3,594 | 34.245098 | 118 | py |
torch2trt | torch2trt-master/torch2trt/converters/einsum.py | import torch.nn as nn
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.einsum')
def convert_einsum(ctx):
einsum_eq = ctx.method_args[0]
input_tensors = ctx.method_args[1:]
output = ctx.method_return
# parts = einsum_eq.split('->')
# strip batch dimension
# if len(parts) > 1:
# lhs = parts[0]
# rhs = parts[1]
# lhs = ','.join([part[1:] for part in lhs.split(',')])
# rhs = rhs[1:]
# einsum_eq = lhs + '->' + rhs
# else:
# einsum_eq = ','.join([part[1:] for part in einsum_eq.split(',')])
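    # The commented block above stripped the leading batch dimension from the
    # equation, presumably for implicit-batch engines; with explicit-batch
    # networks the equation is handed to add_einsum unchanged.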
layer = ctx.network.add_einsum(
[t._trt for t in input_tensors],
einsum_eq
)
output._trt = layer.get_output(0)
class Einsum(nn.Module):
def __init__(self, einsum_eq):
super().__init__()
self.einsum_eq = einsum_eq
def forward(self, *args):
return torch.einsum(self.einsum_eq, *args)
@add_module_test(torch.float32, torch.device('cuda'), [(2, 2, 5), (2, 5, 4)], max_batch_size=2)
def test_einsum_bmm():
    return Einsum('bij,bjk->bik')
| 1,147 | 23.956522 | 95 | py |
torch2trt | torch2trt-master/torch2trt/converters/unsqueeze.py | import tensorrt as trt
import numpy as np
import torch
from torch2trt.torch2trt import tensorrt_converter, get_arg, torch_dim_resolve_negative, add_missing_trt_tensors, torch_dim_to_trt_axes
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.Tensor.unsqueeze')
@tensorrt_converter('torch.unsqueeze')
def convert_unsqueeze(ctx):
input = ctx.method_args[0]
if not hasattr(input, '_trt'):
return
    dim = get_arg(ctx, 'dim', pos=1, default=None)
    assert(dim is not None)
    if dim < 0:
        # resolve negative dims against the rank of the unsqueezed output
        dim = input.ndim + 1 + dim
output = ctx.method_return
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
input_shape_trt = ctx.network.add_shape(input_trt).get_output(0)
new_shape_trt = []
for i in range(input.ndim):
# copy input dim
new_shape_trt.append(
ctx.network.add_slice(input_shape_trt, [i], [1], [1]).get_output(0)
)
# add unsqueeze dim
new_shape_trt.insert(
dim,
ctx.network.add_constant([1], np.array([1], dtype=np.int32)).get_output(0)
)
new_shape_trt = ctx.network.add_concatenation(new_shape_trt).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
layer.set_input(1, new_shape_trt)
output._trt = layer.get_output(0)
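# The target shape is assembled as a runtime shape tensor: each input dimension
# is sliced out of shape(input) and a constant 1 is spliced in at 'dim', so the
# shuffle also works when the input has dynamic dimensions.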
class UnSqueeze(torch.nn.Module):
def __init__(self, dim):
super(UnSqueeze, self).__init__()
self.dim = dim
def forward(self, x):
return x.unsqueeze(dim=self.dim)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 7)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 5, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 1, 3)], max_batch_size=2)
def test_unsqueeze():
return UnSqueeze(2)
| 1,725 | 27.766667 | 135 | py |
torch2trt | torch2trt-master/torch2trt/converters/squeeze.py | import tensorrt as trt
import numpy as np
import torch
from torch2trt.torch2trt import tensorrt_converter, get_arg, torch_dim_resolve_negative, add_missing_trt_tensors, torch_dim_to_trt_axes
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.Tensor.squeeze')
@tensorrt_converter('torch.squeeze')
def convert_squeeze(ctx):
input = ctx.method_args[0]
output = ctx.method_return
    dim = get_arg(ctx, 'dim', pos=1, default=None)
    if dim is not None:
        if dim < 0:
            dim = len(input.shape) + dim
        assert dim >= 0
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
input_shape_trt = ctx.network.add_shape(input_trt).get_output(0)
new_shape_trt = []
# get shape before flatten
for i in range(input.ndim):
        if input.size(i) == 1 and (dim is None or i == dim):
            continue  # skip squeezed size-1 dimensions
else:
new_shape_trt.append(
ctx.network.add_slice(input_shape_trt, [i], [1], [1]).get_output(0)
)
new_shape_trt = ctx.network.add_concatenation(new_shape_trt).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
layer.set_input(1, new_shape_trt)
output._trt = layer.get_output(0)
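# As in the unsqueeze converter, the new shape is built from runtime slices of
# the input shape tensor; note that the size-1 test itself uses input.size(i),
# i.e. the example shape observed at conversion time.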
class Squeeze(torch.nn.Module):
def __init__(self, dim):
super(Squeeze, self).__init__()
self.dim = dim
def forward(self, x):
return x.squeeze(dim=self.dim)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 1)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 1, 3)], max_batch_size=2)
def test_squeeze():
return Squeeze(2)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 1, 1)])
def test_squeeze_neg():
return Squeeze(-1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 1, 1)])
def test_squeeze_neg2():
return Squeeze(-2)
| 1,912 | 29.854839 | 135 | py |
torch2trt | torch2trt-master/torch2trt/converters/batch_norm.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.batch_norm', enabled=trt_version() >= '7.0')
def convert_batch_norm_trt7(ctx):
input = get_arg(ctx, 'input', pos=0, default=None)
running_mean = get_arg(ctx, 'running_mean', pos=1, default=None)
running_var = get_arg(ctx, 'running_var', pos=2, default=None)
weight = get_arg(ctx, 'weight', pos=3, default=None)
bias = get_arg(ctx, 'bias', pos=4, default=None)
eps = get_arg(ctx, 'eps', pos=7, default=10e-6)
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
scale = weight.detach().cpu().numpy() / np.sqrt(running_var.detach().cpu().numpy() + eps)
bias = bias.detach().cpu().numpy() - running_mean.detach().cpu().numpy() * scale
power = np.ones_like(scale)
layer = ctx.network.add_scale_nd(input_trt, trt.ScaleMode.CHANNEL, bias, scale, power, 1)
output._trt = layer.get_output(0)
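# Batch norm is folded into a single per-channel affine (scale) layer:
#   y = scale * x + bias, with scale = weight / sqrt(running_var + eps)
#   and bias = bias - running_mean * scale (power fixed at 1).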
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3)], enabled=trt_version() >= '7.0')
@add_module_test(torch.float32, torch.device('cuda'), [(2, 10, 3, 3)], enabled=trt_version() >= '7.0', max_batch_size=2)
def test_batch_norm_2d_trt7():
return torch.nn.BatchNorm2d(10)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3, 3)], enabled=trt_version() >= '7.0')
@add_module_test(torch.float32, torch.device('cuda'), [(2, 10, 3, 3, 3)], enabled=trt_version() >= '7.0', max_batch_size=2)
def test_batch_norm_3d_2_trt7():
return torch.nn.BatchNorm3d(10)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 32, 2, 36, 47)], enabled=trt_version() >= '7.0')
@add_module_test(torch.float32, torch.device('cuda'), [(2, 32, 2, 36, 47)], enabled=trt_version() >= '7.0', max_batch_size=2)
def test_batch_norm_3d_trt7():
return torch.nn.BatchNorm3d(32)
| 1,915 | 42.545455 | 125 | py |