repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
mBNN | mBNN-main/MBNN/Polynomial.py | import argparse
from datetime import date
import os
import numpy as np
import copy
from datetime import datetime
from progress.bar import ChargingBar as Bar
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
from utils import *
from evaluate import *
from MBNN.models import *
from MBNN.MCMC import *
from MBNN.Mask_Update import *
# Command-line interface for the noisy cubic-polynomial regression experiment.
parser = argparse.ArgumentParser(description='mBNN noisy polynomial regression')
# Proposal strategies for the birth/death mask-update MCMC moves
# ('proposed' = gradient-informed, 'Oops' = Oops-I-took-a-gradient style, 'random' = uniform).
parser.add_argument('--death_method', type=str, default= 'proposed', choices=["proposed", "Oops", "random"])
parser.add_argument('--birth_method', type=str, default= 'random', choices=["proposed", "Oops", "random"])
parser.add_argument('--gpu', default=0, type=int, help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--out', default='', help='Directory to output the result')
parser.add_argument('--data_dir', default='', help='Directory of dataset')
# 'mBNN' enables the masked (node-sparse) variant; 'BNN' is the dense baseline.
parser.add_argument('--method', type=str, default= 'mBNN', choices=['BNN', 'mBNN'])
args = parser.parse_args()
print(args)
torch.cuda.set_device(args.gpu)
def main():
    """Run mBNN/BNN HMC sampling on a toy noisy cubic-regression dataset.

    Generates n points of y = x^3 + N(0, 3^2) noise, runs HMC with
    dual-averaging step-size tuning plus birth/death mask updates, logs
    per-layer node sparsity to CSV, and saves thinned posterior samples.
    All hyper-parameters are hard-coded except those read from the CLI `args`.
    """
    out_directory = args.out + "/method_" + args.method
    if args.method == 'mBNN':
        out_directory += "_birth_" + args.birth_method + "_death_" + args.death_method
    # Toy data: n points of y = x^3 + N(0, 3^2) noise on [-4, 4].
    n = 20
    p_data = 1
    num_classes = 1
    x = np.random.uniform(low=-4.0, high=4.0, size=n)
    eps = np.random.normal(loc=0.0, scale=3.0, size=n)
    y = x**3 + eps
    # Model / sampler hyper-parameters.
    L, p, prior, prior_scale = 2, 1000, "Cauchy", 0.3
    lr = 0.1
    thinning_interval = 10
    burn_in_sample = 300
    num_sample = 1
    lamb, N_max = 0.1, 3
    task = 'regression'
    model = M_MLP(p_data, num_classes, L, p, prior, prior_scale).cuda()
    step_size = lr/n
    total_epoch = thinning_interval*(burn_in_sample+num_sample)
    burn_in_epoch = thinning_interval*burn_in_sample
    step_size_tuning = DualAveragingStepSize(initial_step_size=step_size)
    model_tmp = copy.deepcopy(model)
    trainloader = DataLoader(TensorDataset(torch.tensor(x).float().reshape(-1, 1),
                                           torch.tensor(y).float().reshape(-1, 1)),
                             batch_size=n)
    model_list = []
    res = pd.DataFrame()
    for epoch in range(total_epoch):
        model.train()
        try:
            p_accept = HMC(model, task, trainloader, lr=step_size, lf_step = 30)
            # Tune the step size during burn-in; freeze the averaged value after.
            if epoch < burn_in_epoch:
                step_size, _ = step_size_tuning.update(p_accept)
            if epoch == burn_in_epoch:
                _, step_size = step_size_tuning.update(p_accept)
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.  A failed HMC step (e.g. numerical
            # blow-up) rolls the model back to the last accepted state.
            model = copy.deepcopy(model_tmp)
        model_tmp = copy.deepcopy(model)
        # NOTE(review): mask updates run every epoch even for --method BNN;
        # confirm whether they should be gated on args.method == 'mBNN'.
        if (epoch+1) % 1 == 0:
            model.eval()
            for _ in range(2):
                mask_update_dataloader(model, task, trainloader, n, lamb,
                                       N_max, args.death_method, args.birth_method)
        if task == 'regression':
            # Gibbs step for the observation-noise scale.
            sigma_update(model, trainloader, n)
        if args.method == "mBNN":
            model.eval()
            with torch.no_grad():
                _, ERROR_train = log_likelihood_dataloader(model, task, trainloader)
            # Record how many units are active in each masked layer.
            node_sparsity_l = []
            l = 0
            for name, param in model.named_parameters():
                if 'active' in name:
                    node_sparsity_l.append(torch.sum(param.data).item())
                    l += 1
            node_sparsity = np.mean(np.stack(node_sparsity_l))
            res.loc[epoch, 'n1'] = node_sparsity_l[0]
            res.loc[epoch, 'n2'] = node_sparsity_l[1]
            res.loc[epoch, 'n_r'] = (node_sparsity_l[0] + node_sparsity_l[1])/(2*1000)
            res.to_csv(out_directory + '/node_sparsity.csv')
        # Keep thinned post-burn-in samples of the full model.
        if (epoch + 1) > burn_in_epoch and (epoch+1-burn_in_epoch) % thinning_interval == 0:
            model_list.append(copy.deepcopy(model))
            torch.save({'model_list': model_list}, out_directory + "/models.pth")
if __name__ == '__main__':
main() | 4,017 | 33.637931 | 135 | py |
mBNN | mBNN-main/MBNN/UCI.py | import argparse
from datetime import date
import os
import numpy as np
import copy
from datetime import datetime
from progress.bar import ChargingBar as Bar
import pickle
import sys
sys.path.append('..')
from utils import *
from evaluate import *
from MBNN.models import *
from MBNN.MCMC import *
from MBNN.Mask_Update import *
# Command-line interface for the mBNN UCI regression experiments.
parser = argparse.ArgumentParser(description='mBNN for UCI dataset')
########################## model setting ##########################
parser.add_argument('--model', type=str, default= 'MLP', choices=['MLP', 'resnet18'], help='architecture of model')
parser.add_argument('--L', type=int, default= 2, help='depth of MLP')
parser.add_argument('--p', type=int, default= 1000, help='width of MLP')
parser.add_argument('--prior', type=str, default= 'Cauchy', choices=['Normal', 'Cauchy'], help='type of prior')
parser.add_argument('--prior_scale', type=float, default= 1.0, help='scale of prior')
########################## basic setting ##########################
# Experiments are repeated for every seed in [start_seed, end_seed].
parser.add_argument('--start_seed', type=int, help='start_seed')
parser.add_argument('--end_seed', type=int, help='end_seed')
parser.add_argument('--gpu', default=0, type=int, help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--data_dir', default='', help='Directory of dataset')
parser.add_argument('--out', default='', help='Directory to output the result')
######################### Dataset setting #############################
parser.add_argument('--dataset', type=str, default= 'Boston', choices=['Boston', 'Concrete', 'Energy', 'Yacht'], help='dataset name')
# batch_size == -1 is presumably "full batch"; handled inside data_process — TODO confirm.
parser.add_argument('--batch_size', default=-1, type=int, help='train batchsize')
######################### MCMC setting #############################
parser.add_argument('--num_sample', default=20, type=int, help='the number of MCMC sample')
parser.add_argument('--burn_in_sample', default=2, type=int, help='the number of MCMC burn_in sample')
parser.add_argument('--thinning_interval', default=200, type=int, help='thinning_interval epoch')
parser.add_argument('--lr', default=1e-2, type=float, help='initial learning rate')
parser.add_argument('--lf_step', default=20, type=int, help='the number of leapfrog step')
######################### Mask setting ###############################
parser.add_argument('--update_period', default=1, type=int, help='period of mask update')
parser.add_argument('--num_update', default=10, type=int, help='number of times to update at a time')
parser.add_argument('--lamb', default=0.1, type=float, help='hyperparameter for the prior of sparsity')
parser.add_argument('--N_max', default=3, type=int, help='maximum of the number of updated mask')
parser.add_argument('--death_method', type=str, default= 'proposed', choices=["proposed", "Oops", "random"])
parser.add_argument('--birth_method', type=str, default= 'random', choices=["proposed", "Oops", "random"])
######################### add name #############################
parser.add_argument('--add_name', default='', type=str, help='add_name')
args = parser.parse_args()
print(args)
torch.cuda.set_device(args.gpu)
def main():
    """Train/sample mBNN models on a UCI regression dataset across seeds.

    For each seed: runs HMC with dual-averaging step-size tuning plus
    birth/death mask updates, saves thinned posterior samples to disk, then
    evaluates the posterior ensemble (RMSE, mean NLL, CRPS) and prints
    per-seed and aggregated results.
    """
    out_directory = args.out + '/MBNN' + '/' + str(args.dataset)
    out_directory += '/' + str(date.today().strftime('%Y%m%d')[2:])
    out_directory += '/' + '_lam' + str(args.lamb)
    out_directory += '_bm_' + args.birth_method + '_dm_' + args.death_method
    if args.add_name != '':
        out_directory += '_' + str(args.add_name)
    if not os.path.isdir(out_directory):
        mkdir_p(out_directory)
    result1_list = []
    result2_list = []
    result3_list = []
    # BUG FIX: `task` was used below (HMC, mask updates, likelihood) but never
    # defined, which raised NameError at runtime.  Every dataset choice here is
    # a regression problem (sigma Gibbs updates + RMSE evaluation), so set it
    # explicitly.
    task = 'regression'
    for seed in range(args.start_seed, args.end_seed+1):
        print("seed : ", seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        # dataset
        trainloader, testloader, n_train, n_test, p_data, num_classes = data_process(args.dataset, args.data_dir, seed, args.batch_size)
        std = torch.std(next(iter(trainloader))[1])
        model = M_MLP(p_data, num_classes, args.L, args.p, args.prior, args.prior_scale).cuda()
        # MCMC bookkeeping
        step_size = args.lr/n_train
        total_epoch = args.thinning_interval*(args.burn_in_sample+args.num_sample)
        burn_in_epoch = args.thinning_interval*args.burn_in_sample
        mt = 0
        bar = Bar('{:>10}'.format('Training'), max=total_epoch)
        step_size_tuning = DualAveragingStepSize(initial_step_size=step_size)
        model_tmp = copy.deepcopy(model)
        res = pd.DataFrame()
        for epoch in range(total_epoch):
            try:
                p_accept = HMC(model, task, trainloader, lr=step_size, lf_step = args.lf_step)
                # Tune step size during burn-in; freeze averaged value after.
                if epoch < burn_in_epoch:
                    step_size, _ = step_size_tuning.update(p_accept)
                if epoch == burn_in_epoch:
                    _, step_size = step_size_tuning.update(p_accept)
            except Exception:
                # BUG FIX: was a bare `except:`; a failed HMC step rolls the
                # model back to the last accepted state.
                model = copy.deepcopy(model_tmp)
            model_tmp = copy.deepcopy(model)
            if (epoch+1) % args.update_period == 0:
                model.eval()
                for _ in range(args.num_update):
                    mask_update_dataloader(model, task, trainloader, n_train, args.lamb, args.N_max, args.death_method, args.birth_method)
            # Gibbs step for the observation-noise scale.
            sigma_update(model, trainloader, n_train, std)
            if (epoch+1) % (20) == 0:
                model.eval()
                with torch.no_grad():
                    _, ERROR_train = log_likelihood_dataloader(model, task, trainloader)
                    _, ERROR_test = log_likelihood_dataloader(model, task, testloader)
                ERROR_train, ERROR_test = np.sqrt(ERROR_train.item()/n_train), np.sqrt(ERROR_test.item()/n_test)
                # Per-layer count of active units (node sparsity).
                node_sparsity_l = []
                l = 0
                for name, param in model.named_parameters():
                    if 'active' in name:
                        node_sparsity_l.append(torch.sum(param.data).item())
                        l += 1
                node_sparsity = np.mean(np.stack(node_sparsity_l))
                bar.suffix = '({epo}/{total_epo}) ERR_train:{ER_tr} | ERR_test:{ER_te} | {ns}'.format(
                    epo=epoch + 1,
                    total_epo=total_epoch,
                    ER_tr=np.round(ERROR_train, 3),
                    ER_te=np.round(ERROR_test, 3),
                    ns=np.round(node_sparsity, 2)
                )
                res.loc[epoch, 'n1'] = node_sparsity_l[0]
                res.loc[epoch, 'n2'] = node_sparsity_l[1]
                res.loc[epoch, 'n_r'] = (node_sparsity_l[0] + node_sparsity_l[1])/(2*args.p)
                res.to_csv(out_directory + '/node_sparsity_seed_%d.csv' % (seed,))
            # Keep thinned post-burn-in posterior samples on disk.
            if (epoch + 1) > burn_in_epoch and (epoch+1-burn_in_epoch) % args.thinning_interval == 0:
                torch.save(model.state_dict(), out_directory + '/seed_%d_mt_%d.pt' % (seed, mt))
                mt += 1
            bar.next()
        bar.finish()
        print("model testing")
        pred_list = []
        target_list = []
        sigma_list = []
        with torch.no_grad():
            for mt in range(args.num_sample):
                model = M_MLP(p_data, num_classes, args.L, args.p, args.prior, args.prior_scale).cuda()
                model.load_state_dict(torch.load(out_directory + '/seed_%d_mt_%d.pt' % (seed, mt)))
                model.eval()
                pred = []
                for batch_idx, (inputs, targets) in enumerate(testloader):
                    inputs, targets = inputs.cuda(), targets.cuda()
                    outputs = model(inputs)
                    pred.append(outputs.squeeze())
                    # Targets are identical across posterior samples;
                    # collect them only once.
                    if mt == 0:
                        target_list.append(targets.squeeze())
                sigma_list.append(model.sigma.data)
                pred_list.append(torch.cat(pred, 0))
        pred_list = torch.stack(pred_list)
        target_list = torch.cat(target_list, 0)
        sigma_list = torch.cat(sigma_list, 0)
        RMSE, m_NLL, CRPS = evaluate_averaged_model_regression(pred_list, target_list, sigma_list)
        print("RMSE : ", RMSE, " m_NLL : ", m_NLL, " CRPS : ", CRPS)
        result1_list.append(RMSE)
        result2_list.append(m_NLL)
        result3_list.append(CRPS)
    # NOTE(review): num_seed = end - start is one less than the number of
    # seeds actually run; confirm whether std/sqrt(n-1) is intended here.
    num_seed = args.end_seed - args.start_seed
    result1_list, result2_list, result3_list = np.stack(result1_list), np.stack(result2_list), np.stack(result3_list)
    print("%.3f(%.3f), %.3f(%.3f), %.3f(%.3f)" % (np.mean(result1_list), np.std(result1_list)/np.sqrt(num_seed), np.mean(result2_list), np.std(result2_list)/np.sqrt(num_seed), np.mean(result3_list), np.std(result3_list)/np.sqrt(num_seed)))
if __name__ == '__main__':
main()
| 9,295 | 43.908213 | 251 | py |
mBNN | mBNN-main/MBNN/models.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class M_relu(nn.Module):
    """ReLU whose output is gated elementwise by a fixed binary `active` mask.

    The first `init_active_dim` of the `input_dim` units start active (mask 1);
    the rest start inactive (mask 0).  The mask is a non-trainable parameter so
    external code can flip entries directly via `.data`.
    """

    def __init__(self, input_dim, init_active_dim):
        super().__init__()
        self.input_dim = input_dim
        self.init_active_dim = init_active_dim
        mask_values = [1] * self.init_active_dim
        mask_values += [0] * (self.input_dim - self.init_active_dim)
        self.active = nn.Parameter(torch.cuda.FloatTensor(mask_values),
                                   requires_grad=False)

    def forward(self, x):
        activated = F.relu(x)
        # Broadcast the mask over (batch, features) or (batch, C, H, W).
        if x.dim() == 2:
            return self.active.view(1, -1) * activated
        if x.dim() == 4:
            return self.active.view(1, -1, 1, 1) * activated
class M_MLP(nn.Module):
    """MLP with masked (growable) hidden layers for mBNN sampling.

    The stored width `self.p` is 110% of `init_p` so each hidden layer keeps
    spare inactive units that birth moves can switch on later.  A `sigma`
    buffer holds the regression noise scale, and `self.prior` is the weight
    prior used by the sampler.
    """

    def __init__(self, input_dim, output_dim, L, init_p, prior, prior_scale):
        super(M_MLP, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.L = L
        self.p = np.floor(1.1 * init_p).astype(int)  # For additional space
        self.p_vec = np.hstack([input_dim, np.repeat(self.p, L), output_dim])
        self.init_active_dim = init_p
        self.layers = self._make_layer()
        self.register_buffer('sigma', torch.tensor([1.0]).cuda())
        if prior == 'Cauchy':
            self.prior = torch.distributions.cauchy.Cauchy(
                torch.tensor([0.0]).cuda(), torch.tensor([prior_scale]).cuda())
        elif prior == 'Normal':
            self.prior = torch.distributions.normal.Normal(
                torch.tensor([0.0]).cuda(), torch.tensor([prior_scale]).cuda())
        self.prior_scale = prior_scale

    def _make_layer(self):
        # L hidden (Linear -> masked-ReLU) blocks followed by a linear head.
        blocks = []
        for l in range(self.L):
            blocks.append(nn.Sequential(
                nn.Linear(self.p_vec[l], self.p_vec[l + 1]),
                M_relu(self.p_vec[l + 1], self.init_active_dim),
            ))
        blocks.append(nn.Sequential(nn.Linear(self.p, self.output_dim)))
        return nn.Sequential(*blocks)

    def forward(self, x):
        flat = x.view(-1, self.input_dim)
        return self.layers(flat)
class Masked_BasicBlock(nn.Module):
    """ResNet basic block whose ReLUs are gated by binary masks (M_relu).

    Each masked ReLU starts with 4/5 of its channels active, leaving spare
    capacity for birth moves.
    """

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(Masked_BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.M_relu1 = M_relu(planes, int(planes * 4 / 5))
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.M_relu2 = M_relu(planes, int(planes * 4 / 5))
        # Projection shortcut when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        branch = self.M_relu1(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        branch = branch + self.shortcut(x)
        return self.M_relu2(branch)
class Masked_ResNet(nn.Module):
    """ResNet with masked ReLUs; channel counts are over-provisioned by 5/4
    to leave spare inactive channels for birth moves.  Carries a weight prior
    (`self.prior`) for the MCMC sampler, like M_MLP.
    """

    def __init__(self, block, num_blocks, num_classes, prior, prior_scale):
        super(Masked_ResNet, self).__init__()
        self.in_planes = int(64 * (5 / 4))
        self.conv1 = nn.Conv2d(3, int(64 * (5 / 4)), kernel_size=3,
                               stride=1, padding=1, bias=False)  # For additional space
        self.bn1 = nn.BatchNorm2d(int(64 * (5 / 4)))
        self.M_relu = M_relu(int(64 * (5 / 4)), 64)
        self.layer1 = self._make_layer(block, int(64 * (5 / 4)), num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, int(128 * (5 / 4)), num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, int(256 * (5 / 4)), num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, int(512 * (5 / 4)), num_blocks[3], stride=2)
        self.linear = nn.Linear(int(512 * (5 / 4)) * block.expansion, num_classes)
        if prior == 'Cauchy':
            self.prior = torch.distributions.cauchy.Cauchy(
                torch.tensor([0.0]).cuda(), torch.tensor([prior_scale]).cuda())
        elif prior == 'Normal':
            self.prior = torch.distributions.normal.Normal(
                torch.tensor([0.0]).cuda(), torch.tensor([prior_scale]).cuda())
        self.prior_scale = prior_scale

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block may downsample; the rest keep stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        stage = []
        for s in strides:
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        h = self.M_relu(self.bn1(self.conv1(x)))
        h = self.layer1(h)
        h = self.layer2(h)
        h = self.layer3(h)
        h = self.layer4(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def M_ResNet18(num_classes, prior, prior_scale):
return Masked_ResNet(Masked_BasicBlock, [2,2,2,2], num_classes=num_classes, prior=prior, prior_scale=prior_scale) | 5,411 | 41.28125 | 134 | py |
mBNN | mBNN-main/MBNN/MCMC.py | import numpy as np
import torch
import copy
def ll_regression(output, target, sigma):
    """Gaussian log-likelihood of `target` given mean `output` and scale `sigma`.

    Sums over all elements; the normalization constant is counted once per row
    of `output` (i.e. per observation).
    """
    sq_err = ((target - output) ** 2).sum()
    n_obs = output.shape[0]
    norm_const = (-0.5 * torch.log(2 * torch.tensor(np.pi)) - torch.log(sigma)) * n_obs
    return norm_const - sq_err / (2 * sigma ** 2)
def log_prior(model):
    """Sum the prior log-density over all weight parameters of `model`.

    BatchNorm parameters, mask (`active`) parameters, and the noise scale
    (`sigma`) are excluded from the prior.
    """
    total = torch.zeros(1, requires_grad=True).cuda()
    for name, param in model.named_parameters():
        excluded = ('bn' in name) or ('active' in name) or ('sigma' in name)
        if excluded:
            continue
        total = total + model.prior.log_prob(param).sum()
    return total
def log_likelihood_dataloader(model, task, dataloader):
    """Total log-likelihood and error of `model` over all batches.

    Args:
        model: network; for regression its `sigma` buffer is the noise scale.
        task: 'regression' or 'classification'.
        dataloader: yields (inputs, targets) batches.

    Returns:
        (l_likelihood, ERROR): summed log-likelihood, plus summed squared
        error (regression) or misclassification count (classification).
    """
    l_likelihood = torch.zeros(1, requires_grad=True).cuda()
    ERROR = torch.zeros(1, requires_grad=True).cuda()
    # BUG FIX: original used `task is 'regression'` — identity comparison with
    # a string literal relies on CPython interning and emits SyntaxWarning.
    if task == 'regression':
        for batch_idx, (inputs, targets) in enumerate(dataloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = model(inputs)
            l_likelihood = l_likelihood + ll_regression(outputs, targets, model.sigma)
            ERROR += ((targets - outputs)**2).sum()
    elif task == 'classification':
        criterion = torch.nn.CrossEntropyLoss(reduction='sum')
        for batch_idx, (inputs, targets) in enumerate(dataloader):
            inputs, targets = inputs.cuda(), targets.cuda().squeeze().long()
            outputs = model(inputs)
            # CrossEntropyLoss returns negative log-likelihood; subtract it.
            l_likelihood = l_likelihood - criterion(outputs, targets)
            ERROR += (torch.argmax(outputs, 1) != targets).sum()
    return l_likelihood, ERROR
def log_prob_dataloader(model, task, dataloader):
    """Unnormalized log-posterior of `model` over the whole dataloader."""
    likelihood, _ = log_likelihood_dataloader(model, task, dataloader)
    return log_prior(model) + likelihood
def log_likelihood_data(model, task, inputs, targets):
    """Log-likelihood and error of `model` on a single (inputs, targets) batch.

    Returns:
        (l_likelihood, ERROR): log-likelihood of the batch, plus squared error
        (regression) or misclassification count (classification).
    """
    # BUG FIX: original used `task is 'regression'` — identity comparison with
    # a string literal relies on CPython interning and emits SyntaxWarning.
    if task == 'regression':
        inputs, targets = inputs.cuda(), targets.cuda()
        outputs = model(inputs)
        l_likelihood = ll_regression(outputs, targets, model.sigma)
        ERROR = ((targets - outputs)**2).sum()
    elif task == 'classification':
        criterion = torch.nn.CrossEntropyLoss(reduction='sum')
        inputs, targets = inputs.cuda(), targets.cuda().squeeze().long()
        outputs = model(inputs)
        # CrossEntropyLoss returns negative log-likelihood; negate it.
        l_likelihood = - criterion(outputs, targets)
        ERROR = (torch.argmax(outputs, 1) != targets).sum()
    return l_likelihood, ERROR
def log_prob_data(model, task, inputs, targets, datasize):
    """Stochastic estimate of the log-posterior from one minibatch.

    The minibatch likelihood is rescaled by datasize / batch_size so its
    expectation matches the full-data likelihood.
    """
    scale = datasize / targets.shape[0]
    likelihood, _ = log_likelihood_data(model, task, inputs, targets)
    return log_prior(model) + likelihood * scale
def HMC(model, task, dataloader, lr, lf_step):
    """One Hamiltonian Monte Carlo transition on `model`'s parameters.

    Runs `lf_step` leapfrog steps with step size `lr` on a deep copy of the
    model, then Metropolis-accepts the proposal; on acceptance the copy's
    state is loaded back into `model` in place.

    Returns the acceptance probability (0 on numerical failure / NaN).
    Note: gradients of the log-posterior are *added* to the momentum (ascent),
    which corresponds to potential energy U = -log p.
    """
    model_tmp = copy.deepcopy(model).cuda()
    # Resample momenta; they are stashed as an ad-hoc `.momentum` attribute on
    # each trainable parameter tensor.
    for n, p in model_tmp.named_parameters():
        if p.requires_grad:
            if not hasattr(p, 'momentum'):
                setattr(p, 'momentum', torch.zeros_like(p.data))
            p.momentum = torch.randn(p.size()).cuda()
    log_prob = log_prob_dataloader(model_tmp, task, dataloader)
    # Initial Hamiltonian: potential (-log p) + kinetic (||m||^2 / 2).
    ham = -log_prob.data
    for n, p in model_tmp.named_parameters():
        if p.requires_grad:
            ham += (p.momentum * p.momentum).sum()/2
    # First leapfrog half-step for momentum, full step for position.
    model_tmp.zero_grad()
    log_prob.backward()
    for n, p in model_tmp.named_parameters():
        if p.requires_grad:
            p.momentum += lr*p.grad.data/2
            p.data = p.data + lr * p.momentum
    # Remaining (lf_step - 1) full leapfrog steps.
    for step in range(lf_step-1):
        model_tmp.zero_grad()
        log_prob = log_prob_dataloader(model_tmp, task, dataloader)
        log_prob.backward()
        for n, p in model_tmp.named_parameters():
            if p.requires_grad:
                p.momentum += lr*p.grad.data
                p.data = p.data + lr * p.momentum
    # Final momentum half-step to complete the integrator.
    model_tmp.zero_grad()
    log_prob = log_prob_dataloader(model_tmp, task, dataloader)
    log_prob.backward()
    for n, p in model_tmp.named_parameters():
        if p.requires_grad:
            p.momentum += lr*p.grad.data/2
    # Hamiltonian of the proposal.
    ham_tmp = -log_prob.data
    for n, p in model_tmp.named_parameters():
        if p.requires_grad:
            ham_tmp += (p.momentum * p.momentum).sum()/2
    # Diverged trajectory: reject outright.
    if np.isnan((-ham_tmp + ham).item()):
        return 0
    # Metropolis correction: accept with prob min(1, exp(H_old - H_new)).
    log_p_accept = min(0., float(-ham_tmp + ham))
    if log_p_accept >= torch.log(torch.rand(1)):
        model.load_state_dict(copy.deepcopy(model_tmp.state_dict()))
    return np.exp(log_p_accept)
class DualAveragingStepSize:
    """Dual-averaging adaptation of the HMC step size.

    Follows the scheme of Hoffman & Gelman (NUTS, Algorithm 5): `update`
    nudges the step size toward the target acceptance rate and also maintains
    an iterate-averaged step size to freeze after burn-in.
    """

    def __init__(self, initial_step_size, target_accept=0.7, gamma=0.2,
                 t0=10.0, kappa=0.75):
        self.mu = np.log(initial_step_size)
        self.target_accept = target_accept
        self.gamma = gamma
        self.t = t0
        self.kappa = kappa
        self.error_sum = 0
        self.log_averaged_step = 0

    def update(self, p_accept):
        """Record one acceptance probability; return (step, averaged_step)."""
        self.error_sum += self.target_accept - p_accept
        drift = self.error_sum / (np.sqrt(self.t) * self.gamma)
        log_step = self.mu - drift
        weight = self.t ** -self.kappa
        self.log_averaged_step = (
            weight * log_step + (1 - weight) * self.log_averaged_step
        )
        self.t += 1
        return np.exp(log_step), np.exp(self.log_averaged_step)
def sigma_update(model, dataloader, n, std=1.0):
    """Gibbs-sample the regression noise scale `model.sigma`.

    Accumulates the sum of squared residuals over the full dataloader and
    draws sigma^2 from the conjugate inverse-gamma conditional.
    NOTE(review): `std` is added to the Gamma rate parameter — presumably it
    encodes the prior scale of the targets; confirm against the derivation.
    """
    sse = torch.zeros(1, requires_grad=True).cuda()
    for inputs, targets in dataloader:
        inputs, targets = inputs.cuda(), targets.cuda()
        preds = model(inputs)
        sse = sse + ((targets - preds) ** 2).sum()
    posterior = torch.distributions.gamma.Gamma(
        torch.tensor([1.0 + n/2]).cuda(), sse/2 + std)
    model.sigma = torch.sqrt(1 / posterior.sample())
def noise_loss(model, lr):
noise_loss = 0.0
noise_std = (2/lr)**0.5
for var in model.parameters():
means = torch.cuda.FloatTensor(var.size()).fill_(0)
noise_loss += torch.sum(var * torch.normal(means, std = noise_std).cuda())
return noise_loss | 6,201 | 37.04908 | 110 | py |
mBNN | mBNN-main/MBNN/Mask_Update.py | import copy
import numpy as np
import torch
from itertools import permutations
from MCMC import *
def mask_update_dataloader(model, task, dataloader, datasize, lam=0.1, N_max=3, death_method="proposed", birth_method="random"):
    """One Metropolis–Hastings birth/death move on the binary `active` masks.

    A fair coin picks a death (u=1, deactivate units) or birth (u=0, activate
    units) move; N in {1..N_max} flat mask indices are proposed, the exact
    posterior ratio is computed from the full dataloader, and the flips are
    applied to `model` in place on acceptance.

    Args:
        model: masked network; parameters whose name contains 'active' are masks.
        task: 'regression' or 'classification'.
        dataloader: full training data for the posterior ratio.
        datasize: training-set size n (enters the sparsity prior).
        lam: sparsity-prior strength; penalty scales with (lam * log n)^5.
        N_max: maximum number of mask entries flipped per move.
        death_method / birth_method: index-proposal scheme
            ('proposed', 'Oops', or 'random').
    """
    model.zero_grad()
    # Temporarily let gradients flow into the masks so gradient-informed
    # proposals ('proposed'/'Oops') can score each index.
    for n, p in model.named_parameters():
        if 'active' in n:
            p.requires_grad = True
    log_prob = log_prob_dataloader(model, task, dataloader)
    log_prob.backward()
    # u == 1: death move; u == 0: birth move.
    u = torch.bernoulli(torch.tensor(0.5))
    N = np.random.randint(1,N_max+1)
    if u==1:
        proposal_prob = proposal_probability(model, u, method=death_method)
    else:
        proposal_prob = proposal_probability(model, u, method=birth_method)
    # Draw N distinct flat indices across all mask vectors.
    ind = torch.multinomial(proposal_prob, N, replacement=False)
    active_vectors, active_vectors_size, active_vectors_item, _, current_p = active_info(model)
    L = len(active_vectors_size)
    # Map each flat index back to (layer ind_L, within-layer position ind_p).
    ind_L = [(np.cumsum(active_vectors_size)<=i).sum() for i in ind.cpu().numpy()]
    ind_p = ind.cpu().numpy() - np.insert(np.cumsum(active_vectors_size),0,0)[ind_L]
    nums = np.bincount(ind_L, minlength=L)
    # Prior ratio of proposed vs current sparsity, accumulated per layer;
    # the torch.prod terms appear to be combinatorial corrections for the
    # number of ways to pick the flipped units — confirm against the paper.
    prior_rate = 1
    for l in range(L):
        if nums[l]>0:
            prior_rate *= torch.exp(-(lam*np.log(datasize))**5 *((2-4*u)*current_p[l]*nums[l] + nums[l]**2))
            if u==0:
                prior_rate *= torch.prod(torch.arange(current_p[l]+1,current_p[l]+nums[l]+1)) / torch.prod(torch.arange(active_vectors_size[l]-current_p[l]-nums[l]+1,active_vectors_size[l]-current_p[l]+1))
            else:
                prior_rate *= torch.prod(torch.arange(active_vectors_size[l]-current_p[l]+1,active_vectors_size[l]-current_p[l]+nums[l]+1)) / torch.prod(torch.arange(current_p[l]-nums[l]+1,current_p[l]+1))
    # Build the proposed state on a deep copy and score it.
    model_tmp = copy.deepcopy(model)
    active_vectors_tmp = []
    for n, p in model_tmp.named_parameters():
        if 'active' in n:
            active_vectors_tmp.append(p)
    for k in range(len(ind_L)):
        active_vectors_tmp[ind_L[k]][ind_p[k]].data += (1-2*u)
    log_prob_tmp = log_prob_dataloader(model_tmp, task, dataloader)
    log_prob_tmp.backward()
    # Reverse-move proposal probabilities (for the Hastings correction).
    if (1-u)==1:
        proposal_prob_tmp = proposal_probability(model_tmp, 1-u, method=death_method)
    else:
        proposal_prob_tmp = proposal_probability(model_tmp, 1-u, method=birth_method)
    accept_prob = torch.clamp(prior_rate*torch.exp(log_prob_tmp-log_prob)*prob_multi_wor(proposal_prob_tmp, ind)/prob_multi_wor(proposal_prob, ind), max=1)
    if torch.rand(1).cuda()<accept_prob:
        for k in range(len(ind_L)):
            # Only flip while the layer keeps more than 2*N_max active units,
            # so a layer can never be emptied out.
            if active_vectors[ind_L[k]].sum()>2*N_max:
                active_vectors[ind_L[k]][ind_p[k]].data += (1-2*u)
    # Freeze the masks again; HMC must not treat them as sampled weights.
    for n, p in model.named_parameters():
        if 'active' in n:
            p.requires_grad = False
def mask_update_data(model, task, inputs, targets, datasize, lam=0.1, N_max=3, death_method="proposed", birth_method="random"):
    """Minibatch variant of `mask_update_dataloader`.

    Identical birth/death MH move but scored on a single (inputs, targets)
    batch via `log_prob_data` (likelihood rescaled by datasize).  Unlike the
    dataloader variant it applies accepted flips unconditionally (no
    2*N_max floor on the layer's active count) and returns the acceptance
    probability.  NOTE: the body duplicates `mask_update_dataloader` almost
    line for line; consider factoring the shared logic.
    """
    model.zero_grad()
    # Enable mask gradients for gradient-informed proposals.
    for n, p in model.named_parameters():
        if 'active' in n:
            p.requires_grad = True
    log_prob = log_prob_data(model, task, inputs, targets, datasize)
    log_prob.backward()
    # u == 1: death move; u == 0: birth move.
    u = torch.bernoulli(torch.tensor(0.5))
    N = np.random.randint(1,N_max+1)
    if u==1:
        proposal_prob = proposal_probability(model, u, method=death_method)
    else:
        proposal_prob = proposal_probability(model, u, method=birth_method)
    ind = torch.multinomial(proposal_prob, N, replacement=False)
    active_vectors, active_vectors_size, active_vectors_item, _, current_p = active_info(model)
    L = len(active_vectors_size)
    # Flat index -> (layer, within-layer position).
    ind_L = [(np.cumsum(active_vectors_size)<=i).sum() for i in ind.cpu().numpy()]
    ind_p = ind.cpu().numpy() - np.insert(np.cumsum(active_vectors_size),0,0)[ind_L]
    nums = np.bincount(ind_L, minlength=L)
    # Per-layer prior ratio (see mask_update_dataloader for the same terms).
    prior_rate = 1
    for l in range(L):
        if nums[l]>0:
            prior_rate *= torch.exp(-(lam*np.log(datasize))**5 *((2-4*u)*current_p[l]*nums[l] + nums[l]**2))
            if u==0:
                prior_rate *= torch.prod(torch.arange(current_p[l]+1,current_p[l]+nums[l]+1)) / torch.prod(torch.arange(active_vectors_size[l]-current_p[l]-nums[l]+1,active_vectors_size[l]-current_p[l]+1))
            else:
                prior_rate *= torch.prod(torch.arange(active_vectors_size[l]-current_p[l]+1,active_vectors_size[l]-current_p[l]+nums[l]+1)) / torch.prod(torch.arange(current_p[l]-nums[l]+1,current_p[l]+1))
    # Score the proposed mask configuration on a deep copy.
    model_tmp = copy.deepcopy(model)
    active_vectors_tmp = []
    for n, p in model_tmp.named_parameters():
        if 'active' in n:
            active_vectors_tmp.append(p)
    for k in range(len(ind_L)):
        active_vectors_tmp[ind_L[k]][ind_p[k]].data += (1-2*u)
    log_prob_tmp = log_prob_data(model_tmp, task, inputs, targets, datasize)
    log_prob_tmp.backward()
    # Reverse-move proposal probabilities (Hastings correction).
    if (1-u)==1:
        proposal_prob_tmp = proposal_probability(model_tmp, 1-u, method=death_method)
    else:
        proposal_prob_tmp = proposal_probability(model_tmp, 1-u, method=birth_method)
    accept_prob = torch.clamp(prior_rate*torch.exp(log_prob_tmp-log_prob)*prob_multi_wor(proposal_prob_tmp, ind)/prob_multi_wor(proposal_prob, ind), max=1)
    if torch.rand(1).cuda()<accept_prob:
        #print("jump")
        for k in range(len(ind_L)):
            active_vectors[ind_L[k]][ind_p[k]].data += (1-2*u)
    # Freeze the masks again before the next HMC step.
    for n, p in model.named_parameters():
        if 'active' in n:
            p.requires_grad = False
    return accept_prob
def active_info(model):
    """Gather every `active` mask parameter of `model`.

    Returns a 5-tuple:
        (list of mask parameters,
         list of their lengths,
         all mask values concatenated into one 1-D tensor,
         all mask gradients concatenated into one 1-D tensor,
         per-mask count of active units).
    """
    vectors, sizes, counts = [], [], []
    values, grads = [], []
    for name, param in model.named_parameters():
        if 'active' not in name:
            continue
        vectors.append(param)
        sizes.append(len(param))
        values.append(param.data)
        grads.append(param.grad)
        counts.append(torch.sum(param).data)
    return (vectors, sizes, torch.hstack(values), torch.hstack(grads), counts)
def prob_multi_wor(proposal_prob, ind):
    """Probability of drawing the index set `ind` (in any order) when sampling
    without replacement from the categorical weights `proposal_prob`.

    Sums, over all orderings of `ind`, the product of the renormalized pick
    probabilities, zeroing out each picked index before the next pick.

    Fixes vs original: no longer shadows the builtin `sum`, drops the unused
    `subtract` local, and clones the tensor instead of a full `copy.deepcopy`
    per permutation.
    """
    total = 0
    for ordering in permutations(ind):
        # Fresh working copy of the weights for this ordering.
        remaining = proposal_prob.clone()
        prod = 1
        for idx in ordering:
            prod = prod * remaining[idx] / torch.sum(remaining)
            remaining[idx] = 0  # drawn without replacement
        total += prod
    return total
def proposal_probability(model, u, method="random"):
    """Normalized proposal distribution over flat mask indices.

    Only entries whose current mask value equals `u` are eligible (death moves
    target active units, birth moves target inactive ones).  Eligible entries
    are weighted by the chosen scheme:
      'Oops'     — locally-balanced exp(-(2m-1)*grad/2), clamped for stability;
      'proposed' — exp(-|grad|/2);
      'random'   — uniform.
    """
    _, _, mask_values, mask_grads, _ = active_info(model)
    zeros = torch.tensor([0.]).cuda()
    if method == 'Oops':
        weight = torch.clamp(
            torch.exp(-(2 * mask_values - 1) * mask_grads / 2), max=1e35)
    elif method == 'proposed':
        weight = torch.exp(-torch.abs(mask_grads) / 2)
    elif method == 'random':
        weight = torch.tensor([1.]).cuda()
    prob = torch.where(mask_values == u, weight, zeros)
    return prob / torch.sum(prob)
| 7,489 | 40.381215 | 205 | py |
LTL-GATA | LTL-GATA-main/src/belief_graph.py | from typing import List, Tuple, Set, Union
import torch
from textworld.logic import Proposition
from utils import triplet_to_proposition, proposition_to_triplet
def exists_triplet(triplets, arg1, arg2, relation):
    """Index of the first triplet matching (arg1, arg2, relation), else None.

    Each of arg1/arg2/relation may be the wildcard "*", which matches any
    value in that position.
    """
    for idx, triplet in enumerate(triplets):
        match1 = arg1 == triplet[0] or arg1 == "*"
        match2 = arg2 == triplet[1] or arg2 == "*"
        match3 = relation == triplet[2] or relation == "*"
        if match1 and match2 and match3:
            return idx
    return None
def update_graph_triplets(triplets, commands, node_vocab, relation_vocab):
    """Apply a list of "add/delete <node1> <node2> <relation>" commands to a
    triplet list and return the updated list.

    Commands are deduplicated (first occurrence wins), and any command whose
    relation is not in `relation_vocab` or whose nouns cannot be split into
    two distinct entries of `node_vocab` is ignored.
    """
    # Deduplicate while keeping first-occurrence order.
    deduped = []
    for cmd in commands:
        if cmd not in deduped:
            deduped.append(cmd)
    for cmd in deduped:
        if not cmd.startswith(("add", "delete")):
            continue
        tokens = cmd.split()
        if len(tokens) <= 3:
            continue
        verb, relation = tokens[0], tokens[-1]
        if relation not in relation_vocab:
            continue
        nouns = " ".join(tokens[1:-1])
        arg1 = arg2 = ""
        # Split the noun span into a known prefix node and a known suffix node
        # (the two must differ).
        for node in node_vocab:
            if not nouns.startswith(node):
                continue
            remainder = nouns[len(node):].strip()
            if remainder == node:
                continue
            if remainder in node_vocab:
                arg1, arg2 = node, remainder
                break
        if not arg1 or not arg2:
            continue
        # Apply the command to the triplet list.
        found = exists_triplet(triplets, arg1, arg2, relation)
        if verb == "add":
            if found is None:
                triplets.append([arg1, arg2, relation])
        elif found is not None:
            triplets = triplets[:found] + triplets[found + 1:]
    return triplets
class BeliefGraph:
    """Agent-side belief over the world state as (node1, node2, relation) triplets.

    Tracks either ground-truth facts (Proposition objects fed in directly) or
    predicted facts parsed from "<sep>"-separated command strings.  Keeps a
    memory of previously-seen graph states for exploration rewards.
    """

    def __init__(self, observation: str,
                 node_vocab: Set[str],
                 relation_vocab: Set[str],
                 ground_truth: bool,
                 seperator: str) -> None:
        self._facts = set()
        self._node_vocab = node_vocab
        self._facts_as_triplets = list()
        self._observations = observation
        self._ground_truth = ground_truth
        self._relation_vocab = relation_vocab
        self._seperator = seperator
        self.reward = 0
        self.memory = set()

    def to_str(self, facts) -> str:
        """Canonical, order-independent string key for a set of triplets.

        BUG FIX: the original used `str([...].sort())` — list.sort() sorts in
        place and returns None, so every key was the string "None" and the
        seen-state memory could never distinguish graphs.  `sorted()` returns
        the sorted list, producing a real canonical key.
        """
        return str(sorted('-'.join(fact) for fact in facts))

    @property
    def seen(self) -> bool:
        # True if the current triplet set was previously stored via update_memory.
        return self.to_str(self._facts_as_triplets) in self.memory

    def update_memory(self):
        """Record the current triplet set in the seen-state memory."""
        self.memory.add(self.to_str(self._facts_as_triplets))

    def graph_rewards(self, prev_facts: List[Tuple[str, str, str]],
                      entities: List[str],
                      filtered: bool) -> float:
        """Accumulate a reward for newly discovered triplets since `prev_facts`.

        If `filtered`, only triplets whose subject is in `entities` count.
        """
        # if self._ground_truth:
        #     return 0
        # if self.seen:
        #     return self.reward
        if filtered:
            prev = set(tuple(f) for f in prev_facts if f[0] in entities)
            curr = set(tuple(f) for f in self._facts_as_triplets
                       if f[0] in entities)
        else:
            prev = set(tuple(f) for f in prev_facts)
            curr = set(tuple(f) for f in self._facts_as_triplets)
        self.reward += len(curr - prev)
        self.update_memory()
        return self.reward

    @property
    def facts_as_triplets(self) -> "Set[Tuple[str, str, str]]":
        """Current triplets; for ground truth, derived from the Propositions
        and restricted to the node/relation vocabularies."""
        if self._ground_truth:
            triplets = list()
            for prop in self._facts:
                triplet = proposition_to_triplet(prop)
                node1, node2, relation = triplet
                if node1 in self._node_vocab and \
                        node2 in self._node_vocab and \
                        relation in self._relation_vocab:
                    triplets.append(triplet)
            return triplets
        return self._facts_as_triplets

    @property
    def facts(self) -> "Set[Proposition]":
        return self._facts

    def update(self, facts: "Union[List[List[Tuple[Proposition]]], List[Tuple[str, str, str]]]") -> None:
        """Ingest new facts.

        Ground-truth graphs (and raw tensors) replace the stored facts
        directly; otherwise `facts` is a "<sep>"-separated command string that
        is parsed and applied to the triplet list.
        """
        if facts is None:
            return
        if self._ground_truth:
            # self._facts = self._facts | set(facts)
            self._facts = facts
            return
        if isinstance(facts, torch.Tensor):
            self._facts = facts
            return
        # Parse the "<sep>"-delimited command string for this example;
        # a trailing "<eos>" marks a complete final command.
        predict_cmds = facts.split("<sep>")
        if predict_cmds[-1].endswith("<eos>"):
            predict_cmds[-1] = predict_cmds[-1][:-5].strip()
        else:
            predict_cmds = predict_cmds[:-1]
        if len(predict_cmds) == 0:
            return
        predict_cmds = [" ".join(item.split()) for item in predict_cmds]
        predict_cmds = [item for item in predict_cmds if len(item) > 0]
        self._facts_as_triplets = update_graph_triplets(
            self._facts_as_triplets, predict_cmds,
            self._node_vocab, self._relation_vocab)
        new_facts = [triplet_to_proposition(triplet, self._seperator)
                     for triplet in self._facts_as_triplets]
        self._facts = new_facts
| 5,241 | 33.261438 | 74 | py |
LTL-GATA | LTL-GATA-main/src/utils.py | from __future__ import annotations
from typing import List, Dict, Any, Union, Tuple, Deque
from pathlib import Path, PosixPath
from argparse import Namespace
import pickle
from logic import Variable, Proposition
import numpy as np
import torch
import yaml
MISSING_WORDS = set()
CONSTANT_NAMES = {"P": "player", "I": "player",
"ingredient": None, "slot": None, "RECIPE": "cookbook"}
def proposition_to_triplet(proposition: Proposition) -> Tuple[str, str, str]:
    """Convert a logic Proposition into a (node1, node2, relation) triplet.

    Unary propositions are encoded as (node, attribute, 'is').

    BUG FIX: the return annotation previously claimed `-> Proposition` even
    though a 3-tuple of strings is returned; corrected to Tuple[str, str, str].
    """
    if len(proposition.names) == 1:
        return (get_variable_name(proposition.names[0]),
                proposition.name, 'is')
    return (get_variable_name(proposition.names[0]),
            get_variable_name(proposition.names[1]),
            proposition.name)
def triplet_to_proposition(triplet: Tuple[str, str, str],
                           seperator: str) -> Proposition:
    """Inverse of proposition_to_triplet: rebuild a Proposition from a triplet.

    A trailing 'is' marks a unary attribute triplet (node, attribute, 'is').

    BUG FIX: the 'is' branch constructed the Proposition but never returned it
    (missing `return`), so it fell through and built a bogus binary
    proposition with the attribute as a Variable.
    """
    if triplet[-1] == 'is':
        return Proposition(name=triplet[1], arguments=[
            Variable(triplet[0])], seperator=seperator)
    return Proposition(name=triplet[2], arguments=[
        Variable(triplet[0]), Variable(triplet[1])], seperator=seperator)
def rename_variables(
        item: Union[Variable, Proposition]) -> Union[Variable, Proposition]:
    """Normalise the variable name(s) of *item* in place and return it.

    Raises:
        ValueError: if *item* is neither a ``Variable`` nor a
            ``Proposition``.
    """
    if isinstance(item, Variable):
        item.name = get_variable_name(item.name)
        return item
    if isinstance(item, Proposition):
        renamed = []
        for argument in item.arguments:
            argument.name = get_variable_name(argument.name)
            renamed.append(argument)
        item.arguments = tuple(renamed)
        return item
    raise ValueError(
        f"Unknown item type {type(item)}. " +
        "Must be one of {Variable, Proposition}")
def max_len(list_of_list):
    """Return the length of the longest element, or 0 when empty."""
    return max(map(len, list_of_list), default=0)
def pad_sequences(sequences, maxlen=None, dtype='int32', value=0.):
    '''
    Partially borrowed from Keras. Pads/pre-truncates each sequence so the
    result is a dense array of shape (number_of_sequences, maxlen).
    # Arguments
        sequences: list of lists where each element is a sequence
        maxlen: int, maximum length (inferred from the data when None)
        dtype: type to cast the resulting sequence.
        value: float, value to pad the sequences to the desired value.
    # Returns
        x: numpy array with dimensions (number_of_sequences, maxlen)
    '''
    if isinstance(sequences, np.ndarray):
        # Already a dense array; nothing to do.
        return sequences
    lengths = [len(s) for s in sequences]
    nb_samples = len(sequences)
    if maxlen is None:
        # Fix: np.max([]) raises on an empty batch; fall back to 0 so an
        # empty input yields an empty (0, 0) array instead of crashing.
        maxlen = max(lengths) if lengths else 0
    # take the sample shape from the first non empty sequence
    # checking for consistency in the main loop below.
    sample_shape = tuple()
    for s in sequences:
        if len(s) > 0:
            sample_shape = np.asarray(s).shape[1:]
            break
    x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
    for idx, s in enumerate(sequences):
        if len(s) == 0:
            continue  # empty list was found
        # pre truncating: keep the *last* maxlen items
        trunc = s[-maxlen:]
        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
                             (trunc.shape[1:], idx, sample_shape))
        # post padding
        x[idx, :len(trunc)] = trunc
    return x
def _word_to_id(word, word2id):
try:
return word2id[word]
except KeyError:
key = word + "_" + str(len(word2id))
if key not in MISSING_WORDS:
print("Warning... %s is not in vocab, vocab size is %d..." %
(word, len(word2id)))
# print(word)
# print(word2id)
# raise
MISSING_WORDS.add(key)
# with open("missing_words.txt", 'a+') as outfile:
# outfile.write(key + '\n')
# outfile.flush()
return word2id['<unk>'] # actually just 1
def _words_to_ids(words, word2id):
    """Vectorised ``_word_to_id`` over a sequence of tokens."""
    return [_word_to_id(token, word2id) for token in words]
def get_variable_name(name: str) -> str:
    """Resolve an internal constant name via CONSTANT_NAMES, else identity."""
    return CONSTANT_NAMES.get(name, name)
def load_config(config_file: Path, params: List[str]) -> Namespace:
    """Load a YAML experiment config into a Namespace.

    Args:
        config_file: path to the YAML configuration file.
        params: ``"dotted.key=value"`` override strings applied on top of
            the file contents.

    Returns:
        Namespace whose dict-valued entries are themselves Namespaces and
        whose io/test paths are resolved (directories created as needed).
    """
    assert Path(config_file).exists(), \
        f"Could not find config file {config_file}"
    with open(config_file) as reader:
        config = yaml.safe_load(reader)
    # Parse overriden params.
    for param in params:
        fqn_key, value = param.split("=")
        entry_to_change = config
        keys = fqn_key.split(".")
        for k in keys[:-1]:
            entry_to_change = entry_to_change[k]
        # Fix: yaml.load() without an explicit Loader is deprecated and can
        # instantiate arbitrary Python objects; safe_load parses the scalar
        # override the same way while staying safe.
        entry_to_change[keys[-1]] = yaml.safe_load(value)
    # print(config)
    config = Namespace(**config)
    for k, v in config.__dict__.items():
        if isinstance(v, dict):
            config.__dict__[k] = Namespace(**v)
    # Expand user-relative paths for all embedding/checkpoint locations.
    config.graph_updater.checkpoint = Path(
        config.graph_updater.checkpoint).expanduser()
    config.io.pretrained_embedding_path = Path(
        config.io.pretrained_embedding_path).expanduser()
    config.ltl_encoder.pretrained_embedding_path = Path(
        config.ltl_encoder.pretrained_embedding_path).expanduser()
    config.text_encoder.pretrained_embedding_path = Path(
        config.text_encoder.pretrained_embedding_path).expanduser()
    config.actions_encoder.pretrained_embedding_path = Path(
        config.actions_encoder.pretrained_embedding_path).expanduser()
    root = Path(config.io.root)
    if root == root.expanduser():
        # Plain relative root: resolve it next to the config file.
        config.io.root = Path(config_file).expanduser().parent / root
    else:
        config.io.root = root.expanduser()
    config.io.output_dir = config.io.root / config.io.output_dir
    config.io.checkpoint_dir = config.io.root / config.io.checkpoint_dir
    config.io.trajectories_dir = config.io.root / config.io.trajectories_dir
    config.io.output_dir.mkdir(exist_ok=True, parents=True)
    config.io.checkpoint_dir.mkdir(exist_ok=True, parents=True)
    config.io.trajectories_dir.mkdir(exist_ok=True, parents=True)
    config.io.data_dir = Path(config.io.data_dir).expanduser()
    config.io.vocab_dir = Path(config.io.vocab_dir).expanduser()
    if config.test.filename is None:
        # Default to the best checkpoint, preferring train-best over
        # eval-best when both exist.
        if (config.io.checkpoint_dir / 'best.pt').exists():
            config.test.filename = config.io.checkpoint_dir / 'best.pt'
        else:
            config.test.filename = config.io.checkpoint_dir / 'best_eval.pt'
    else:
        test_filename = Path(config.test.filename)
        if test_filename == test_filename.expanduser():
            config.test.filename = config.io.root / test_filename
        else:
            config.test.filename = test_filename.expanduser()
    if config.io.tag is None:
        config.io.tag = config.io.root.name
    return config
def serialize_namespace(config: Namespace) -> Dict[str, Any]:
    """Recursively convert a Namespace into a plain dict.

    Nested Namespaces become nested dicts and PosixPath values become
    strings. Note: this mutates the namespace's own ``__dict__`` in place
    and returns it.
    """
    attrs = vars(config)
    for key in attrs:
        value = attrs[key]
        if isinstance(value, Namespace):
            attrs[key] = serialize_namespace(value)
        elif isinstance(value, PosixPath):
            attrs[key] = str(value)
    return attrs
def expand_trajectories(
        episode_no: int, batch_size: int,
        obs: List[str], infos: Dict[str, Any],
        states: BatchedStates,
        trajectories: Deque[Dict[str, Any]],
        **kwargs) -> List[Any]:
    """Append this step's per-example data onto the last ``batch_size``
    trajectory records and return the trajectory container.

    Extra keyword arguments are appended element-wise under their own keys
    (e.g. actions, rewards, terminals).
    """
    for offset in range(-batch_size, 0):
        i = offset + batch_size
        record = trajectories[offset]
        record['states'] = states.states[i]
        record['observations'].append(obs[i])
        record['infos'] = {key: val[i] for key, val in infos.items()}
        for key, val in kwargs.items():
            record[key].append(val[i])
    return trajectories
def save_trajectories(trajectories, trajectories_file,) -> None:
    """Pickle trajectory records to *trajectories_file*.

    List-valued fields are converted to numpy arrays in place first, except
    for the structured fields that must stay as-is.
    """
    keep_raw = {'infos', 'admissible_actions', 'states'}
    with trajectories_file.open('wb') as handle:
        for record in trajectories:
            for key in record:
                if key in keep_raw:
                    continue
                record[key] = np.array(record[key])
        pickle.dump(trajectories, handle)
def to_np(x):
    """Return *x* as a numpy array (detaching/copying torch tensors)."""
    return x if isinstance(x, np.ndarray) else x.data.cpu().numpy()
def to_pt(np_matrix, cuda=False, type='long'):
    """Convert a numpy array into a torch tensor.

    Args:
        np_matrix: source numpy array.
        cuda: move the tensor to the current CUDA device when True.
        type: ``'long'`` (int64) or ``'float'`` (float32).

    Returns:
        torch.Tensor of the requested dtype.

    Raises:
        ValueError: for an unknown ``type`` (the previous version silently
            returned None in that case).

    Note: the deprecated ``torch.autograd.Variable`` wrapper was removed —
    it has been a no-op alias for Tensor since PyTorch 0.4, so callers see
    the same object type as before.
    """
    if type == 'long':
        tensor = torch.from_numpy(np_matrix).type(torch.LongTensor)
    elif type == 'float':
        tensor = torch.from_numpy(np_matrix).type(torch.FloatTensor)
    else:
        raise ValueError(f"Unknown tensor type {type!r}; "
                         "expected 'long' or 'float'")
    return tensor.cuda() if cuda else tensor
def graph_triplets_to_string(list_of_triples):
    """Build a canonical, order-independent string key for a triplet list."""
    return "<|>".join(sorted("|".join(triple) for triple in list_of_triples))
class EpisodicCountingMemory:
    """Per-episode novelty memory over graph states.

    Keeps one set of canonical graph keys per batch element so callers can
    ask whether a batch of graph states has been seen this episode.
    """

    def __init__(self):
        self.reset()

    @staticmethod
    def _key(triplets):
        # Canonical order-independent encoding (same scheme as
        # graph_triplets_to_string, inlined here).
        return "<|>".join(sorted("|".join(t) for t in triplets))

    def push(self, stuff):
        """stuff is list of list of list of strings.
        e.g.: [[['player', 'shed', 'at'], ['workbench', 'shed', 'at']]]
        """
        assert len(stuff) > 0  # batch size should be greater than 0
        if not self.memory:
            # Lazily size the memory to the batch on first push.
            self.memory = [set() for _ in range(len(stuff))]
        for b, triplets in enumerate(stuff):
            self.memory[b].add(self._key(triplets))

    def has_not_seen(self, stuff):
        """Return one bool per batch element: True if its graph is novel."""
        assert len(stuff) > 0  # batch size should be greater than 0
        return [self._key(triplets) not in self.memory[b]
                for b, triplets in enumerate(stuff)]

    def reset(self):
        self.memory = []

    def __len__(self):
        return len(self.memory)
class LinearSchedule:
    """
    Linear interpolation between initial_p and final_p over
    schedule_timesteps. After this many timesteps pass final_p is
    returned.
    :param schedule_timesteps: (int) Number of timesteps for which to linearly
        anneal initial_p to final_p
    :param initial_p: (float) initial output value
    :param final_p: (float) final output value
    """

    def __init__(self, schedule_timesteps: int, final_p: float,
                 initial_p: float = 1.0) -> None:
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p
        # Precompute the annealing curve once; value() just indexes into it.
        self.schedule = np.linspace(initial_p, final_p, schedule_timesteps)

    def value(self, step: int) -> float:
        """Annealed value at *step*, clamped to the schedule's endpoints."""
        if 0 <= step < self.schedule_timesteps:
            return self.schedule[step]
        return self.initial_p if step < 0 else self.final_p
| 11,080 | 34.289809 | 114 | py |
LTL-GATA | LTL-GATA-main/src/agent.py | from typing import Tuple, List, Dict, Any
from argparse import Namespace
from pathlib import Path
import logging
import copy
import json
import pdb
import torch.nn.functional as F
import numpy as np
import torch
from textworld import EnvInfos
from experience_replay import PrioritizedExperienceReplay
from components import AgentModes, Actions, Vocabulary
from graph_updater import GraphUpdater
from belief_graph import BeliefGraph
from utils import LinearSchedule
from optim import get_optimizer
from state import BatchedStates
from model import PolicyNet
from ltl import LTL
logger = logging.getLogger()
class Agent:
    """DQN-style agent for TextWorld cooking games, optionally guided by
    LTL instructions and a belief graph.

    Owns the policy/target network pair, the prioritized replay buffer,
    the epsilon-greedy exploration schedule and the graph updater that
    maintains per-game belief graphs (separately for training and
    evaluation batches).
    """

    def __init__(self, config: Namespace, word_vocab: Vocabulary,
                 ltl_vocab: Vocabulary, relation_vocab: Vocabulary,
                 node_vocab: Vocabulary,
                 action_vocab: Vocabulary,
                 pretrain: bool = False) -> None:
        self.ltl = config.ltl
        self.training = config.training
        self.evaluate = config.evaluate
        self.test_config = config.test
        # Maps game uuid -> difficulty level, used for mixed-difficulty sets.
        with (config.io.data_dir / 'uuid_mapping.json').open('r') as f:
            self.uuid_mapping = json.load(f)
        self.set_random_seed(self.training.random_seed)
        self._states: BatchedStates = None
        self._eval_states: BatchedStates = None
        self._word_vocab = word_vocab
        self._ltl_vocab = ltl_vocab
        self._relation_vocab = relation_vocab
        self._node_vocab = node_vocab
        self._action_vocab = action_vocab
        self.graph_updater = GraphUpdater(
            checkpoint=config.graph_updater.checkpoint,
            vocab_path=config.io.vocab_dir,
            word_vocab=self._word_vocab,
            pretrained_embedding_path=(
                config.io.pretrained_embedding_path),
            node_vocab=self._node_vocab,
            relation_vocab=self._relation_vocab,
            config=config.graph_updater)
        self.policy_net = PolicyNet(
            config=config, word_vocab=self._word_vocab,
            ltl_vocab=self._ltl_vocab,
            action_vocab=self._action_vocab,
            pretrain=pretrain,
            graph_updater=self.graph_updater,
            context_length=self.training.context_length)
        self.target_net = PolicyNet(
            config=config, word_vocab=self._word_vocab,
            ltl_vocab=self._ltl_vocab,
            action_vocab=self._action_vocab,
            pretrain=pretrain,
            graph_updater=self.graph_updater,
            context_length=self.training.context_length)
        self.target_net.train()
        self.update_target_net()
        # The target network is only ever written via update_target_net().
        for param in self.target_net.parameters():
            param.requires_grad = False
        self.replay = ep = config.training.experience_replay
        self.use_belief_graph = config.model.use_belief_graph or \
            config.model.use_ltl
        self.use_ltl = config.model.use_ltl
        self.recurrent_memory = config.model.recurrent_memory
        self.experience = PrioritizedExperienceReplay(
            beta=ep['beta'],
            batch_size=ep['batch_size'],
            multi_step=ep['multi_step'],
            max_episode=config.training.max_episode,
            seed=config.training.random_seed,
            alpha=ep['alpha'],
            capacity=ep['capacity'],
            discount_gamma_game_reward=ep['discount_gamma_game_reward'],
            accumulate_reward_from_final=ep['accumulate_reward_from_final'],
            recurrent_memory=self.recurrent_memory,
            sample_update_from=ep['sample_update_from'],
            sample_history_length=ep['sample_history_length']
        )
        self.epsilon = self.training.epsilon_greedy['anneal_from']
        self.epsilon_scheduler = LinearSchedule(
            schedule_timesteps=self.training.epsilon_greedy['episodes'],
            initial_p=self.epsilon,
            final_p=self.training.epsilon_greedy['anneal_to'])
        self.optimizer, self.scheduler = get_optimizer(
            self.policy_net, config.training.optimizer)
        if self.training.cuda:
            self.cuda()

    def save_model(self, episode_no: int, path: Path,
                   best_train: int, best_eval: int) -> None:
        """Checkpoint networks, vocabularies and optimizer/scheduler state."""
        data = {
            'policy_net': self.policy_net.state_dict(),
            'policy_net_word_vocab': self.policy_net.word_vocab,
            'policy_net_ltl_vocab': self.policy_net.ltl_vocab,
            'policy_net_action_vocab': self.policy_net.action_vocab,
            'episode_no': episode_no,
            'optimizer': self.optimizer.state_dict(),
            # Fix: previously the *optimizer* state was saved under the
            # 'scheduler' key (copy-paste bug); save the scheduler's own
            # state, or None when no scheduler is configured.
            'scheduler': (self.scheduler.state_dict()
                          if self.scheduler is not None else None),
            'best_train': best_train,
            'best_eval': best_eval,
        }
        torch.save(data, path)

    def load_model(self, path: Path) -> None:
        """Restore a checkpoint; returns (episode_no, best_train, best_eval)
        for full checkpoints, or 0 for bare state-dict files."""
        data = torch.load(path)
        if 'policy_net' not in data:
            # Bare state dict (e.g. a pretraining checkpoint).
            self.policy_net.load_state_dict(data)
            return 0
        self.policy_net.load_vocab(data['policy_net_word_vocab'],
                                   data['policy_net_ltl_vocab'],
                                   data['policy_net_action_vocab'])
        self.policy_net.load_state_dict(data['policy_net'])
        self.target_net.load_vocab(data['policy_net_word_vocab'],
                                   data['policy_net_ltl_vocab'],
                                   data['policy_net_action_vocab'])
        self.update_target_net()
        self.optimizer.load_state_dict(data['optimizer'])
        # Guard against checkpoints written without a scheduler.
        if self.scheduler is not None and data.get('scheduler') is not None:
            self.scheduler.load_state_dict(data['scheduler'])
        if self.training.cuda:
            self.cuda()
        return data['episode_no'], data['best_train'], data['best_eval']

    def reset_eval_states(self,
                          obs: List[str],
                          actions: List[str],
                          infos: Dict[str, List[Any]]) -> BatchedStates:
        """Build fresh evaluation-time BatchedStates for a new batch of games
        (belief graphs, initial facts and per-game LTL formulas)."""
        config = self.evaluate if self.mode == AgentModes.eval else \
            self.test_config
        if config.difficulty_level in {'r', 'mixed'}:
            # Recover each game's difficulty; 11 is the fallback for games
            # missing from the uuid mapping.
            diffs = [self.uuid_mapping[game.metadata['uuid']]
                     if game.metadata['uuid'] in self.uuid_mapping else 11
                     for game in infos['game']]
        else:
            diffs = [config.difficulty_level for _ in range(
                config.batch_size)]
        self.graph_updater.prev_graph_hidden_state = \
            torch.zeros(
                len(obs), self.graph_updater.config.block_hidden_dim).cuda()
        belief_graphs = [BeliefGraph(
            o, node_vocab=self._node_vocab,
            relation_vocab=self._relation_vocab,
            ground_truth=self.graph_updater.use_ground_truth_graph,
            seperator='_') for
            o in obs]
        facts = self.graph_updater(
            [f"{o} <sep> {a}" for o, a in zip(obs, actions)],
            [bg.facts_as_triplets for bg in belief_graphs],
            actions=actions,
            infos=infos)
        for i, facts_ in enumerate(facts):
            belief_graphs[i].update(facts_)
            belief_graphs[i].update_memory()
        if self.use_ltl or (
                self.training.graph_reward_lambda > 0
                and self.training.graph_reward_filtered):
            ltl_formulas = [LTL(
                facts=bg.facts,
                win_facts=win_facts,
                fail_facts=fail_facts,
                use_ground_truth=self.ltl.use_ground_truth,
                reward_scale=self.ltl.reward_scale,
                first_obs=first_obs,
                as_bonus=self.ltl.as_bonus,
                next_constrained=self.ltl.next_constrained,
                difficulty=diff,
                incomplete_cookbook=self.ltl.incomplete_cookbook,
                single_reward=self.ltl.single_reward,
                single_token_prop=self.ltl.single_token_prop,
                reward_per_progression=self.training.reward_per_ltl_progression,
                no_cookbook=self.ltl.no_cookbook,
                negative_for_fail=self.ltl.negative_for_fail,
                dont_progress=self.ltl.dont_progress)
                for first_obs, bg, win_facts, fail_facts, diff in
                zip(obs, belief_graphs, infos['win_facts'],
                    infos['fail_facts'],
                    diffs)]
        else:
            ltl_formulas = [None for _ in obs]
        self._eval_states = BatchedStates(observations=obs,
                                          actions=actions,
                                          ltl_formulas=ltl_formulas,
                                          belief_graphs=belief_graphs)

    @property
    def eval_states(self) -> BatchedStates:
        return self._eval_states

    def update_eval_states(self, observations: List[str],
                           current_actions: List[str],
                           dones: List[bool],
                           infos: Dict[str, Any] = None) -> None:
        """Advance evaluation states by one environment step."""
        if self.use_belief_graph:
            facts = self.graph_updater(
                [f"{obs} <sep> {a}" for obs, a in zip(
                    observations, current_actions)],
                [bg.facts_as_triplets for bg in
                 self._eval_states.belief_graphs],
                actions=current_actions,
                infos=infos)
        else:
            facts = [None for _ in observations]
        return self._eval_states.update(observations, current_actions,
                                        facts, dones)

    def reset_states(self,
                     obs: List[str],
                     actions: List[str],
                     infos: Dict[str, List[Any]]) -> BatchedStates:
        """Training-time counterpart of reset_eval_states."""
        if self.training.difficulty_level in {'r', 'mixed'}:
            diffs = [self.uuid_mapping[game.metadata['uuid']]
                     if game.metadata['uuid'] in self.uuid_mapping else 11
                     for game in infos['game']]
        else:
            diffs = [self.training.difficulty_level for _ in range(
                self.training.batch_size)]
        self.graph_updater.prev_graph_hidden_state = \
            torch.zeros(
                len(obs), self.graph_updater.config.block_hidden_dim).cuda()
        belief_graphs = [BeliefGraph(
            o, node_vocab=self._node_vocab,
            relation_vocab=self._relation_vocab,
            ground_truth=self.graph_updater.use_ground_truth_graph,
            seperator='_') for
            o in obs]
        facts = self.graph_updater(
            [f"{o} <sep> {a}" for o, a in zip(obs, actions)],
            [bg.facts_as_triplets for bg in belief_graphs],
            actions=actions,
            infos=infos)
        for i, facts_ in enumerate(facts):
            belief_graphs[i].update(facts_)
            belief_graphs[i].update_memory()
        if self.use_ltl or (
                self.training.graph_reward_lambda > 0
                and self.training.graph_reward_filtered):
            ltl_formulas = [LTL(
                facts=bg.facts,
                win_facts=win_facts,
                fail_facts=fail_facts,
                use_ground_truth=self.ltl.use_ground_truth,
                reward_scale=self.ltl.reward_scale,
                as_bonus=self.ltl.as_bonus,
                first_obs=first_obs,
                next_constrained=self.ltl.next_constrained,
                difficulty=diff,
                incomplete_cookbook=self.ltl.incomplete_cookbook,
                single_reward=self.ltl.single_reward,
                single_token_prop=self.ltl.single_token_prop,
                reward_per_progression=self.training.reward_per_ltl_progression,
                no_cookbook=self.ltl.no_cookbook,
                negative_for_fail=self.ltl.negative_for_fail,
                dont_progress=self.ltl.dont_progress)
                for first_obs, bg, win_facts, fail_facts, diff in
                zip(obs, belief_graphs, infos['win_facts'],
                    infos['fail_facts'],
                    diffs)]
        else:
            ltl_formulas = [None for _ in obs]
        self._states = BatchedStates(observations=obs,
                                     actions=actions,
                                     ltl_formulas=ltl_formulas,
                                     belief_graphs=belief_graphs)

    @property
    def states(self) -> BatchedStates:
        return self._states

    def update_states(self, observations: List[str],
                      current_actions: List[str],
                      dones: List[bool],
                      infos: Dict[str, Any] = None) -> None:
        """Advance training states by one environment step."""
        if self.use_belief_graph:
            facts = self.graph_updater(
                [f"{obs} <sep> {a}" for obs, a in zip(
                    observations, current_actions)],
                [bg.facts_as_triplets for bg in self._states.belief_graphs],
                actions=current_actions,
                infos=infos)
        else:
            facts = [None for _ in observations]
        return self._states.update(observations, current_actions, facts, dones)

    def cuda(self) -> None:
        """Move all networks to the GPU; raises if CUDA is unavailable."""
        if torch.cuda.is_available():
            self.policy_net.cuda()
            self.target_net.cuda()
            self.graph_updater.cuda()
        else:
            logger.critical("CUDA set but no CUDA device available")
            raise RuntimeError("CUDA set but no CUDA device available")

    def set_random_seed(self, seed: int) -> None:
        """Seed numpy and torch (and CUDA when enabled)."""
        np.random.seed(seed)
        torch.manual_seed(seed)
        if self.training.cuda:
            torch.cuda.manual_seed(seed)

    def update_target_net(self) -> None:
        """Hard-copy policy-net weights into the target net."""
        self.target_net.load_state_dict(self.policy_net.state_dict())

    def eval(self) -> None:
        self.policy_net.eval()
        self.mode = AgentModes.eval

    def test(self) -> None:
        self.policy_net.eval()
        self.mode = AgentModes.test

    def train(self) -> None:
        self.policy_net.train()
        self.mode = AgentModes.train

    def games_max_scores(self, infos: Dict[str, Any]) -> List[float]:
        """Return (original_max_scores, adjusted_max_scores) where the
        adjustment accounts for LTL progression bonuses / penalties."""
        scores = [game.max_score for game in infos["game"]]
        original_scores = copy.copy(scores)
        score_offset = np.zeros_like(scores)
        if self.training.reward_ltl and self.policy_net.use_ltl:
            optimal_path_lengths = np.array([np.sum([len(tuples) > 0 for
                                                     tuples in quest])
                                             for quest in infos['win_facts']])
            if self.training.reward_per_ltl_progression:
                # NOTE(review): score_offset inherits an int dtype from
                # zeros_like(scores); a float reward_scale would make this
                # in-place add raise — confirm reward_scale is integral.
                score_offset += (1 + optimal_path_lengths) \
                    * self.ltl.reward_scale
            elif self.ltl.single_reward:
                score_offset = np.ones_like(scores)
            else:
                states = self.states if self.mode == AgentModes.train else \
                    self.eval_states
                _len = [len(s.ltl.translator.formulas) for s in states]
                # Fix: `_len * reward_scale` on a Python list performed list
                # repetition (or raised for float scales) instead of the
                # intended elementwise scaling.
                score_offset = np.array(_len) * self.ltl.reward_scale
            if self.training.reward_ltl_only:
                return (original_scores,
                        (np.ones_like(scores) * score_offset).tolist())
            if self.training.penalize_path_length > 0:
                score_offset -= self.training.penalize_path_length * \
                    optimal_path_lengths
        return (original_scores,
                (np.array(scores) + score_offset).tolist())

    def adjust_scores(self, scores: List[int],
                      step_no: int,
                      state_update_rewards: List[float],
                      state_update_dones: List[int],
                      wons: List[bool],
                      dones: List[bool]) -> List[float]:
        """Combine game scores with LTL/graph shaping rewards.

        Returns (original_scores, adjusted_scores).
        """
        original_scores = copy.copy(scores)
        reward_ltl = self.training.reward_ltl and self.policy_net.use_ltl
        if not reward_ltl:
            state_update_rewards = [0] * len(scores)
        score_offsets = np.array(state_update_rewards)
        if self.training.reward_ltl_positive_only and reward_ltl:
            score_offsets[np.where(score_offsets < 0)] = 0
        if reward_ltl and self.training.reward_ltl_only:
            return original_scores, score_offsets
        if self.training.penalize_path_length > 0:
            score_offsets -= self.training.penalize_path_length * (step_no + 1)
        scores = np.array(scores)
        adjusted_scores = (scores + score_offsets).tolist()
        if self.training.reward_ltl and self.policy_net.use_ltl and \
                self.training.persistent_negative_reward:
            # A violated LTL formula overrides the game score outright.
            for i, ltl_score in enumerate(state_update_rewards):
                if ltl_score < 0:
                    adjusted_scores[i] = ltl_score
                if dones[i] and not wons[i]:
                    adjusted_scores[i] = -1
        if self.training.graph_reward_lambda > 0:
            # Fix: `list += ndarray` extends the list with the reward
            # elements; use elementwise addition instead.
            adjusted_scores = (
                np.array(adjusted_scores) +
                self.training.graph_reward_lambda *
                self.get_graph_rewards()).tolist()
        return original_scores, adjusted_scores

    def get_graph_rewards(self) -> np.ndarray:
        """Per-example exploration bonus from belief-graph growth; zero until
        a state history of at least two steps exists."""
        states = self.states if self.mode == AgentModes.train else \
            self.eval_states
        return np.array([0 if len(state.past) < 2 else
                         state.belief_graph.graph_rewards(
                             prev_facts=state.past[-1].belief_graph._facts_as_triplets,
                             entities=state.ltl.entities if state.ltl is not None else None,
                             filtered=self.training.graph_reward_filtered)
                         for state in states])

    @staticmethod
    def env_infos(light: bool, win_formulas: bool) -> EnvInfos:
        """Build the TextWorld EnvInfos request for this agent."""
        request_infos = EnvInfos()
        request_infos.game = True
        request_infos.facts = not light
        request_infos.win_facts = win_formulas
        request_infos.fail_facts = win_formulas
        request_infos.description = not light
        request_infos.won = True
        request_infos.lost = True
        request_infos.admissible_commands = True
        return request_infos

    def choose_actions_indices(
            self, actions_scores: torch.Tensor,
            actions_mask: torch.Tensor, admissible_actions: List[Actions],
            greedy: bool, random: bool, eps: float) -> torch.Tensor:
        """Epsilon-greedy selection; `greedy`/`random` force either branch."""
        if greedy and random:
            logging.critical(
                "Asked to act greedily and randomly which is not possible")
            raise ValueError(
                "Asked to act greedily and randomly which is not possible")
        elif greedy:
            return self.choose_maxQ_actions(actions_scores, actions_mask).cpu()
        elif random:
            return self.choose_random_actions(admissible_actions)
        batch_size = len(actions_scores)
        maxQ_filtered_actions = self.choose_maxQ_actions(
            actions_scores, actions_mask).cpu()
        random_filtered_actions = self.choose_random_actions(
            admissible_actions)
        r = np.random.uniform(low=0., high=1.,
                              size=batch_size)
        # less than selects random
        if eps is None:
            eps = self.epsilon
        less_than_e = torch.tensor(
            r <= eps, dtype=torch.int64).reshape((batch_size, 1))
        # stack both options, gather based on less than epsilon
        return torch.gather(torch.stack((maxQ_filtered_actions,
                                         random_filtered_actions), dim=1),
                            dim=-1, index=less_than_e)

    def choose_maxQ_actions(
            self, actions_scores: torch.Tensor,
            actions_mask: torch.Tensor) -> torch.Tensor:
        """Argmax over (shifted, masked) Q-scores; at test time optionally a
        temperature-softmax with random tie-breaking.

        NOTE: shifts/masks ``actions_scores`` in place.
        """
        actions_scores += -torch.min(
            actions_scores, -1, keepdim=True)[0] + 1e-2
        actions_scores *= actions_mask
        if self.mode == AgentModes.test and self.test_config.softmax:
            ret = torch.tensor([])
            for b, mask in zip(actions_scores, actions_mask):
                tmp = torch.functional.F.softmax(
                    b / self.test_config.softmax_temperature).detach().cpu()
                tmp *= mask.detach().cpu()
                # Break ties among (near-)maximal probabilities at random.
                options = torch.nonzero(torch.isclose(
                    tmp, tmp.max(), atol=1e-4)).flatten()
                ret = torch.cat(
                    (ret, torch.tensor([np.random.choice(options)])))
            return torch.tensor(ret).int()
        else:
            return torch.argmax(actions_scores.detach().cpu(), -1,)

    def choose_random_actions(
            self, admissible_actions: List[Actions]) -> torch.Tensor:
        """Uniformly sample one admissible action index per example."""
        return torch.tensor([
            np.random.choice(
                len(candidates)) for candidates in admissible_actions],
            dtype=torch.int32)

    def act(self,
            states: BatchedStates,
            admissible_actions: List[Actions],
            greedy: bool = False,
            random: bool = False,
            previous_hidden: torch.Tensor = None,
            previous_cell: torch.Tensor = None,
            eps: float = None) -> Actions:
        """Score admissible actions and pick one per example; returns
        (action_indices, hidden, cell) for recurrent policies."""
        actions_scores, actions_mask = None, None
        if not random:
            actions_scores, actions_mask, previous_hidden, previous_cell = \
                self.policy_net(
                    states, admissible_actions, previous_hidden, previous_cell)
        with torch.no_grad():
            next_actions_indices = self.choose_actions_indices(
                actions_scores=actions_scores,
                actions_mask=actions_mask,
                admissible_actions=admissible_actions,
                greedy=greedy,
                random=random,
                eps=eps)
        return next_actions_indices.squeeze(), previous_hidden, previous_cell

    def get_loss(self, episode_no: int,) -> Tuple[float, float]:
        """Double-DQN TD loss (plus optional MLM auxiliary loss) over a
        prioritized replay sample; also refreshes sample priorities.

        Returns (loss, mean_q_value), or (None, None) when the buffer
        cannot yet provide a sample.
        """
        _samples, _stepped_samples, _rewards, sample_indices, _weights = \
            self.experience.get_samples(
                episode_no, self.recurrent_memory)
        if _samples is None:
            return None, None
        sample_indices = np.array(sample_indices)
        all_q_values, td_errors, dones = list(), list(), list()
        losses, mlm_losses = list(), list()
        previous_hidden, previous_cell = None, None
        for step_no, (samples, stepped_samples, rewards, weights) in \
                enumerate(zip(_samples, _stepped_samples, _rewards, _weights)):
            stepped_states, stepped_admissible_actions = list(), list()
            states, admissible_actions, indices = list(), list(), list()
            for sample, stepped_sample in zip(samples, stepped_samples):
                states.append(sample.state)
                admissible_actions.append(sample.admissible_actions)
                indices.append(sample.action)
                stepped_states.append(stepped_sample.state)
                stepped_admissible_actions.append(
                    stepped_sample.admissible_actions)
                dones.append(sample.done)
            states = BatchedStates(states=states)
            stepped_states = BatchedStates(states=stepped_states)
            not_dones = 1 - torch.tensor(
                dones, device=self.policy_net.device, dtype=torch.int64)
            actions_scores, actions_mask, previous_hidden, previous_cell = \
                self.policy_net(
                    states, admissible_actions, previous_hidden, previous_cell)
            q_values = torch.gather(
                actions_scores, 1, torch.tensor(
                    indices, dtype=torch.int64,
                    device=self.policy_net.device).reshape((-1, 1)))
            if self.recurrent_memory and \
                    step_no < self.replay['sample_update_from']:
                # Burn-in steps only warm up the recurrent state.
                continue
            with torch.no_grad():
                # Double DQN: policy net picks the action, target net values it.
                stepped_actions_scores, stepped_actions_mask, _, _ = \
                    self.policy_net(
                        stepped_states, stepped_admissible_actions,
                        previous_hidden, previous_cell)
                stepped_indices = self.choose_maxQ_actions(
                    stepped_actions_scores, stepped_actions_mask
                ).to(self.policy_net.device)
                stepped_indices = stepped_indices.reshape((-1, 1))
                stepped_actions_scores_tgt, stepped_actions_tgt_mask, _, _ = \
                    self.target_net(
                        stepped_states, stepped_admissible_actions,
                        previous_hidden, previous_cell)
                stepped_q_values = torch.gather(
                    stepped_actions_scores_tgt, 1, stepped_indices)
            discount = torch.tensor(
                (np.ones((stepped_indices.shape[0])) *
                 self.replay['discount_gamma_game_reward']) **
                sample_indices[:, 1],
                device=self.policy_net.device, dtype=torch.float64)
            rewards = torch.tensor(rewards, device=self.policy_net.device) + \
                stepped_q_values.squeeze() * (discount * not_dones)
            rewards = rewards.type(torch.float32)
            loss = F.smooth_l1_loss(
                q_values.squeeze(), rewards, reduction='none')
            # Importance-sampling correction from the prioritized buffer.
            loss *= torch.tensor(
                weights, device=self.policy_net.device, dtype=torch.float64)
            losses.append(loss)
            loss = self.policy_net.mlm_loss(states.observations)
            mlm_losses.append(loss)
            all_q_values.append(q_values)
            abs_td_err = torch.abs(q_values.squeeze() - rewards)
            td_errors.append(abs_td_err)
        _range = 1
        if self.recurrent_memory:
            _range = self.replay['sample_history_length'] - \
                self.replay['sample_update_from']
        for i in range(_range):
            # NOTE(review): appending td_errors[i] back onto td_errors looks
            # redundant (the list is not read afterwards) — confirm intent.
            abs_td_err = td_errors[i]
            td_errors.append(abs_td_err)
            new_priorities = abs_td_err + self.replay['eps']
            self.experience.update_priorities(
                sample_indices[:, 0] + i + (self.replay['sample_update_from']
                                            if self.recurrent_memory else 0),
                new_priorities.detach().cpu())
        loss = torch.stack(losses).mean()
        q_values = torch.stack(all_q_values).mean()
        if any(mlm_losses):
            loss = loss + self.training.mlm_alpha * \
                torch.stack(mlm_losses).mean()
        return loss, q_values

    def update_dqn(self, episode_no: int) -> Tuple[float, float]:
        """One optimizer step on the replay loss; returns (loss, q) means or
        (None, None) when no sample was available."""
        loss, q_values = self.get_loss(episode_no)
        if loss is None:
            return None, None
        self.policy_net.zero_grad()
        self.optimizer.zero_grad()
        loss.backward()
        if np.greater(self.training.optimizer['clip_grad_norm'], 0.):
            torch.nn.utils.clip_grad_norm_(
                self.policy_net.parameters(),
                self.training.optimizer['clip_grad_norm'])
        self.optimizer.step()
        return torch.mean(loss), torch.mean(q_values)

    def finalize_episode(self, episode_no: int) -> None:
        """End-of-episode bookkeeping: periodic target-net sync, epsilon
        annealing and LR-scheduler stepping."""
        if (episode_no + self.training.batch_size) % \
                self.training.target_net_update_frequency <= \
                episode_no % self.training.target_net_update_frequency:
            self.update_target_net()
        if episode_no < self.training.learn_from_this_episode:
            return
        if episode_no < self.training.epsilon_greedy['episodes'] + \
                self.training.learn_from_this_episode:
            self.epsilon = self.epsilon_scheduler.value(
                episode_no - self.training.learn_from_this_episode)
            self.epsilon = max(self.epsilon, 0.)
        if self.scheduler is not None and episode_no > \
                self.training.learn_from_this_episode:
            for _ in range(self.training.batch_size):
                self.scheduler.step()
| 29,311 | 44.234568 | 80 | py |
LTL-GATA | LTL-GATA-main/src/graph_updater.py | from typing import Tuple, List, Dict, Any
from pathlib import Path
import copy
import torch.nn.functional as F
import numpy as np
import torch
from utils import to_pt, max_len, pad_sequences, to_np
from model.layers import (
CQAttention, PointerSoftmax,
DecoderBlock, EncoderBlock, Embedding,
masked_softmax, SelfAttention, masked_mean,
DecoderBlockForObsGen, ObservationDiscriminator
)
from components import Vocabulary
class RelationalGraphConvolution(torch.nn.Module):
    """
    Simple R-GCN layer, modified from theano/keras implementation from
    https://github.com/tkipf/relational-gcn
    We also consider relation representation here (relation labels matter)
    """

    def __init__(self, entity_input_dim, relation_input_dim,
                 num_relations, out_dim, bias=True, num_bases=0):
        # entity_input_dim: per-entity feature size
        # relation_input_dim: per-relation feature size
        # num_bases > 0 enables a basis-decomposition bottleneck that
        # projects the concatenated per-relation messages down to num_bases
        # dims before the output projection (parameter saving).
        super(RelationalGraphConvolution, self).__init__()
        self.entity_input_dim = entity_input_dim
        self.relation_input_dim = relation_input_dim
        self.out_dim = out_dim
        self.num_relations = num_relations
        self.num_bases = num_bases
        if self.num_bases > 0:
            self.bottleneck_layer = torch.nn.Linear(
                (self.entity_input_dim + self.relation_input_dim) *
                self.num_relations, self.num_bases, bias=False)
            self.weight = torch.nn.Linear(
                self.num_bases, self.out_dim, bias=False)
        else:
            self.weight = torch.nn.Linear(
                (self.entity_input_dim + self.relation_input_dim) *
                self.num_relations, self.out_dim, bias=False)
        if bias:
            # Uninitialized storage here; reset_parameters() zero-fills it.
            self.bias = torch.nn.Parameter(torch.FloatTensor(self.out_dim))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-init the output projection and zero the bias."""
        torch.nn.init.xavier_uniform_(self.weight.weight.data)
        if self.bias is not None:
            self.bias.data.fill_(0)

    def forward(self, node_features, relation_features, adj):
        # node_features: batch x num_entity x in_dim
        # relation_features: batch x num_relation x in_dim
        # adj: batch x num_relations x num_entity x num_entity
        supports = []
        # Aggregate neighbor messages separately per relation: each message
        # is the node features concatenated with that relation's embedding,
        # mixed through the relation's adjacency slice.
        for relation_idx in range(self.num_relations):
            # batch x 1 x in_dim
            _r_features = relation_features[:, relation_idx: relation_idx + 1]
            _r_features = _r_features.repeat(
                1, node_features.size(1), 1)  # batch x num_entity x in_dim
            # batch x num_entity x in_dim+in_dim
            supports.append(torch.bmm(adj[:, relation_idx], torch.cat(
                [node_features, _r_features], dim=-1)))
        # batch x num_entity x (in_dim+in_dim)*num_relations
        supports = torch.cat(supports, dim=-1)
        if self.num_bases > 0:
            supports = self.bottleneck_layer(supports)
        output = self.weight(supports)  # batch x num_entity x out_dim
        if self.bias is not None:
            return output + self.bias
        else:
            return output
class StackedRelationalGraphConvolution(torch.nn.Module):
    """A stack of `RelationalGraphConvolution` layers with optional
    per-layer highway (gated residual) connections.

    input: entity features: batch x num_entity x input_dim
           relation features: batch x num_relations x input_dim
           adjacency matrix: batch x num_relations x num_entity x num_entity
    """

    def __init__(self, entity_input_dim, relation_input_dim,
                 num_relations, hidden_dims, num_bases,
                 use_highway_connections=False, dropout_rate=0.0,
                 real_valued_graph=False):
        super().__init__()
        self.entity_input_dim = entity_input_dim
        self.relation_input_dim = relation_input_dim
        self.hidden_dims = hidden_dims
        self.num_relations = num_relations
        self.dropout_rate = dropout_rate
        self.num_bases = num_bases
        self.real_valued_graph = real_valued_graph
        self.nlayers = len(self.hidden_dims)
        self.stack_gcns()
        self.use_highway_connections = use_highway_connections
        if self.use_highway_connections:
            self.stack_highway_connections()

    def stack_highway_connections(self):
        """Create one gate per layer, plus a projection of the raw input
        so the first layer's shortcut matches hidden_dims[0]."""
        self.highways = torch.nn.ModuleList(
            torch.nn.Linear(dim, dim) for dim in self.hidden_dims)
        self.input_linear = torch.nn.Linear(
            self.entity_input_dim, self.hidden_dims[0])

    def stack_gcns(self):
        """Create the GCN layers, chaining output dims into input dims."""
        in_dims = [self.entity_input_dim] + list(self.hidden_dims[:-1])
        self.gcns = torch.nn.ModuleList(
            RelationalGraphConvolution(
                in_dim, self.relation_input_dim,
                self.num_relations, out_dim,
                num_bases=self.num_bases)
            for in_dim, out_dim in zip(in_dims, self.hidden_dims))

    def forward(self, node_features, relation_features, adj):
        hidden = node_features
        for layer_idx, gcn in enumerate(self.gcns):
            if self.use_highway_connections:
                # Shortcut branch: project the raw input on layer 0,
                # otherwise carry the previous layer's output.
                shortcut = (self.input_linear(hidden) if layer_idx == 0
                            else hidden.clone())
            hidden = gcn(hidden, relation_features, adj)  # batch x nodes x hid
            if self.real_valued_graph:
                hidden = torch.sigmoid(hidden)
            else:
                hidden = F.relu(hidden)
                hidden = F.dropout(hidden, self.dropout_rate,
                                   training=self.training)
            if self.use_highway_connections:
                gate = torch.sigmoid(self.highways[layer_idx](hidden))
                hidden = gate * hidden + (1 - gate) * shortcut
        return hidden
class GraphUpdater(torch.nn.Module):
    """Belief-graph module.

    Encodes text observations and a knowledge graph, and produces the
    next graph in one of three modes:
      * ground-truth mode: returns the environment's true facts;
      * real-valued mode (observation generation): maintains a recurrent
        hidden state decoded into a continuous adjacency matrix;
      * discrete mode (command generation): greedily decodes graph-update
        command strings ("add ...", "delete ...") token by token.
    """

    def __init__(self, checkpoint: Path,
                 vocab_path: Path,
                 config: Dict[str, Any],
                 word_vocab: Vocabulary,
                 pretrained_embedding_path: Path = None,
                 relation_vocab: Vocabulary = None,
                 node_vocab: Vocabulary = None,
                 **kwargs) -> None:
        super(GraphUpdater, self).__init__(**kwargs)
        self.config = config
        # Parameter-free tensor used only to track the module's device.
        self._dummy = torch.nn.Parameter(torch.empty(0))
        self._facts: List[Tuple[str, str, str]] = list()
        self.use_ground_truth_graph = config.use_ground_truth_graph
        self._word_vocab = Vocabulary(word_vocab.original_tokens,
                                      name='GraphWordVocab',
                                      original_only=True)
        self._node_vocab = node_vocab
        self._relation_vocab = relation_vocab
        # Relation vocab holds forward relations, their "_reverse"
        # counterparts, and one "self" relation -> (len - 1) / 2 originals.
        self.origin_relation_number = int((
            len(self._relation_vocab) - 1) / 2)
        self.word_embedding = Embedding(
            embedding_size=config.word_embedding_size,
            vocab_size=len(self._word_vocab),
            id2word=self._word_vocab,
            dropout_rate=config.embedding_dropout,
            load_pretrained=True,
            trainable=False,
            embedding_oov_init="random",
            pretrained_embedding_path=str(pretrained_embedding_path))
        self.word_embedding_prj = torch.nn.Linear(
            config.word_embedding_size, config.block_hidden_dim,
            bias=False)
        # Lazily-built caches of node/relation encodings, filled on the
        # first call to encode_graph().
        self.node_embeddings, self.relation_embeddings = None, None
        self.node_embedding = Embedding(
            embedding_size=config.node_embedding_size,
            vocab_size=len(node_vocab),
            trainable=True,
            dropout_rate=config.embedding_dropout)
        self.relation_embedding = Embedding(
            embedding_size=config.relation_embedding_size,
            vocab_size=len(relation_vocab),
            trainable=True,
            dropout_rate=config.embedding_dropout)
        self.rgcns = StackedRelationalGraphConvolution(
            entity_input_dim=config.node_embedding_size +
            config.block_hidden_dim,
            relation_input_dim=config.relation_embedding_size +
            config.block_hidden_dim,
            num_relations=len(self._relation_vocab),
            hidden_dims=config.gcn_hidden_dims,
            num_bases=config.gcn_num_bases,
            use_highway_connections=config.gcn_highway_connections,
            dropout_rate=config.gcn_dropout,
            real_valued_graph=config.use_ground_truth_graph or
            config.real_valued)
        self.real_valued_graph = config.real_valued
        self.self_attention = None
        if config.use_self_attention:
            self.self_attention = SelfAttention(
                config.block_hidden_dim, config.n_heads, 0.)
        if not config.use_ground_truth_graph:
            if self.real_valued_graph:
                # Recurrent belief state; lazily initialised on the first
                # forward() call (batch size is not known yet here).
                self.prev_graph_hidden_state = None
                self.curr_mat = None
                self.obs_gen_attention = CQAttention(
                    block_hidden_dim=config.block_hidden_dim,
                    dropout=config.gcn_dropout)
                self.obs_gen_attention_prj = torch.nn.Linear(
                    config.block_hidden_dim * 4, config.block_hidden_dim,
                    bias=False)
                self.obs_gen_decoder = torch.nn.ModuleList([
                    DecoderBlockForObsGen(
                        ch_num=config.block_hidden_dim, k=5,
                        block_hidden_dim=config.block_hidden_dim,
                        n_head=config.n_heads,
                        dropout=config.block_dropout)
                    for _ in range(config.decoder_layers)])
                self.obs_gen_tgt_word_prj = torch.nn.Linear(
                    config.block_hidden_dim, len(self._word_vocab), bias=False)
                self.obs_gen_linear_1 = torch.nn.Linear(
                    config.block_hidden_dim, config.block_hidden_dim)
                self.obs_gen_linear_2 = torch.nn.Linear(
                    config.block_hidden_dim, int(
                        len(self._relation_vocab) / 2) *
                    len(self._node_vocab) * len(self._node_vocab))
                self.obs_gen_attention_to_rnn_input = torch.nn.Linear(
                    config.block_hidden_dim * 4, config.block_hidden_dim)
                self.obs_gen_graph_rnncell = torch.nn.GRUCell(
                    config.block_hidden_dim, config.block_hidden_dim)
                self.observation_discriminator = ObservationDiscriminator(
                    config.block_hidden_dim)
        self.max_target_length = config.max_target_length
        # Accounts for adding "self" and duplicate "_reverse"
        # see agents.py:79-82
        self.cmd_gen_attention = CQAttention(
            block_hidden_dim=config.block_hidden_dim,
            dropout=config.attention_dropout)
        self.cmd_gen_attention_prj = torch.nn.Linear(
            config.block_hidden_dim * 4,
            config.block_hidden_dim, bias=False)
        self.pointer_softmax = PointerSoftmax(
            input_dim=config.block_hidden_dim,
            hidden_dim=config.block_hidden_dim)
        self.tgt_word_prj = torch.nn.Linear(
            config.block_hidden_dim,
            len(self._word_vocab),
            bias=False)
        self.decoder = torch.nn.ModuleList([
            DecoderBlock(ch_num=config.block_hidden_dim, k=5,
                         block_hidden_dim=config.block_hidden_dim,
                         n_head=config.n_heads,
                         dropout=config.block_dropout)
            for _ in range(config.decoder_layers)])
        self.encoder_for_pretraining_tasks = torch.nn.ModuleList([
            EncoderBlock(conv_num=config.encoder_conv_num,
                         ch_num=config.block_hidden_dim,
                         k=5, block_hidden_dim=config.block_hidden_dim,
                         n_head=config.n_heads,
                         dropout=config.block_dropout)
            for _ in range(config.encoder_layers)])
        self.encoder_conv_num = config.encoder_conv_num
        if config.from_pretrained:
            self.load_checkpoint(checkpoint)

    def load_checkpoint(self, checkpoint: Path) -> None:
        """Load pretrained weights, keeping only keys this model defines.

        Raises AssertionError when the checkpoint is missing a parameter
        this model expects (previously dropped into pdb here).
        """
        pretrained_dict = torch.load(checkpoint)
        model_dict = self.state_dict()
        del model_dict['_dummy']
        for k, v in model_dict.items():
            assert k in pretrained_dict, \
                "parameter '{}' missing from checkpoint".format(k)
        model_dict = {k: v for k, v in pretrained_dict.items() if
                      k in model_dict}
        self.load_state_dict(model_dict, strict=False)

    @property
    def device(self) -> str:
        return self._dummy.device

    def tokenize(self, inputs: List[str]) -> torch.Tensor:
        """Convert a batch of strings into a padded id tensor.

        NOTE(review): identical to get_word_input(); kept for
        compatibility with existing callers.
        """
        word_list = [item.split() for item in inputs]
        word_id_list = [[self._word_vocab[tok] for tok in tokens]
                        for tokens in word_list]
        input_word = pad_sequences(
            word_id_list, maxlen=max_len(word_id_list)).astype('int32')
        input_word = to_pt(input_word, self.device != 'cpu')
        return input_word

    def encode_text(self,
                    inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Embed token ids and run them through the transformer encoder.

        Returns (encoding, mask): batch x seq_len x hid, batch x seq_len.
        """
        embeddings, mask = self.embed(inputs)  # batch x seq_len x emb
        # batch x seq_len x seq_len
        squared_mask = torch.bmm(mask.unsqueeze(-1), mask.unsqueeze(1))
        encoding_sequence = embeddings
        for i, encoder in enumerate(self.encoder_for_pretraining_tasks):
            # batch x time x enc
            encoding_sequence = encoder(
                encoding_sequence, squared_mask, i * (
                    self.encoder_conv_num + 2) + 1,
                len(self.encoder_for_pretraining_tasks))
        return encoding_sequence, mask

    def embed(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Word-embed token ids and project to the block hidden size."""
        word_embeddings, mask = self.word_embedding(
            inputs)  # batch x time x emb
        word_embeddings = self.word_embedding_prj(word_embeddings)
        word_embeddings = word_embeddings * \
            mask.unsqueeze(-1)  # batch x time x hid
        return word_embeddings, mask

    def get_subsequent_mask(self, seq: torch.Tensor) -> torch.Tensor:
        ''' For masking out the subsequent info. '''
        _, length = seq.size()
        subsequent_mask = torch.triu(torch.ones(
            (length, length)), diagonal=1).float()
        subsequent_mask = 1.0 - subsequent_mask
        if seq.is_cuda:
            subsequent_mask = subsequent_mask.cuda()
        subsequent_mask = subsequent_mask.unsqueeze(0)  # 1 x time x time
        return subsequent_mask

    def decode(self, inputs: torch.Tensor,
               h_og: torch.Tensor,
               obs_mask: torch.Tensor,
               h_go: torch.Tensor,
               node_mask: torch.Tensor,
               input_obs: torch.Tensor) -> torch.Tensor:
        """One decoder pass over the target prefix `inputs`.

        Attends over observation (h_og) and graph (h_go) co-attention
        outputs; final distribution uses pointer-softmax so tokens can be
        copied from the observation. Returns batch x target_len x vocab.
        """
        trg_embeddings, trg_mask = self.embed(
            inputs)  # batch x target_len x emb
        # batch x target_len x target_len
        trg_mask_square = torch.bmm(
            trg_mask.unsqueeze(-1), trg_mask.unsqueeze(1))
        # Causal mask: each position can only attend to earlier positions.
        trg_mask_square = trg_mask_square * \
            self.get_subsequent_mask(
                inputs)  # batch x target_len x target_len
        # batch x target_len x obs_len
        obs_mask_square = torch.bmm(
            trg_mask.unsqueeze(-1), obs_mask.unsqueeze(1))
        # batch x target_len x node_len
        node_mask_square = torch.bmm(
            trg_mask.unsqueeze(-1), node_mask.unsqueeze(1))
        trg_decoder_output = trg_embeddings
        for i, decoder in enumerate(self.decoder):
            trg_decoder_output, target_target_representations, \
                target_source_representations, target_source_attention = \
                decoder(
                    trg_decoder_output, trg_mask, trg_mask_square,
                    h_og, obs_mask_square, h_go, node_mask_square, i * 3 + 1,
                    len(self.decoder))  # batch x time x hid
        trg_decoder_output = self.tgt_word_prj(trg_decoder_output)
        trg_decoder_output = masked_softmax(
            trg_decoder_output, m=trg_mask.unsqueeze(-1), axis=-1)
        output = self.pointer_softmax(
            target_target_representations, target_source_representations,
            trg_decoder_output, trg_mask, target_source_attention,
            obs_mask, input_obs)
        return output

    def get_word_input(self, input_strings):
        """Convert a batch of strings into a padded id tensor."""
        word_list = [item.split() for item in input_strings]
        word_id_list = [[self._word_vocab[tok] for tok in tokens]
                        for tokens in word_list]
        input_word = pad_sequences(
            word_id_list, maxlen=max_len(word_id_list)).astype('int32')
        input_word = to_pt(input_word, self.device != 'cpu')
        return input_word

    def get_graph_node_name_input(self):
        """Token-id tensor for every node name in the node vocab."""
        res = copy.copy(self._node_vocab)
        input_node_name = self.get_word_input(res)  # num_node x words
        return input_node_name

    def get_graph_relation_name_input(self):
        """Token-id tensor for every relation name (underscores split)."""
        res = copy.copy(self._relation_vocab)
        res = [item.replace("_", " ") for item in res]
        input_relation_name = self.get_word_input(res)  # num_node x words
        return input_relation_name

    def get_graph_relation_representations(self, relation_names_word_ids):
        """Mean word embedding of each relation name, concatenated with a
        learned per-relation embedding. Returns 1 x num_relation x emb+emb.
        """
        # relation_names_word_ids: num_relation x num_word
        relation_name_embeddings, _mask = self.embed(
            relation_names_word_ids)  # num_relation x num_word x emb
        _mask = torch.sum(_mask, -1)  # num_relation
        relation_name_embeddings = torch.sum(
            relation_name_embeddings, 1)  # num_relation x hid
        # Avoid division by zero for empty names.
        tmp = torch.eq(_mask, 0).float()
        if relation_name_embeddings.is_cuda:
            tmp = tmp.cuda()
        _mask = _mask + tmp
        relation_name_embeddings = relation_name_embeddings / \
            _mask.unsqueeze(-1)
        relation_name_embeddings = relation_name_embeddings.unsqueeze(
            0)  # 1 x num_relation x emb
        relation_ids = np.arange(len(self._relation_vocab))  # num_relation
        relation_ids = to_pt(relation_ids,
                             cuda=relation_names_word_ids.is_cuda,
                             type='long').unsqueeze(0)  # 1 x num_relation
        relation_embeddings, _ = self.relation_embedding(
            relation_ids)  # 1 x num_relation x emb
        # 1 x num_relation x emb+emb
        relation_embeddings = torch.cat(
            [relation_name_embeddings, relation_embeddings], dim=-1)
        return relation_embeddings

    def get_graph_node_representations(self, node_names_word_ids):
        """Mean word embedding of each node name, concatenated with a
        learned per-node embedding. Returns 1 x num_node x emb+emb.
        """
        # node_names_word_ids: num_node x num_word
        node_name_embeddings, _mask = self.embed(
            node_names_word_ids)  # num_node x num_word x emb
        _mask = torch.sum(_mask, -1)  # num_node
        node_name_embeddings = torch.sum(
            node_name_embeddings, 1)  # num_node x hid
        # Avoid division by zero for empty names.
        tmp = torch.eq(_mask, 0).float()
        if node_name_embeddings.is_cuda:
            tmp = tmp.cuda()
        _mask = _mask + tmp
        node_name_embeddings = node_name_embeddings / _mask.unsqueeze(-1)
        node_name_embeddings = node_name_embeddings.unsqueeze(
            0)  # 1 x num_node x emb
        node_ids = np.arange(len(self._node_vocab))  # num_node
        node_ids = to_pt(node_ids,
                         cuda=node_names_word_ids.is_cuda,
                         type='long').unsqueeze(0)  # 1 x num_node
        node_embeddings, _ = self.node_embedding(
            node_ids)  # 1 x num_node x emb
        # 1 x num_node x emb+emb
        node_embeddings = torch.cat(
            [node_name_embeddings, node_embeddings], dim=-1)
        return node_embeddings

    def get_graph_adjacency_matrix(self, triplets):
        """Build a dense adjacency tensor from (node1, node2, relation)
        triplets: batch x num_relations x num_node x num_node.

        Each forward edge also sets the mirrored "_reverse" relation, and
        every participating node gets a "self" edge in the last relation.
        """
        adj = np.zeros((len(triplets), len(self._relation_vocab), len(
            self._node_vocab), len(self._node_vocab)), dtype="float32")
        for b in range(len(triplets)):
            node_exists = set()
            for t in triplets[b]:
                node1, node2, relation = t
                assert node1 in self._node_vocab, \
                    node1 + " is not in node vocab"
                assert node2 in self._node_vocab, \
                    node2 + " is not in node vocab"
                assert relation in self._relation_vocab, \
                    relation + " is not in relation vocab"
                node1_id, node2_id, relation_id = \
                    self._node_vocab[node1], self._node_vocab[node2], \
                    self._relation_vocab[relation]
                adj[b][relation_id][node1_id][node2_id] = 1.0
                adj[b][relation_id + self.origin_relation_number][
                    node2_id][node1_id] = 1.0
                node_exists.add(node1_id)
                node_exists.add(node2_id)
            # self relation
            for node_id in list(node_exists):
                adj[b, -1, node_id, node_id] = 1.0
        adj = to_pt(adj, self.device != 'cpu', type='float')
        return adj

    def encode_graph(self, graph_input):
        """Run the R-GCN stack over a graph given either as triplets
        (list) or as an adjacency tensor. Returns (encoding, node_mask).
        """
        # batch x num_node x emb+emb
        if self.node_embeddings is None:
            node_names_word_ids = self.get_graph_node_name_input()
            self.node_embeddings = self.get_graph_node_representations(
                node_names_word_ids)  # 1 x num_node x emb+emb
        if self.relation_embeddings is None:
            relation_names_word_ids = self.get_graph_relation_name_input()
            self.relation_embeddings = self.get_graph_relation_representations(
                relation_names_word_ids)  # 1 x num_node x emb+emb
        if isinstance(graph_input, list):
            input_adjacency_matrices = self.get_graph_adjacency_matrix(
                graph_input)
        elif isinstance(graph_input, torch.Tensor):
            input_adjacency_matrices = graph_input
        else:
            raise NotImplementedError
        input_adjacency_matrices = input_adjacency_matrices.to(self.device)
        node_embeddings = self.node_embeddings.repeat(
            input_adjacency_matrices.size(0), 1, 1)
        # batch x num_relation x emb+emb
        relation_embeddings = self.relation_embeddings.repeat(
            input_adjacency_matrices.size(0), 1, 1)
        # batch x num_node x enc
        node_encoding_sequence = self.rgcns(
            node_embeddings, relation_embeddings, input_adjacency_matrices)
        if self.use_ground_truth_graph:
            node_mask = torch.ones(node_encoding_sequence.size(
                0), node_encoding_sequence.size(1))  # batch x num_node
            if node_encoding_sequence.is_cuda:
                node_mask = node_mask.cuda()
        else:
            # Mask out nodes with no incoming or outgoing edges
            # (excluding the "self" relation).
            node_mask = torch.sum(input_adjacency_matrices[:, :-1, :, :], 1)
            node_mask = torch.sum(node_mask, -1) + \
                torch.sum(node_mask, -2)  # batch x num_node
            node_mask = torch.gt(node_mask, 0).float()
            node_encoding_sequence = node_encoding_sequence * \
                node_mask.unsqueeze(-1)
        if self.self_attention is not None:
            mask_squared = torch.bmm(
                node_mask.unsqueeze(-1), node_mask.unsqueeze(1))
            node_encoding_sequence, _ = self.self_attention(
                node_encoding_sequence, mask_squared, node_encoding_sequence,
                node_encoding_sequence)
        return node_encoding_sequence, node_mask

    def hidden_to_adjacency_matrix(self, hidden, batch_size):
        """Decode the recurrent hidden state into a real-valued adjacency
        matrix; a None hidden yields an all-zero graph. Reverse relations
        are the transpose of their forward counterparts.
        """
        num_node = len(self._node_vocab)
        num_relation = len(self._relation_vocab)
        if hidden is None:
            # Use the module's own device instead of unconditionally
            # calling .cuda() (which broke CPU-only runs).
            adjacency_matrix = torch.zeros(
                batch_size, num_relation, num_node, num_node,
                device=self.device)
        else:
            adjacency_matrix = torch.tanh(self.obs_gen_linear_2(
                F.relu(self.obs_gen_linear_1(
                    hidden)))).view(batch_size,
                                    int(num_relation / 2), num_node, num_node)
            adjacency_matrix = adjacency_matrix.repeat(1, 2, 1, 1)
            for i in range(int(num_relation / 2)):
                adjacency_matrix[:, int(
                    num_relation / 2) + i] = \
                    adjacency_matrix[:, i].permute(0, 2, 1)
        return adjacency_matrix

    @torch.no_grad()
    def forward(
            self, observations: List[str],
            graph: List[Tuple[str, str, str]],
            actions: List[str] = None,
            infos: Dict[str, Any] = None) -> List[Tuple[str, str, str]]:
        """Produce the next belief graph for a batch of transitions.

        Returns ground-truth facts, a real-valued adjacency tensor, or a
        list of decoded graph-update command strings, depending on mode.
        """
        if self.use_ground_truth_graph:
            if infos is None:
                raise ValueError(
                    "Can't have 'None' infos for ground truth graph")
            if 'facts' not in infos:
                raise ValueError(
                    "Must have 'facts' as infos key. Set EnvInfos(facts=True)")
            return infos['facts']
        elif self.real_valued_graph:
            # BUGFIX: previously the local was only assigned inside the
            # 'is not None' branch, raising NameError on the first call.
            prev_graph_hidden_state = None
            if self.prev_graph_hidden_state is not None:
                prev_graph_hidden_state = self.prev_graph_hidden_state.detach()
            # TE-encode
            input_obs = self.get_word_input(observations)
            prev_action_word_ids = self.get_word_input(actions)
            prev_action_encoding_sequence, prev_action_mask = self.encode_text(
                prev_action_word_ids)
            obs_encoding_sequence, obs_mask = self.encode_text(input_obs)
            prev_adjacency_matrix = self.hidden_to_adjacency_matrix(
                prev_graph_hidden_state, batch_size=len(
                    observations))
            node_encoding_sequence, node_mask = self.encode_graph(
                prev_adjacency_matrix)
            # Bidirectional co-attention: action<->graph and obs<->graph.
            h_ag = self.obs_gen_attention(
                prev_action_encoding_sequence,
                node_encoding_sequence, prev_action_mask, node_mask)
            h_ga = self.obs_gen_attention(
                node_encoding_sequence, prev_action_encoding_sequence,
                node_mask, prev_action_mask)
            h_ag = self.obs_gen_attention_prj(
                h_ag)
            h_ga = self.obs_gen_attention_prj(
                h_ga)
            h_og = self.obs_gen_attention(
                obs_encoding_sequence, node_encoding_sequence, obs_mask,
                node_mask)
            h_go = self.obs_gen_attention(
                node_encoding_sequence, obs_encoding_sequence, node_mask,
                obs_mask)
            h_og = self.obs_gen_attention_prj(
                h_og)  # bs X len X block_hidden_dim
            h_go = self.obs_gen_attention_prj(
                h_go)  # bs X len X block_hidden_dim
            ave_h_go = masked_mean(h_go, m=node_mask, dim=1)
            ave_h_og = masked_mean(h_og, m=obs_mask, dim=1)
            ave_h_ga = masked_mean(h_ga, m=node_mask, dim=1)
            ave_h_ag = masked_mean(h_ag, m=prev_action_mask, dim=1)
            rnn_input = self.obs_gen_attention_to_rnn_input(
                torch.cat([ave_h_go, ave_h_og, ave_h_ga, ave_h_ag], dim=1))
            rnn_input = torch.tanh(rnn_input)  # batch x block_hidden_dim
            h_t = self.obs_gen_graph_rnncell(
                rnn_input, prev_graph_hidden_state) if \
                prev_graph_hidden_state is not None else \
                self.obs_gen_graph_rnncell(rnn_input)
            current_adjacency_matrix = self.hidden_to_adjacency_matrix(
                h_t, batch_size=len(
                    observations))
            del self.prev_graph_hidden_state
            self.prev_graph_hidden_state = h_t.detach()
            self.curr_mat = current_adjacency_matrix.detach().cpu()
            return self.curr_mat
        else:
            batch_size = len(observations)
            # encode
            input_obs = self.get_word_input(observations)
            obs_encoding_sequence, obs_mask = self.encode_text(input_obs)
            node_encoding_sequence, node_mask = self.encode_graph(
                graph)
            h_og = self.cmd_gen_attention(
                obs_encoding_sequence, node_encoding_sequence, obs_mask,
                node_mask)
            h_go = self.cmd_gen_attention(
                node_encoding_sequence, obs_encoding_sequence, node_mask,
                obs_mask)
            h_og = self.cmd_gen_attention_prj(h_og)
            h_go = self.cmd_gen_attention_prj(h_go)
            # step 2, greedy generation
            # decode one token at a time until every sequence emits <eos>
            # or max_target_length is reached.
            input_target_token_list = [["<bos>"] for i in range(batch_size)]
            eos = np.zeros(batch_size)
            for _ in range(self.max_target_length):
                input_target = self.tokenize(
                    [" ".join(item) for item in input_target_token_list])
                # batch x time x vocab
                pred = self.decode(input_target, h_og,
                                   obs_mask, h_go, node_mask, input_obs)
                # pointer softmax
                pred = to_np(pred[:, -1])  # batch x vocab
                pred = np.argmax(pred, -1)  # batch
                for b in range(batch_size):
                    new_stuff = [self._word_vocab[int(pred[b])]
                                 ] if eos[b] == 0 else list()
                    input_target_token_list[b] = input_target_token_list[
                        b] + new_stuff
                    if pred[b] == self._word_vocab["<eos>"]:
                        eos[b] = 1
                if np.sum(eos) == batch_size:
                    break
            # Strip the leading <bos> from each decoded command string.
            return [" ".join(item[1:]) for item in input_target_token_list]
| 29,695 | 44.268293 | 79 | py |
LTL-GATA | LTL-GATA-main/src/optim/radam.py | import math
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
    """Rectified Adam (RAdam).

    Implements the variance rectification of Liu et al., "On the Variance
    of the Adaptive Learning Rate and Beyond". While the adaptive term's
    variance is intractable (early steps, N_sma < 5), the update either
    degenerates to momentum SGD (``degenerated_to_sgd=True``) or is
    skipped entirely.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 1: {}".format(betas[1]))
        self.degenerated_to_sgd = degenerated_to_sgd
        # Param groups with their own betas need their own rectification
        # buffer, since the cached step size depends on beta2.
        if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
            for param in params:
                if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
                    param['buffer'] = [[None, None, None] for _ in range(10)]
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[
            [None, None, None] for _ in range(10)])
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Work in fp32 even for fp16 params, copy back at the end.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError(
                        'RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
                        p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Moving averages of the gradient and squared gradient.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state['step'] += 1
                # The rectification term depends only on the step number,
                # so cache it (cycled over a 10-slot buffer).
                buffered = group['buffer'][int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    # N_sma: length of the approximated SMA (paper eq. 2).
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * \
                        state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (
                            N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    elif self.degenerated_to_sgd:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    else:
                        # Negative sentinel: skip the update this step.
                        step_size = -1
                    buffered[2] = step_size
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    # Rectified adaptive update. Note: weight decay here is
                    # coupled (scaled by lr), not AdamW-style decoupled.
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay']
                                         * group['lr'], )
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size *
                                         group['lr'])
                    p.data.copy_(p_data_fp32)
                elif step_size > 0:
                    # Degenerated SGD-with-momentum update.
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay']
                                         * group['lr'], )
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
                    p.data.copy_(p_data_fp32)
        return loss
class PlainRAdam(Optimizer):
    """RAdam without the 10-slot step-size cache.

    Identical math to :class:`RAdam`, but the rectification term and step
    size are recomputed on every step instead of being buffered.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 1: {}".format(betas[1]))
        self.degenerated_to_sgd = degenerated_to_sgd
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Work in fp32 even for fp16 params, copy back at the end.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError(
                        'RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
                        p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Moving averages of the gradient and squared gradient.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state['step'] += 1
                beta2_t = beta2 ** state['step']
                # N_sma: length of the approximated SMA (paper eq. 2).
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    # Rectified adaptive update; weight decay is coupled
                    # (scaled by lr), not AdamW-style decoupled.
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay']
                                         * group['lr'])
                    step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (
                        N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                    p.data.copy_(p_data_fp32)
                elif self.degenerated_to_sgd:
                    # Variance not tractable yet: momentum-SGD fallback.
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay']
                                         * group['lr'])
                    step_size = group['lr'] / (1 - beta1 ** state['step'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size)
                    p.data.copy_(p_data_fp32)
        return loss
class AdamW(Optimizer):
    """Adam variant with a linear learning-rate warmup.

    NOTE(review): despite the name, weight decay here is applied as an
    L2-style shrink scaled by the scheduled learning rate, folded into
    the same update — not the decoupled decay of the AdamW paper.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 1: {}".format(betas[1]))
        super(AdamW, self).__init__(
            params,
            dict(lr=lr, betas=betas, eps=eps,
                 weight_decay=weight_decay, warmup=warmup))

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)

    def step(self, closure=None):
        """Perform one optimization step; returns the closure's loss."""
        loss = closure() if closure is not None else None
        for param_group in self.param_groups:
            beta1, beta2 = param_group['betas']
            for param in param_group['params']:
                if param.grad is None:
                    continue
                # Work in fp32 even for fp16 params, copy back at the end.
                grad = param.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError(
                        'Adam does not support sparse gradients, please consider SparseAdam instead')
                fp32_param = param.data.float()
                state = self.state[param]
                if not state:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(fp32_param)
                    state['exp_avg_sq'] = torch.zeros_like(fp32_param)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(fp32_param)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
                        fp32_param)
                m_t, v_t = state['exp_avg'], state['exp_avg_sq']
                state['step'] += 1
                # Moving averages of the gradient and squared gradient.
                v_t.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                m_t.mul_(beta1).add_(grad, alpha=1 - beta1)
                denom = v_t.sqrt().add_(param_group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Linearly ramp the lr over the first `warmup` steps.
                if state['step'] < param_group['warmup']:
                    scheduled_lr = 1e-8 + \
                        state['step'] * param_group['lr'] / \
                        param_group['warmup']
                else:
                    scheduled_lr = param_group['lr']
                step_size = scheduled_lr * \
                    math.sqrt(bias_correction2) / bias_correction1
                if param_group['weight_decay'] != 0:
                    fp32_param.add_(fp32_param,
                                    alpha=-param_group['weight_decay']
                                    * scheduled_lr)
                fp32_param.addcdiv_(m_t, denom, value=-step_size)
                param.data.copy_(fp32_param)
        return loss
| 11,028 | 39.848148 | 111 | py |
LTL-GATA | LTL-GATA-main/src/optim/__init__.py | from typing import Dict, Any
import torch
from optim.radam import RAdam
def get_optimizer(net: torch.nn.Module,
                  config: Dict[str, Any]) -> torch.optim.Optimizer:
    """Build an optimizer (and optional LR scheduler) for `net`.

    Parameters whose names contain any keyword in
    ``config['fix_parameters_keywords']`` are placed in a frozen group
    with lr=0.0; all others use ``config['kwargs']['lr']``.

    Returns:
        (optimizer, scheduler) — scheduler is None unless
        ``config['scheduler'] == 'steplr'``.
    """
    frozen_keywords = set(config['fix_parameters_keywords'])
    frozen_params, active_params = [], []
    for name, parameter in net.named_parameters():
        if any(keyword in name for keyword in frozen_keywords):
            frozen_params.append(parameter)
        else:
            active_params.append(parameter)
    frozen_params = torch.nn.ParameterList(frozen_params)
    active_params = torch.nn.ParameterList(active_params)
    # Per-group lr overrides the lr passed through optimizer kwargs.
    params = [
        {'params': frozen_params, 'lr': 0.0},
        {'params': active_params, 'lr': config['kwargs']['lr']},
    ]
    optimizer_kwargs = config['kwargs']
    optimizer_name = config['name']
    if optimizer_name == 'adam':
        optimizer = torch.optim.Adam(params, **optimizer_kwargs)
    elif optimizer_name == 'radam':
        optimizer = RAdam(params, **optimizer_kwargs)
    elif optimizer_name == 'rmsprop':
        optimizer = torch.optim.RMSprop(params, **optimizer_kwargs)
    else:
        raise NotImplementedError
    scheduler = None
    scheduler_kwargs = config['scheduler_kwargs']
    if config['scheduler'] == 'steplr':
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, **scheduler_kwargs)
    return optimizer, scheduler
| 1,588 | 35.953488 | 77 | py |
LTL-GATA | LTL-GATA-main/src/model/features.py | from typing import List, Tuple
from argparse import Namespace
import pdb
from torch.autograd import Variable
import torch
from model.layers import Embedding, EncoderBlock, SelfAttention
from utils import max_len, to_pt, pad_sequences
class SimpleMLP(torch.nn.Module):
    """Two-layer feed-forward projection from word-embedding space to the
    action network's hidden size."""

    def __init__(self,
                 word_embedding_size: int,
                 action_net_hidden_size: int, **kwargs):
        super(SimpleMLP, self).__init__(**kwargs)
        hidden = torch.nn.Linear(word_embedding_size, word_embedding_size)
        projection = torch.nn.Linear(word_embedding_size,
                                     action_net_hidden_size)
        self.layers = torch.nn.Sequential(hidden, torch.nn.ReLU(), projection)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Map (..., word_embedding_size) -> (..., action_net_hidden_size)."""
        return self.layers(inputs)
class SimpleLSTM(torch.nn.Module):
    """LSTM sequence encoder: runs an LSTM over the input sequence and
    projects the final hidden state through a small MLP head."""

    def __init__(self,
                 word_embedding_size: int,
                 hidden_size: int,
                 num_layers: int,
                 action_net_hidden_size: int,
                 **kwargs):
        super(SimpleLSTM, self).__init__(**kwargs)
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.lstm = torch.nn.LSTM(
            input_size=word_embedding_size,
            hidden_size=hidden_size,
            num_layers=num_layers, batch_first=True)
        self.head = torch.nn.Sequential(
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_size, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, action_net_hidden_size)
        )
        # Parameter-free tensor used only to track the module's device.
        self._dummy = torch.nn.Parameter(torch.empty(0))

    @property
    def device(self) -> str:
        return self._dummy.device

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Encode a (batch, seq_len, word_embedding_size) tensor.

        Returns the head's projection of the LSTM's final hidden state.
        NOTE(review): with num_layers > 1 the view below flattens all
        layers into the batch dimension (behavior preserved from the
        original implementation) — confirm callers use num_layers == 1.
        """
        batch_size, length, hidden = inputs.shape
        # Zero initial hidden/cell states. torch.autograd.Variable is
        # deprecated and a no-op wrapper; plain tensors behave identically.
        h_0 = torch.zeros(self.num_layers, batch_size,
                          self.hidden_size, device=self.device)
        c_0 = torch.zeros(self.num_layers, batch_size,
                          self.hidden_size, device=self.device)
        output, (hn, cn) = self.lstm(inputs, (h_0, c_0))
        hn = hn.view(-1, self.hidden_size)
        out = self.head(hn)
        return out
class TextEncoder(torch.nn.Module):
    """Encodes a batch of text strings into per-token representations.

    Backbone is selected by ``config``: a pretrained LM, a SimpleLSTM,
    a SimpleMLP, or a stack of EncoderBlock transformer layers.
    NOTE(review): despite the ``List[str]`` hint, ``vocab`` is used like a
    project Vocabulary object (``vocab.tokens``, ``vocab.pad_token_id``) —
    confirm against ``components.Vocabulary``.
    """
    def __init__(self,
                 config: Namespace,
                 vocab: List[str],
                 **kwargs) -> None:
        super(TextEncoder, self).__init__(**kwargs)
        # zero-size parameter used only to discover the module's device
        self._dummy = torch.nn.Parameter(torch.empty(0))
        # copy every config field onto the module (e.g. use_pretrained_lm,
        # action_net_hidden_size, trainable, mlm_loss, ...)
        for k, v in vars(config).items():
            setattr(self, k, v)
        self.load_vocab(vocab)
        if self.use_pretrained_lm:
            from model.pretrained_lm import get_model_tokenizer
            self.lm, self.tokenizer = get_model_tokenizer(
                self.pretrained_lm_name,
                self.pretrained_lm_checkpoint)
            # ----- Add Tokens -----
            # extend the game vocabulary with the LM's special tokens, then
            # register any game tokens the LM tokenizer does not know
            vocab += list(self.tokenizer.special_tokens_map.values())
            # tokens = list(dict.fromkeys(tokens))
            add_tokens = [token for token in vocab.tokens if token not in
                          self.tokenizer.vocab]
            self.tokenizer.add_tokens(add_tokens)
            self.lm.resize_token_embeddings(len(self.tokenizer))
            # NOTE(review): embedding_dim is only assigned for 'bert';
            # other LM names would raise NameError on the next line.
            if self.pretrained_lm_name == 'bert':
                embedding_dim = 768
            self.encoder = torch.nn.Linear(embedding_dim,
                                           self.action_net_hidden_size)
            # ----- Add Tokens -----
            # ----- Delete Unused Tokens -----
            # count = 0
            # word_embedding_idxs = list()
            # vocab = list(self.tokenizer.get_vocab().items())
            # for tok, idx in vocab:
            #     if tok not in tokens:
            #         del self.tokenizer.vocab[tok]
            #     else:
            #         self.tokenizer.vocab[tok] = count
            #         word_embedding_idxs.append(idx)
            #         count += 1
            # self.tokenizer.added_tokens_encoder.clear()
            # self.tokenizer.added_tokens_decoder.clear()
            # assert len(self.tokenizer) == len(tokens)
            # word_embeddings = self.lm.embeddings.word_embeddings.weight[
            #     word_embedding_idxs]
            # self.lm.resize_token_embeddings(len(self.tokenizer))
            # self.lm.embeddings.word_embeddings.weight.data = word_embeddings
            # ----- Delete Unused Tokens -----
        else:
            # frozen pretrained (or one-hot) word embeddings + projection
            self.word_embedding = Embedding(
                embedding_size=self.word_embedding_size,
                vocab_size=len(vocab),
                id2word=vocab.tokens,
                dropout_rate=0.,
                load_pretrained=True,
                trainable=False,
                embedding_oov_init="random" if not
                self.one_hot_encoding else 'onehot',
                pretrained_embedding_path=self.pretrained_embedding_path)
            self.word_embedding_prj = torch.nn.Linear(
                self.word_embedding_size, self.action_net_hidden_size,
                bias=False)
            # config flag is replaced by the module (or None) of same name
            if self.self_attention:
                self.self_attention = SelfAttention(
                    self.action_net_hidden_size, self.n_heads, 0.)
            else:
                self.self_attention = None
            if config.lstm_backbone:
                self.encoder = SimpleLSTM(
                    word_embedding_size=self.word_embedding_size,
                    hidden_size=128,
                    num_layers=self.num_encoders,
                    action_net_hidden_size=self.action_net_hidden_size)
            elif config.mlp_backbone:
                self.encoder = SimpleMLP(
                    word_embedding_size=self.word_embedding_size,
                    action_net_hidden_size=self.action_net_hidden_size)
            else:
                self.encoder = torch.nn.ModuleList([
                    EncoderBlock(
                        conv_num=self.encoder_conv_num,
                        ch_num=self.action_net_hidden_size, k=5,
                        block_hidden_dim=self.action_net_hidden_size,
                        n_head=self.n_heads, dropout=0.,)
                    for _ in range(self.num_encoders)])
                # no-op self-assignment kept as-is
                self.num_encoders = self.num_encoders
        if not self.trainable:
            # freeze the whole encoder
            for param in self.parameters():
                param.requires_grad = False
        if self.mlm_loss:
            # masked-language-model prediction head over the vocabulary
            self.mlm_head = torch.nn.Sequential(
                torch.nn.Linear(self.action_net_hidden_size,
                                self.action_net_hidden_size, bias=True),
                torch.nn.LayerNorm(self.action_net_hidden_size,
                                   eps=1e-12, elementwise_affine=True),
                torch.nn.Linear(self.action_net_hidden_size,
                                len(self.vocab), bias=True),
            )
        # dead code: inverse-dynamics head is disabled
        if False:
            self.inverse_dynamics = torch.nn.Sequential(
                torch.nn.Linear(self.action_net_hidden_size * 2,
                                self.action_net_hidden_size * 2),
                torch.nn.ReLU(),
                torch.nn.Linear(self.action_net_hidden_size * 2,
                                self.action_net_hidden_size)
            )
    def load_vocab(self, vocab) -> None:
        """Swap in a (possibly updated) vocabulary object."""
        self.vocab = vocab
    @property
    def device(self) -> str:
        return self._dummy.device
    def compute_inverse_dynamics_loss(self, obs: List[str],
                                      next_obs: List[str],
                                      actions: List[str],
                                      hat: bool = True):
        """Incomplete experiment: encodes obs pairs but always returns None."""
        encoded_obs, obs_mask = self.forward(obs)
        encoded_next_obs, next_obs_mask = self.forward(next_obs)
        encoded_obs = torch.sum(encoded_obs, dim=1)
        encoded_next_obs = torch.sum(encoded_next_obs, dim=1)
        actions_inv = self.inverse_dynamics(torch.cat((
            encoded_obs, encoded_next_obs - encoded_obs), dim=1))
        return None
    def compute_mlm_loss(self, text: List[str]):
        """BERT-style MLM loss: mask ~15% of non-pad tokens and predict them.

        Returns None when the mlm_loss config flag is off.
        """
        if not self.mlm_loss:
            return None
        inputs = self.tokenize(text)
        labels = torch.clone(inputs)
        rand = torch.rand(inputs.shape, device=self.device)
        masking_mask = (rand < 0.15) * (inputs != self.vocab.pad_token_id)
        inputs[torch.where(masking_mask)] = self.vocab.mask_token_id
        sequence_output, _ = self.forward(inputs, compute_word_ids=False)
        predictions = self.mlm_head(sequence_output)
        # print([self.vocab.tokens[idx]
        #        for idx in inputs[0] if idx != self.vocab.pad_token_id])
        # print([self.vocab.tokens[idx]
        #        for idx in torch.argmax(predictions[0], dim=1)
        #        if idx != self.vocab.pad_token_id])
        loss_fcn = torch.nn.CrossEntropyLoss()
        # ignore pad positions in the loss (CrossEntropyLoss ignore_index)
        labels[torch.where(labels == self.vocab.pad_token_id)] = -100
        return loss_fcn(predictions.view((-1, len(self.vocab))),
                        labels.view(-1))
    def tokenize(self, text: List[str]) -> torch.Tensor:
        """Whitespace-split each string and map tokens to padded id rows."""
        word_list = [item.split() for item in text]
        word_id_list = [[self.vocab[tok] for tok in tokens]
                        for tokens in word_list]
        input_word = pad_sequences(
            word_id_list, maxlen=max_len(word_id_list)).astype('int32')
        input_word_ids = to_pt(input_word, True)
        return input_word_ids
    def forward(self, text: List[str],
                compute_word_ids: bool = True,
                position_encoding_method: str = 'cossine',
                trees=None) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        @arguments:
            text: list of strings of shape [batch-size]; when
                compute_word_ids is False this is an already-tokenized
                id tensor instead
            pad: number of additional padding items to append
                to text list of strings for batch reasons
        @returns:
            encodings: encoded text, Tensor, size
                [batch-size, sequence-length, embed-size]
            mask: mask of size
                [batch-size, sequence-length]
        """
        if self.use_pretrained_lm:
            inputs = self.tokenizer.batch_encode_plus(
                text, padding=True, add_special_tokens=True,
                return_tensors='pt')
            for k, v in inputs.items():
                inputs[k] = v.to(self.device)
            outputs = self.lm(**inputs)
            outputs = outputs.last_hidden_state
            # linear projection of LM hidden states to the action-net size
            outputs = self.encoder(outputs)
            return outputs, inputs.attention_mask
        else:
            if compute_word_ids:
                input_word_ids = self.tokenize(text)
            else:
                input_word_ids = text
            embeddings, mask = self.word_embedding(
                input_word_ids)  # batch x time x emb
            # pairwise mask for attention: batch x time x time
            squared_mask = torch.bmm(mask.unsqueeze(-1), mask.unsqueeze(1))
            position_encoding = None
            if position_encoding_method == 'tree':
                raise NotImplementedError
            if self.lstm_backbone:
                encoded_text = embeddings
                encoded_text = self.encoder(encoded_text)
            elif self.mlp_backbone:
                # bag-of-embeddings: sum over the time dimension
                encoded_text = torch.sum(embeddings, dim=1)
                encoded_text = self.encoder(encoded_text)
            else:
                embeddings = self.word_embedding_prj(embeddings)
                embeddings = embeddings * \
                    mask.unsqueeze(-1)  # batch x time x hid
                encoded_text = embeddings
                for i in range(self.num_encoders):
                    # batch x time x enc
                    encoded_text = self.encoder[i](
                        encoded_text, squared_mask, i * (
                            self.encoder_conv_num + 2) + 1, self.num_encoders,
                        position_encoding_method=position_encoding_method,
                        position_encoding=position_encoding)
            if self.self_attention is not None:
                mask_squared = torch.bmm(mask.unsqueeze(dim=-1),
                                         mask.unsqueeze(dim=1))
                encoded_text, _ = self.self_attention(
                    encoded_text, mask_squared, encoded_text, encoded_text)
            return encoded_text, mask
class BagOfWords(torch.nn.Module):
    """Minimal text featurizer: copies config fields onto itself and can
    tokenize strings into padded word-id tensors; ``forward`` is a stub."""

    def __init__(self,
                 config: Namespace,
                 vocab: List[str],
                 **kwargs) -> None:
        super(BagOfWords, self).__init__(**kwargs)
        # zero-size parameter used only to discover the module's device
        self._dummy = torch.nn.Parameter(torch.empty(0))
        for name, value in vars(config).items():
            setattr(self, name, value)
        self.load_vocab(vocab)

    def load_vocab(self, vocab) -> None:
        self.vocab = vocab

    @property
    def device(self) -> str:
        return self._dummy.device

    def tokenize(self, text: List[str]) -> torch.Tensor:
        """Whitespace-split each string and map tokens to padded id rows."""
        token_rows = [line.split() for line in text]
        id_rows = [[self.vocab[token] for token in row]
                   for row in token_rows]
        padded = pad_sequences(
            id_rows, maxlen=max_len(id_rows)).astype('int32')
        return to_pt(padded, True)

    def forward(self, text: List[str],
                compute_word_ids: bool = True,
                position_encoding_method: str = 'cossine',
                trees=None) -> Tuple[torch.Tensor, torch.Tensor]:
        """Not implemented; returns ``None``."""
| 13,426 | 41.090909 | 78 | py |
LTL-GATA | LTL-GATA-main/src/model/utils.py | import torch
import math
def get_timing_signal(length: int, channels: int,
                      min_timescale: float = 1.0,
                      max_timescale: float = 1.0e4) -> torch.Tensor:
    """Build the transformer sin/cos timing signal, shape (1, length, channels).

    The first ``channels // 2`` features are sines and the next are cosines
    over a geometric range of timescales; an odd channel count is
    zero-padded on the right.
    """
    half = channels // 2
    # geometric progression of inverse timescales, as in Vaswani et al.
    log_increment = (math.log(float(max_timescale) / float(min_timescale))
                     / (float(half) - 1))
    inv_scales = min_timescale * torch.exp(
        torch.arange(half).type(torch.float32) * -log_increment)
    positions = torch.arange(length).type(torch.float32)
    angles = positions.unsqueeze(1) * inv_scales.unsqueeze(0)
    table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    # pad one zero column on the right when channels is odd
    table = torch.nn.ZeroPad2d((0, channels % 2, 0, 0))(table)
    return table.view(1, length, channels)
def PosEncoder(x: torch.Tensor,
               min_timescale: float = 1.0,
               max_timescale: float = 1.0e4):
    """Add the sinusoidal timing signal to ``x`` (batch x length x channels)."""
    seq_len = x.size(1)
    n_channels = x.size(2)
    encoding = get_timing_signal(seq_len, n_channels,
                                 min_timescale, max_timescale)
    if x.is_cuda:
        encoding = encoding.cuda()
    return x + encoding
def TreePosEncoder(x):
    """Tree-structured positional encoding — placeholder; returns ``None``."""
| 1,224 | 34 | 79 | py |
LTL-GATA | LTL-GATA-main/src/model/layers.py | import torch.nn.functional as F
import numpy as np
import torch
import h5py
from model.utils import PosEncoder, TreePosEncoder
class H5EmbeddingManager(object):
    """Loads pretrained word embeddings from an HDF5 file and builds
    initialization matrices for a target vocabulary."""

    def __init__(self, h5_path):
        f = h5py.File(h5_path, 'r')
        self.W = np.array(f['embedding'])
        # the word list is stored as a single newline-joined byte string
        self.id2word = f['words_flatten'][0].split(b'\n')
        self.id2word = [item.decode("utf-8") for item in self.id2word]
        self.word2id = dict(zip(self.id2word, range(len(self.id2word))))

    def __getitem__(self, item):
        """Return the pretrained vector for a word; non-str keys raise."""
        if isinstance(item, str):
            return self.W[self.word2id[item]]
        raise RuntimeError("don't support type: %s" % type(item))

    def word_embedding_initialize(self, words_list, dim_size=300, scale=0.1,
                                  oov_init='random'):
        """Build a ``(len(words_list), dim_size)`` initialization matrix.

        Known words copy their pretrained vector; compound words ('a_b')
        average the vectors of their known parts; OOV words keep the
        ``oov_init`` fallback ('random' | 'zero' | 'one' | 'onehot').
        Row 0 is zeroed for padding.
        """
        shape = (len(words_list), dim_size)
        self.rng = np.random.RandomState(42)
        if 'zero' == oov_init:
            W2V = np.zeros(shape, dtype='float32')
        elif 'one' == oov_init:
            W2V = np.ones(shape, dtype='float32')
        elif 'onehot' == oov_init:
            if len(words_list) > dim_size:
                raise ValueError("Can't one-hot encode vocab size > dim size")
            W2V = np.zeros((dim_size, dim_size))
            np.fill_diagonal(W2V, 1)
            return W2V
        else:
            W2V = self.rng.uniform(
                low=-scale, high=scale, size=shape).astype('float32')
        W2V[0, :] = 0  # row 0 reserved for padding
        # Resolve each word to its pretrained id(s); None marks OOV.  Keeping
        # this list aligned with words_list fixes a bug where the old code
        # appended only in-vocab ids and then zipped positionally, shifting
        # every word after an OOV entry onto the wrong row.  (It also avoids
        # np.bool, removed in NumPy >= 1.24, and no longer crashes on
        # compound words with some unknown parts.)
        word_ids = []
        for word in words_list:
            if '_' in word:
                parts = [self.word2id[w] for w in word.split('_')
                         if w in self.word2id]
                word_ids.append(parts if parts else None)
            elif word in self.word2id:
                word_ids.append(self.word2id[word])
            else:
                word_ids.append(None)
        for i, ids in enumerate(word_ids):
            if ids is None:
                continue  # keep the oov_init fallback for this row
            if isinstance(ids, list):
                W2V[i] = np.mean([self.W[x] for x in ids], axis=0)
            else:
                W2V[i] = self.W[ids]
        return W2V
class Embedding(torch.nn.Module):
    '''
    Word-embedding lookup with padding mask.

    inputs:  x:         batch x ...   (word ids; id 0 is padding)
    outputs: embedding: batch x ... x emb
             mask:      batch x ...   (1.0 where id != 0)
    '''
    def __init__(self, embedding_size, vocab_size, dropout_rate=0.0,
                 trainable=True, id2word=None,
                 embedding_oov_init='random', load_pretrained=False,
                 pretrained_embedding_path=None):
        super(Embedding, self).__init__()
        self.embedding_size = embedding_size
        self.vocab_size = vocab_size
        self.id2word = id2word
        self.dropout_rate = dropout_rate
        self.load_pretrained = load_pretrained
        self.embedding_oov_init = embedding_oov_init
        self.pretrained_embedding_path = pretrained_embedding_path
        self.trainable = trainable
        self.embedding_layer = torch.nn.Embedding(
            self.vocab_size, self.embedding_size, padding_idx=0)
        self.init_weights()
    def init_weights(self):
        """Replace the embedding weights with the pretrained/random init."""
        init_embedding_matrix = self.embedding_init()
        if self.embedding_layer.weight.is_cuda:
            init_embedding_matrix = init_embedding_matrix.cuda()
        self.embedding_layer.weight = torch.nn.Parameter(init_embedding_matrix)
        if not self.trainable:
            # freeze the embedding table
            self.embedding_layer.weight.requires_grad = False
    def embedding_init(self):
        """Build the initial weight matrix (float32 tensor)."""
        # Embeddings
        if self.load_pretrained is False:
            # small uniform init; row 0 zeroed for padding
            word_embedding_init = np.random.uniform(
                low=-0.05, high=0.05, size=(self.vocab_size,
                                            self.embedding_size))
            word_embedding_init[0, :] = 0
        else:
            embedding_initr = H5EmbeddingManager(
                self.pretrained_embedding_path)
            word_embedding_init = embedding_initr.word_embedding_initialize(
                self.id2word,
                dim_size=self.embedding_size,
                oov_init=self.embedding_oov_init)
            del embedding_initr
        word_embedding_init = torch.from_numpy(word_embedding_init).float()
        return word_embedding_init
    def compute_mask(self, x):
        """1.0 where x is a real token, 0.0 at padding (id 0)."""
        mask = torch.ne(x, 0).float()
        # NOTE(review): torch.ne already returns a tensor on x's device, so
        # this .cuda() looks redundant — confirm before removing.
        if x.is_cuda:
            mask = mask.cuda()
        return mask
    def forward(self, x):
        embeddings = self.embedding_layer(x)  # batch x time x emb
        embeddings = F.dropout(
            embeddings, p=self.dropout_rate, training=self.training)
        mask = self.compute_mask(x)  # batch x time
        return embeddings, mask
class ScaledDotProductAttention(torch.nn.Module):
    """Scaled dot-product attention with masked softmax and dropout."""

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = torch.nn.Dropout(attn_dropout)

    def forward(self, q, k, v, mask):
        """q/k/v: batch x len x dim; mask: batch x len_q x len_k."""
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        weights = self.dropout(masked_softmax(scores, mask, 2))
        return torch.bmm(weights, v), weights
class SelfAttention(torch.nn.Module):
    ''' From Multi-Head Attention module
    https://github.com/jadore801120/attention-is-all-you-need-pytorch

    Multi-head attention where each head has the full block_hidden_dim
    (not block_hidden_dim // n_head), followed by a projection back to
    block_hidden_dim, dropout, residual add, and LayerNorm.
    '''
    def __init__(self, block_hidden_dim, n_head, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.block_hidden_dim = block_hidden_dim
        self.w_qs = torch.nn.Linear(
            block_hidden_dim, n_head * block_hidden_dim, bias=False)
        self.w_ks = torch.nn.Linear(
            block_hidden_dim, n_head * block_hidden_dim, bias=False)
        self.w_vs = torch.nn.Linear(
            block_hidden_dim, n_head * block_hidden_dim, bias=False)
        torch.nn.init.normal_(self.w_qs.weight, mean=0,
                              std=np.sqrt(2.0 / (block_hidden_dim * 2)))
        torch.nn.init.normal_(self.w_ks.weight, mean=0,
                              std=np.sqrt(2.0 / (block_hidden_dim * 2)))
        torch.nn.init.normal_(self.w_vs.weight, mean=0,
                              std=np.sqrt(2.0 / (block_hidden_dim * 2)))
        # softmax temperature is sqrt(d) as in the Transformer paper
        self.attention = ScaledDotProductAttention(
            temperature=np.power(block_hidden_dim, 0.5))
        self.fc = torch.nn.Linear(n_head * block_hidden_dim, block_hidden_dim)
        self.layer_norm = torch.nn.LayerNorm(self.block_hidden_dim)
        torch.nn.init.xavier_normal_(self.fc.weight)
        self.dropout = torch.nn.Dropout(dropout)
    def forward(self, q, mask, k, v):
        """Returns (output: b x len_q x hid, attn: (n_head*b) x len_q x len_k)."""
        # q: batch x len_q x hid
        # k: batch x len_k x hid
        # v: batch x len_v x hid
        # mask: batch x len_q x len_k
        batch_size, len_q = q.size(0), q.size(1)
        len_k, len_v = k.size(1), v.size(1)
        assert mask.size(1) == len_q
        assert mask.size(2) == len_k
        residual = q
        # project to n_head parallel heads
        q = self.w_qs(q).view(batch_size, len_q,
                              self.n_head, self.block_hidden_dim)
        k = self.w_ks(k).view(batch_size, len_k,
                              self.n_head, self.block_hidden_dim)
        v = self.w_vs(v).view(batch_size, len_v,
                              self.n_head, self.block_hidden_dim)
        # fold heads into the batch dimension
        q = q.permute(2, 0, 1, 3).contiguous().view(
            -1, len_q, self.block_hidden_dim)  # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(
            -1, len_k, self.block_hidden_dim)  # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(
            -1, len_v, self.block_hidden_dim)  # (n*b) x lv x dv
        mask = mask.repeat(self.n_head, 1, 1)  # (n*b) x .. x ..
        output, attn = self.attention(q, k, v, mask=mask)
        # unfold the heads and concatenate them along the feature axis
        output = output.view(self.n_head, batch_size,
                             len_q, self.block_hidden_dim)
        output = output.permute(1, 2, 0, 3).contiguous().view(
            batch_size, len_q, -1)  # b x lq x (n*dv)
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)
        return output, attn
class DepthwiseSeparableConv(torch.nn.Module):
    """Depthwise + pointwise 1-D convolution over (batch, time, ch) input."""

    def __init__(self, in_ch, out_ch, k, bias=True):
        super().__init__()
        # per-channel convolution with 'same' padding, no bias
        self.depthwise_conv = torch.nn.Conv1d(
            in_channels=in_ch, out_channels=in_ch,
            kernel_size=k, groups=in_ch, padding=k // 2, bias=False)
        # 1x1 convolution mixing channels
        self.pointwise_conv = torch.nn.Conv1d(
            in_channels=in_ch, out_channels=out_ch,
            kernel_size=1, padding=0, bias=bias)

    def forward(self, x):
        # Conv1d expects (batch, channels, time)
        channels_first = x.transpose(1, 2)
        mixed = torch.relu(
            self.pointwise_conv(self.depthwise_conv(channels_first)))
        return mixed.transpose(1, 2)
class EncoderBlock(torch.nn.Module):
    """QANet-style encoder block: positional encoding, ``conv_num``
    depthwise-separable convolutions, self-attention, and a two-layer
    feed-forward net, each wrapped in pre-LayerNorm + stochastic layer
    dropout whose keep probability scales with global layer index."""
    def __init__(self, conv_num, ch_num, k, block_hidden_dim, n_head, dropout):
        super().__init__()
        self.dropout = dropout
        self.convs = torch.nn.ModuleList(
            [DepthwiseSeparableConv(ch_num, ch_num, k) for _ in range(
                conv_num)])
        self.self_att = SelfAttention(block_hidden_dim, n_head, dropout)
        self.FFN_1 = torch.nn.Linear(ch_num, ch_num)
        self.FFN_2 = torch.nn.Linear(ch_num, ch_num)
        self.norm_C = torch.nn.ModuleList(
            [torch.nn.LayerNorm(block_hidden_dim) for _ in range(conv_num)])
        self.norm_1 = torch.nn.LayerNorm(block_hidden_dim)
        self.norm_2 = torch.nn.LayerNorm(block_hidden_dim)
        self.conv_num = conv_num
    def forward(self, x, mask, layer, blks,
                position_encoding_method: str = 'cossine',
                position_encoding: torch.Tensor = None):
        """``layer`` is the global sub-layer index across all ``blks``
        blocks; it scales the stochastic layer-dropout rate."""
        total_layers = (self.conv_num + 2) * blks
        # conv layers
        if position_encoding is not None:
            out = x + position_encoding
        else:
            if position_encoding_method == 'cossine':
                position_encoder = PosEncoder
            else:
                raise ValueError("Unkown position encoding method " +
                                 f"{position_encoding_method}")
            out = position_encoder(x)
        for i, conv in enumerate(self.convs):
            res = out
            out = self.norm_C[i](out)
            # dropout only on every other conv sub-layer
            if (i) % 2 == 0:
                out = F.dropout(out, p=self.dropout, training=self.training)
            out = conv(out)
            out = self.layer_dropout(
                out, res, self.dropout * float(layer) / total_layers)
            layer += 1
        res = out
        out = self.norm_1(out)
        out = F.dropout(out, p=self.dropout, training=self.training)
        # self attention
        out, _ = self.self_att(out, mask, out, out)
        out = self.layer_dropout(
            out, res, self.dropout * float(layer) / total_layers)
        layer += 1
        res = out
        out = self.norm_2(out)
        out = F.dropout(out, p=self.dropout, training=self.training)
        # fully connected layers
        out = self.FFN_1(out)
        out = torch.relu(out)
        out = self.FFN_2(out)
        out = self.layer_dropout(
            out, res, self.dropout * float(layer) / total_layers)
        layer += 1
        return out
    def layer_dropout(self, inputs, residual, dropout):
        """Stochastic depth: at train time, skip the sub-layer entirely
        with probability ``dropout``; otherwise residual-add with dropout."""
        if self.training is True:
            pred = torch.empty(1).uniform_(0, 1) < dropout
            if pred:
                return residual
            else:
                return F.dropout(inputs, dropout,
                                 training=self.training) + residual
        else:
            return inputs + residual
class DecoderBlock(torch.nn.Module):
    """Transformer decoder block: masked self-attention over the target,
    two cross-attentions (observation encodings and graph-node encodings)
    fused by a linear layer, then a feed-forward net; each stage uses
    stochastic layer dropout scaled by the global ``layer`` index."""
    def __init__(self, ch_num, k, block_hidden_dim, n_head, dropout):
        super().__init__()
        self.dropout = dropout
        self.self_att = SelfAttention(block_hidden_dim, n_head, dropout)
        self.obs_att = SelfAttention(block_hidden_dim, n_head, dropout)
        self.node_att = SelfAttention(block_hidden_dim, n_head, dropout)
        # fuses the two cross-attention outputs back to block_hidden_dim
        self.FFN_0 = torch.nn.Linear(block_hidden_dim * 2, block_hidden_dim)
        self.FFN_1 = torch.nn.Linear(ch_num, ch_num)
        self.FFN_2 = torch.nn.Linear(ch_num, ch_num)
        self.norm_1 = torch.nn.LayerNorm(block_hidden_dim)
        self.norm_2 = torch.nn.LayerNorm(block_hidden_dim)
    def forward(self, x, mask, self_att_mask, obs_enc_representations,
                obs_mask, node_enc_representations, node_mask, layer, blks):
        """Returns (out, out_self, out_obs, obs_attention)."""
        total_layers = blks * 3
        # conv layers
        out = PosEncoder(x)
        res = out
        # self attention
        out, _ = self.self_att(out, self_att_mask, out, out)
        out_self = out * mask.unsqueeze(-1)
        out = self.layer_dropout(
            out_self, res, self.dropout * float(layer) / total_layers)
        layer += 1
        res = out
        out = self.norm_1(out)
        out = F.dropout(out, p=self.dropout, training=self.training)
        # attention with encoder outputs
        out_obs, obs_attention = self.obs_att(
            out, obs_mask, obs_enc_representations, obs_enc_representations)
        out_node, _ = self.node_att(
            out, node_mask, node_enc_representations, node_enc_representations)
        out = torch.cat([out_obs, out_node], -1)
        out = self.FFN_0(out)
        out = torch.relu(out)
        out = out * mask.unsqueeze(-1)
        out = self.layer_dropout(
            out, res, self.dropout * float(layer) / total_layers)
        layer += 1
        res = out
        out = self.norm_2(out)
        out = F.dropout(out, p=self.dropout, training=self.training)
        # Fully connected layers
        out = self.FFN_1(out)
        out = torch.relu(out)
        out = self.FFN_2(out)
        out = out * mask.unsqueeze(-1)
        out = self.layer_dropout(
            out, res, self.dropout * float(layer) / total_layers)
        layer += 1
        return out, out_self, out_obs, obs_attention
    def layer_dropout(self, inputs, residual, dropout):
        """Stochastic depth: at train time, skip the sub-layer entirely
        with probability ``dropout``; otherwise residual-add with dropout."""
        if self.training is True:
            pred = torch.empty(1).uniform_(0, 1) < dropout
            if pred:
                return residual
            else:
                return F.dropout(inputs, dropout,
                                 training=self.training) + residual
        else:
            return inputs + residual
class PointerSoftmax(torch.nn.Module):
    """Pointer-generator mixing: learns a per-position switch that blends
    the decoder's vocabulary distribution with a copy distribution built
    by scattering source-attention weights onto source token ids."""
    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.pointer_softmax_context = torch.nn.Linear(input_dim, hidden_dim)
        self.pointer_softmax_target = torch.nn.Linear(input_dim, hidden_dim)
        self.pointer_softmax_squash = torch.nn.Linear(hidden_dim, 1)
    def forward(self, target_target_representations,
                target_source_representations, trg_decoder_output,
                target_mask, target_source_attention, source_mask,
                input_source):
        """Returns the mixed distribution: batch x target_len x vocab."""
        # target_target_representations: batch x target_len x hid
        # target_source_representations: batch x target_len x hid
        # trg_decoder_output: batch x target len x vocab
        # target mask: batch x target len
        # target_source_attention: batch x target len x source len
        # source mask: batch x source len
        # input source: batch x source len
        batch_size = target_source_attention.size(0)
        target_len = target_source_attention.size(1)
        source_len = target_source_attention.size(2)
        # switch in [0, 1]: 1 -> use vocabulary dist, 0 -> use copy dist
        switch = self.pointer_softmax_context(
            target_source_representations)  # batch x trg_len x hid
        # batch x trg_len x hid
        switch = switch + \
            self.pointer_softmax_target(target_target_representations)
        switch = torch.tanh(switch)
        switch = switch * target_mask.unsqueeze(-1)
        switch = self.pointer_softmax_squash(
            switch).squeeze(-1)  # batch x trg_len
        switch = torch.sigmoid(switch)
        switch = switch * target_mask  # batch x target len
        switch = switch.unsqueeze(-1)  # batch x target len x 1
        target_source_attention = target_source_attention * \
            source_mask.unsqueeze(1)
        from_vocab = trg_decoder_output  # batch x target len x vocab
        from_source = torch.autograd.Variable(torch.zeros(
            batch_size * target_len,
            from_vocab.size(-1)))  # batch x target len x vocab
        if from_vocab.is_cuda:
            from_source = from_source.cuda()
        # scatter attention mass onto the vocab ids of the source tokens
        input_source = input_source.unsqueeze(1).expand(
            batch_size, target_len, source_len)
        input_source = input_source.contiguous().view(
            batch_size * target_len, -1)  # batch*target_len x source_len
        from_source = from_source.scatter_add_(
            1, input_source, target_source_attention.view(
                batch_size * target_len, -1))
        # batch x target_len x vocab
        from_source = from_source.view(batch_size, target_len, -1)
        # batch x target_len x vocab
        merged = switch * from_vocab + (1.0 - switch) * from_source
        merged = merged * target_mask.unsqueeze(-1)
        return merged
def masked_softmax(x: torch.Tensor, m: torch.Tensor = None,
                   axis: int = -1) -> torch.Tensor:
    """Numerically-stable softmax that ignores positions where ``m`` is 0.

    ``m`` (optional) is broadcast against ``x``; masked entries get (near)
    zero probability.  Inputs are clamped to [-15, 15] before exponentiation.
    """
    x = torch.clamp(x, min=-15.0, max=15.0)
    if m is None:
        weights = torch.exp(x - torch.max(x, dim=axis, keepdim=True)[0])
    else:
        m = m.float()
        masked = x * m
        weights = torch.exp(
            masked - torch.max(masked, dim=axis, keepdim=True)[0]) * m
    # small epsilon keeps fully-masked rows from dividing by zero
    return weights / (torch.sum(weights, dim=axis, keepdim=True) + 1e-6)
class CQAttention(torch.nn.Module):
    """QANet context-query attention: trilinear similarity between a
    context C and a query Q, then context-to-query (A) and
    query-to-context (B) attended features, concatenated as
    [C, A, C*A, C*B] along the feature axis (output dim = 4 * hid)."""
    def __init__(self, block_hidden_dim: int, dropout: float = 0.):
        super().__init__()
        self.dropout = dropout
        # the three factors of the trilinear similarity function
        w4C = torch.empty(block_hidden_dim, 1)
        w4Q = torch.empty(block_hidden_dim, 1)
        w4mlu = torch.empty(1, 1, block_hidden_dim)
        torch.nn.init.xavier_uniform_(w4C)
        torch.nn.init.xavier_uniform_(w4Q)
        torch.nn.init.xavier_uniform_(w4mlu)
        self.w4C = torch.nn.Parameter(w4C)
        self.w4Q = torch.nn.Parameter(w4Q)
        self.w4mlu = torch.nn.Parameter(w4mlu)
        bias = torch.empty(1)
        torch.nn.init.constant_(bias, 0)
        self.bias = torch.nn.Parameter(bias)
    def forward(self, C: torch.Tensor, Q: torch.Tensor,
                Cmask: torch.Tensor, Qmask: torch.Tensor) -> torch.Tensor:
        """C: b x lc x hid, Q: b x lq x hid; masks: b x lc / b x lq."""
        S = self.trilinear_for_attention(C, Q)
        Cmask = Cmask.unsqueeze(-1)
        Qmask = Qmask.unsqueeze(1)
        # S1: softmax over query positions; S2: over context positions
        S1 = masked_softmax(S, Qmask, axis=2)
        S2 = masked_softmax(S, Cmask, axis=1)
        A = torch.bmm(S1, Q)
        B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C)
        out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2)
        return out
    def trilinear_for_attention(self, C: torch.Tensor,
                                Q: torch.Tensor) -> torch.Tensor:
        """Similarity matrix S: b x lc x lq via w·[C, Q, C∘Q] + bias."""
        C = F.dropout(C, p=self.dropout, training=self.training)
        Q = F.dropout(Q, p=self.dropout, training=self.training)
        max_q_len = Q.size(-2)
        max_context_len = C.size(-2)
        subres0 = torch.matmul(C, self.w4C).expand([-1, -1, max_q_len])
        subres1 = torch.matmul(Q, self.w4Q).transpose(
            1, 2).expand([-1, max_context_len, -1])
        subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2))
        res = subres0 + subres1 + subres2
        res += self.bias
        return res
class LSTMCell(torch.nn.Module):
    """A basic LSTM cell."""
    def __init__(self, input_size, hidden_size, use_bias=True):
        """
        Most parts are copied from torch.nn.LSTMCell.

        A single fused linear layer produces the pre-activations of all
        four gates; the forget-gate bias is kept separate so it can be
        initialized to 1.0.
        """
        super(LSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.use_bias = use_bias
        self.pre_act_linear = torch.nn.Linear(
            input_size + hidden_size, 4 * hidden_size, bias=False)
        if use_bias:
            # bias_f: forget gate; bias_iog: input/output/candidate gates
            self.bias_f = torch.nn.Parameter(torch.FloatTensor(hidden_size))
            self.bias_iog = torch.nn.Parameter(
                torch.FloatTensor(3 * hidden_size))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        """Xavier weights; forget bias 1.0 (standard trick), others 0.0."""
        torch.nn.init.xavier_uniform_(self.pre_act_linear.weight.data)
        if self.use_bias:
            self.bias_f.data.fill_(1.0)
            self.bias_iog.data.fill_(0.0)
    def get_init_hidden(self, bsz, use_cuda):
        """Zero (h_0, c_0) of shape (bsz, hidden_size) each."""
        h_0 = torch.autograd.Variable(
            torch.FloatTensor(bsz, self.hidden_size).zero_())
        c_0 = torch.autograd.Variable(
            torch.FloatTensor(bsz, self.hidden_size).zero_())
        if use_cuda:
            h_0, c_0 = h_0.cuda(), c_0.cuda()
        return h_0, c_0
    def forward(self, input_, mask_=None, h_0=None, c_0=None):
        """
        Args:
            input_: A (batch, input_size) tensor containing input features.
            mask_: (batch) — rows with mask 0 keep their previous state.
            hx: A tuple (h_0, c_0), which contains the initial hidden
                and cell state, where the size of both states is
                (batch, hidden_size).
        Returns:
            h_1, c_1: Tensors containing the next hidden and cell state.
        """
        if h_0 is None or c_0 is None:
            h_init, c_init = self.get_init_hidden(
                input_.size(0), use_cuda=input_.is_cuda)
            if h_0 is None:
                h_0 = h_init
            if c_0 is None:
                c_0 = c_init
        if mask_ is None:
            # default mask of ones, shaped (batch,)
            mask_ = torch.ones_like(torch.sum(input_, -1))
            if input_.is_cuda:
                mask_ = mask_.cuda()
        # fused gate pre-activations from [input, h_0]
        pre_act = self.pre_act_linear(
            torch.cat([input_, h_0], -1))  # batch x 4*hid
        if self.use_bias:
            pre_act = pre_act + \
                torch.cat([self.bias_f, self.bias_iog]).unsqueeze(0)
        f, i, o, g = torch.split(
            pre_act, split_size_or_sections=self.hidden_size, dim=1)
        expand_mask_ = mask_.unsqueeze(1)  # batch x 1
        c_1 = torch.sigmoid(f) * c_0 + torch.sigmoid(i) * torch.tanh(g)
        # masked rows carry the old state through unchanged
        c_1 = c_1 * expand_mask_ + c_0 * (1 - expand_mask_)
        h_1 = torch.sigmoid(o) * torch.tanh(c_1)
        h_1 = h_1 * expand_mask_ + h_0 * (1 - expand_mask_)
        return h_1, c_1
    def __repr__(self):
        s = '{name}({input_size}, {hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
def masked_mean(x, m=None, dim=1):
    """
    mean pooling when there're paddings
    input:  tensor: batch x time x h
    mask:   batch x time
    output: tensor: batch x h

    Rows whose mask is all zeros yield zeros (no division by zero).
    """
    if m is None:
        return torch.mean(x, dim=dim)
    x = x * m.unsqueeze(-1)
    denom = torch.sum(m, dim=-1)  # batch
    # bump all-zero denominators to 1 so empty rows divide safely
    empty_rows = torch.eq(denom, 0).float()
    if x.is_cuda:
        empty_rows = empty_rows.cuda()
    denom = denom + empty_rows
    pooled = torch.sum(x, dim=dim)  # batch x h
    return pooled / denom.unsqueeze(-1)
class ObservationDiscriminator(torch.nn.Module):
    """Bilinear discriminator scoring (context, observation-summary) pairs.

    ``forward`` returns logits for a positive batch stacked on top of a
    negative batch along dim 0.
    """

    def __init__(self, n_h):
        super(ObservationDiscriminator, self).__init__()
        self.f_k = torch.nn.Bilinear(2 * n_h, n_h, 1)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        """Xavier-init bilinear weights and zero their bias."""
        if not isinstance(m, torch.nn.Bilinear):
            return
        torch.nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0.0)

    def forward(self, c, h_p, p_mask, h_n, n_mask, s_bias1=None, s_bias2=None):
        pos_summary = masked_mean(h_p, p_mask)
        neg_summary = masked_mean(h_n, n_mask)
        pos_score = self.f_k(c, pos_summary)
        neg_score = self.f_k(c, neg_summary)
        return torch.cat([pos_score, neg_score], dim=0)
class DecoderBlockForObsGen(torch.nn.Module):
    """Transformer decoder block for the observation-generation task.

    Mirrors ``DecoderBlock`` but cross-attends over previous-action and
    graph-node encodings, and returns only (out, out_self).
    """
    def __init__(self, ch_num, k, block_hidden_dim, n_head, dropout):
        super().__init__()
        self.dropout = dropout
        self.self_att = SelfAttention(block_hidden_dim, n_head, dropout)
        self.obs_att = SelfAttention(block_hidden_dim, n_head, dropout)
        self.node_att = SelfAttention(block_hidden_dim, n_head, dropout)
        # fuses the two cross-attention outputs back to block_hidden_dim
        self.FFN_0 = torch.nn.Linear(block_hidden_dim * 2, block_hidden_dim)
        self.FFN_1 = torch.nn.Linear(ch_num, ch_num)
        self.FFN_2 = torch.nn.Linear(ch_num, ch_num)
        self.norm_1 = torch.nn.LayerNorm(block_hidden_dim)
        self.norm_2 = torch.nn.LayerNorm(block_hidden_dim)
    def forward(self, x, mask, self_att_mask, prev_action_enc_representations,
                prev_action_mask, node_enc_representations, node_mask, l,
                blks):
        """``l`` is the global sub-layer index across all ``blks`` blocks;
        it scales the stochastic layer-dropout rate.
        Returns (out, out_self)."""
        total_layers = blks * 3
        out = PosEncoder(x)
        res = out
        # masked self attention over the partial target sequence
        out, _ = self.self_att(out, self_att_mask, out, out)
        out_self = out * mask.unsqueeze(-1)
        out = self.layer_dropout(
            out_self, res, self.dropout * float(l) / total_layers)
        l += 1
        res = out
        out = self.norm_1(out)
        out = F.dropout(out, p=self.dropout, training=self.training)
        # cross attention with the two encoder outputs
        out_obs, _ = self.obs_att(
            out, prev_action_mask, prev_action_enc_representations,
            prev_action_enc_representations)
        out_node, _ = self.node_att(
            out, node_mask, node_enc_representations,
            node_enc_representations)
        out = torch.cat([out_obs, out_node], -1)
        out = self.FFN_0(out)
        out = torch.relu(out)
        out = out * mask.unsqueeze(-1)
        out = self.layer_dropout(
            out, res, self.dropout * float(l) / total_layers)
        l += 1
        res = out
        out = self.norm_2(out)
        out = F.dropout(out, p=self.dropout, training=self.training)
        # position-wise feed-forward
        out = self.FFN_1(out)
        out = torch.relu(out)
        out = self.FFN_2(out)
        out = out * mask.unsqueeze(-1)
        out = self.layer_dropout(
            out, res, self.dropout * float(l) / total_layers)
        l += 1
        return out, out_self  # , out_obs, obs_attention
    def layer_dropout(self, inputs, residual, dropout):
        """Stochastic depth: at train time, skip the sub-layer entirely
        with probability ``dropout``; otherwise residual-add with dropout."""
        if self.training:  # idiom fix: was `self.training == True`
            pred = torch.empty(1).uniform_(0, 1) < dropout
            if pred:
                return residual
            return F.dropout(inputs, dropout,
                             training=self.training) + residual
        return inputs + residual
| 27,037 | 38.761765 | 143 | py |
LTL-GATA | LTL-GATA-main/src/model/__init__.py | from typing import List, Tuple
from argparse import Namespace
import logging
import pdb
import numpy as np
import torch
from utils import max_len, to_pt, pad_sequences
from components import Actions, Vocabulary
from model.features import TextEncoder
from model.layers import LSTMCell
from state import BatchedStates
logger = logging.getLogger()
class ActionNet(torch.nn.Module):
    """Scores action candidates: (..., hidden * num_inputs) -> one scalar
    per row, with ``mask`` zeroing both hidden and output rows."""

    def __init__(self, hidden: int, num_inputs: int, **kwargs) -> None:
        super(ActionNet, self).__init__(**kwargs)
        self.layers = torch.nn.Sequential(
            torch.nn.Linear(hidden * num_inputs, hidden),
            torch.nn.ReLU(),
        )
        self.final = torch.nn.Linear(hidden, 1)

    def forward(self, inputs: torch.Tensor,
                mask: torch.Tensor) -> torch.Tensor:
        hidden_states = self.layers(inputs)
        hidden_states = hidden_states * mask.unsqueeze(-1)
        scores = self.final(hidden_states).squeeze(-1)
        return scores * mask
class PolicyNet(torch.nn.Module):
    """Policy network mapping batched game states plus admissible actions to
    per-action scores.

    Depending on configuration flags copied from ``config.model`` (e.g.
    ``concat_strings``, ``use_observations``, ``use_ltl``,
    ``use_belief_graph``, ``recurrent_memory``), the network either encodes
    one concatenated "[OBS] ... [LTL] ... [ACTION] ..." string per action, or
    encodes observations / LTL formulas / belief graph / actions separately
    and concatenates the feature vectors before scoring them with
    :class:`ActionNet`.
    """

    def __init__(self, config: Namespace,
                 word_vocab: Vocabulary,
                 ltl_vocab: Vocabulary,
                 action_vocab: Vocabulary,
                 pretrain: bool,
                 graph_updater=None,
                 context_length: int = 1,
                 **kwargs) -> None:
        super(PolicyNet, self).__init__(**kwargs)
        # Zero-size parameter used only to track which device the module is on.
        self._dummy = torch.nn.Parameter(torch.empty(0))
        # TODO clean this up
        self.config = config
        # Copy every field of config.model onto self as an attribute
        # (e.g. self.concat_strings, self.use_ltl, ...).
        for k, v in vars(config.model).items():
            setattr(self, k, v)
        self.pretrain = pretrain
        self.context_length = context_length
        self.build(config, word_vocab=word_vocab,
                   ltl_vocab=ltl_vocab,
                   action_vocab=action_vocab,
                   pretrain=pretrain,
                   graph_updater=graph_updater)

    @property
    def device(self) -> str:
        # Device of the dummy parameter == device the whole module lives on.
        return self._dummy.device

    def load_vocab(self, word_vocab: Vocabulary,
                   ltl_vocab: Vocabulary,
                   action_vocab: Vocabulary) -> None:
        """Replace the vocabularies, rebuilding or reloading the affected
        encoders when their vocabulary size changed."""
        assert isinstance(word_vocab, Vocabulary)
        assert isinstance(ltl_vocab, Vocabulary)
        assert isinstance(action_vocab, Vocabulary)
        if self.concat_strings:
            # Single shared encoder over the union of all vocabularies plus
            # the separator tokens.
            self.word_vocab = word_vocab
            self.ltl_vocab = ltl_vocab
            self.action_vocab = action_vocab
            concat_vocab = action_vocab
            concat_vocab.name = 'concat-vocab'
            concat_vocab += ['[ACTION]']
            if self.use_observations:
                concat_vocab += word_vocab
                concat_vocab += ['[OBS]']
            if self.use_ltl:
                concat_vocab += ltl_vocab
                concat_vocab += ['[LTL]']
            self.text_encoder = TextEncoder(config=self.config.text_encoder,
                                            vocab=concat_vocab)
        else:
            self.ltl_vocab = ltl_vocab
            self.word_vocab = word_vocab
            self.action_vocab = action_vocab
            # A dedicated LTL encoder only exists when LTL is used and it is
            # not shared with the text encoder.
            diff_ltl = self.use_ltl and not self.same_ltl_text_encoder
            if diff_ltl and self.ltl_encoder is not None:
                if len(ltl_vocab) != len(self.ltl_encoder.vocab):
                    # Vocabulary size changed: rebuild from scratch.
                    self.ltl_encoder = TextEncoder(
                        config=self.config.ltl_encoder, vocab=ltl_vocab)
                else:
                    self.ltl_encoder.load_vocab(ltl_vocab)
            if self.text_encoder is not None:
                if len(word_vocab) != len(self.text_encoder.vocab):
                    self.text_encoder = TextEncoder(
                        config=self.config.text_encoder, vocab=word_vocab,)
                else:
                    self.text_encoder.load_vocab(word_vocab)

    def build(self, config: Namespace, word_vocab: Vocabulary,
              ltl_vocab: Vocabulary, action_vocab: Vocabulary,
              pretrain: bool, graph_updater) -> None:
        """Construct the encoders and the action-scoring head according to
        the configuration flags copied in ``__init__``."""
        # assert self.action_net_hidden_size == 768 if \
        #     self.use_pretrained_lm_for_text else True, \
        #     "Action net hidden size must match BERT output size of 768"
        self.text_encoder, self.ltl_encoder, self.graph_encoder, \
            self.actions_encoder = None, None, None, None
        if self.concat_strings:
            # One shared encoder over the concatenated input string.
            self.word_vocab = word_vocab
            self.ltl_vocab = ltl_vocab
            self.action_vocab = action_vocab
            concat_vocab = action_vocab
            concat_vocab.name = 'concat-vocab'
            concat_vocab += ['[ACTION]']
            if self.use_observations:
                concat_vocab += word_vocab
                concat_vocab += ['[OBS]']
            if self.use_ltl:
                concat_vocab += ltl_vocab
                concat_vocab += ['[LTL]']
            self.text_encoder = TextEncoder(config=config.text_encoder,
                                            vocab=concat_vocab)
            # 1 for the encoded admissible actions
            self.action_network = ActionNet(hidden=self.action_net_hidden_size,
                                            num_inputs=1)
            if self.recurrent_memory:
                num_inputs = 1
                self.recurrent_memory_unit = LSTMCell(
                    self.action_net_hidden_size * num_inputs,
                    self.action_net_hidden_size, use_bias=True)
        else:
            if pretrain:
                self.ltl_encoder = TextEncoder(config=config.ltl_encoder,
                                               vocab=ltl_vocab,)
            elif self.use_ltl:
                # assert not bool(self.same_ltl_text_encoder *
                #                 self.ltl_text_string_concat), \
                #     "Config violation: 'same_ltl_text_encoder' and " + \
                #     "'ltl_text_string_concat' can't both be True"
                if self.same_ltl_text_encoder:
                    # Shared encoder: merge the two vocabularies into each
                    # other so either encoder can tokenize both streams.
                    word_vocab += ltl_vocab
                    ltl_vocab += word_vocab
                else:
                    # ltl_vocab += word_vocab
                    self.ltl_encoder = TextEncoder(config=config.ltl_encoder,
                                                   vocab=ltl_vocab,)
            if self.use_observations:
                if pretrain and config.pretrain.text or not pretrain:
                    self.text_encoder = TextEncoder(config=config.text_encoder,
                                                    vocab=word_vocab,)
                if self.use_ltl and self.same_ltl_text_encoder and \
                        not pretrain:
                    # LTL is encoded with the very same module as text.
                    self.ltl_encoder = self.text_encoder
            if self.use_belief_graph:
                # The graph encoder is supplied from outside (GATA updater).
                self.graph_encoder = graph_updater
            # self.load_vocab(word_vocab, ltl_vocab,)
            self.ltl_vocab = ltl_vocab
            self.word_vocab = word_vocab
            if self.use_independent_actions_encoder:
                self.action_vocab = action_vocab
                self.actions_encoder = TextEncoder(
                    config=config.actions_encoder, vocab=action_vocab,)
            else:
                # Actions reuse the observation text encoder and vocabulary.
                self.action_vocab = self.word_vocab
                self.actions_encoder = self.text_encoder
            # 1 for the encoded admissible actions
            num_inputs = 1 + np.sum(np.array([
                self.use_observations,
                self.use_belief_graph,
                self.use_ltl]))
            self.action_network = ActionNet(hidden=self.action_net_hidden_size,
                                            num_inputs=num_inputs)
            if self.recurrent_memory:
                # The memory unit consumes the state features only (without
                # the per-action encoding), hence one input fewer.
                num_inputs -= 1
                self.recurrent_memory_unit = LSTMCell(
                    self.action_net_hidden_size * num_inputs,
                    self.action_net_hidden_size, use_bias=True)

    def encode_actions(self, actions: List[List[str]]) -> torch.Tensor:
        """Encode the admissible-action strings of every batch element.

        Each action's word embeddings are summed over the word dimension and
        normalized by the (non-zero) word count, then the per-batch action
        lists are padded to a common length.

        returns: (encoded_actions, actions_mask) where encoded_actions is a
        torch Tensor [batch-size, num-actions, embed-size] and actions_mask
        is [batch-size, num-actions] with 1 for real, 0 for padded actions.
        """
        # we first sum over the length (dim=1) then pad the num actions
        # since num actions per batch may not be the same size
        if self.use_pretrained_lm_for_actions:
            actions_mask = list()
            unwrapped_actions = list()
            batch_size = len(actions)
            max_num_action = max_len(actions)
            # Flatten all (padded) action lists into one big batch for the LM.
            for _actions in actions:
                actions_len = len(_actions)
                padding_len = (max_num_action - actions_len)
                unwrapped_actions.extend(
                    _actions + [self.actions_encoder.tokenizer.pad_token] *
                    padding_len)
                actions_mask.extend([1] * len(_actions) + [0] * (padding_len))
            encoded_actions, _mask = self.actions_encoder(
                unwrapped_actions)
            max_word_num = _mask.shape[1]
            actions_mask = torch.tensor(actions_mask, device=self.device)
            encoded_actions = encoded_actions.view(
                batch_size, max_num_action, max_word_num, -1)
            actions_mask = actions_mask.view(
                batch_size, max_num_action)
            # batch-size x max-num-action
            # batch-size x max-num-action x hid
            # _mask = torch.sum(actions_mask, -1)
            _mask = actions_mask
            encoded_actions = torch.sum(encoded_actions, dim=-2)
            # Avoid division by zero for fully padded slots.
            tmp = torch.eq(_mask, 0).float().to(self.device)
            _mask = _mask + tmp
            # batch-size x max-num-action x hid
            encoded_actions = encoded_actions / _mask.unsqueeze(-1)
        else:
            batch_size = len(actions)
            max_num_action = max_len(actions)
            input_action_candidate_list = list()
            # Tokenize each batch element's actions into padded word-id grids.
            for i in range(batch_size):
                word_list = [item.split() for item in actions[i]]
                word_id_list = [[self.action_vocab[tok] for tok in tokens]
                                for tokens in word_list]
                input_word = pad_sequences(
                    word_id_list, maxlen=max_len(word_id_list)).astype('int32')
                word_level = to_pt(input_word, True)
                input_action_candidate_list.append(word_level)
            max_word_num = max([item.size(1)
                                for item in input_action_candidate_list])
            # Copy each element's grid into one zero-padded 3-D tensor.
            inputs = torch.zeros(
                (batch_size, max_num_action, max_word_num),
                device=self.device, dtype=torch.long)
            for i in range(batch_size):
                j, k = input_action_candidate_list[i].shape
                assert j == input_action_candidate_list[i].size(0)
                assert k == input_action_candidate_list[i].size(1)
                inputs[i, :j, :k] = input_action_candidate_list[i]
            inputs = inputs.view(batch_size * max_num_action, max_word_num)
            encoded_actions, actions_mask = self.actions_encoder(
                inputs, compute_word_ids=False)
            if self.actions_encoder.lstm_backbone:
                # LSTM/MLP backbones already return one vector per action.
                encoded_actions = encoded_actions.view(
                    batch_size, max_num_action, -1)
            elif self.actions_encoder.mlp_backbone:
                encoded_actions = encoded_actions.view(
                    batch_size, max_num_action, -1)
            else:
                # Per-word embeddings: sum over the word dimension.
                encoded_actions = encoded_actions.view(
                    batch_size, max_num_action, max_word_num, -1)
                encoded_actions = torch.sum(encoded_actions, dim=-2)
            actions_mask = actions_mask.view(
                batch_size, max_num_action, max_word_num)
            # batch-size x max-num-action
            _mask = torch.sum(actions_mask, -1)
            # batch-size x max-num-action x hid
            tmp = torch.eq(_mask, 0).float().to(self.device)
            _mask = _mask + tmp
            # batch-size x max-num-action x hid
            encoded_actions = encoded_actions / _mask.unsqueeze(-1)
            # Collapse the word mask: an action is real if any word is real.
            actions_mask = actions_mask.byte().any(-1).float()
        return encoded_actions, actions_mask

    def combine_features(
            self, num_actions: int, batch_size: int,
            encoded_obs: torch.Tensor, obs_mask: torch.Tensor,
            encoded_bg: torch.Tensor, bg_mask: torch.Tensor,
            encoded_ltl: torch.Tensor, ltl_mask: torch.Tensor,
            previous_hidden: torch.Tensor = None,
            previous_cell: torch.Tensor = None) -> torch.Tensor:
        """Masked-mean-pool each available feature stream (observations, LTL,
        belief graph), concatenate them along the feature dimension, run the
        optional recurrent memory, and replicate the result once per action.

        Returns (features [batch, num_actions, hid*k], hidden, cell)."""
        if self.concat_features:
            encoded_features = None
            for name, feature, mask in [
                    ('obs', encoded_obs, obs_mask),
                    ('ltl', encoded_ltl, ltl_mask),
                    ('bg', encoded_bg, bg_mask),
            ]:
                if feature is None:
                    continue
                # `sumit` is True when the encoder already pooled the sequence
                # into a single vector (LSTM/MLP backbones), so no extra sum
                # over the sequence dimension is needed.
                if name == 'obs':
                    sumit = self.text_encoder.lstm_backbone or \
                        self.text_encoder.mlp_backbone if\
                        self.text_encoder else None
                elif name == 'ltl':
                    sumit = self.ltl_encoder.lstm_backbone or \
                        self.ltl_encoder.mlp_backbone if\
                        self.ltl_encoder else None
                elif name == 'bg':
                    sumit = False
                # masked mean
                # if name == 'obs' and not self.use_pretrained_lm_for_text or \
                #         name == 'ltl' and not self.use_pretrained_lm_for_ltl:
                _mask = torch.sum(mask, -1)  # batch
                if not sumit:
                    feature = torch.sum(feature, dim=1)  # batch x hid
                # Guard against division by zero for empty sequences.
                tmp = torch.eq(_mask, 0).float().to(self.device)
                _mask = _mask + tmp
                feature = feature / \
                    _mask.unsqueeze(-1)  # batch x hid
                # TODO check this for pretraining
                # if num_actions > 1:
                #     feature = torch.stack([feature] * num_actions, dim=1)
                if encoded_features is None:
                    encoded_features = feature
                else:
                    encoded_features = torch.cat([encoded_features, feature],
                                                 dim=-1)
        else:
            logger.critical(
                "Concat features is disable but no other " +
                "aggregation mechanism exists")
            raise RuntimeError(
                "Concat features is disable but no other " +
                "aggregation mechanism exists")
        if self.recurrent_memory:
            previous_hidden, previous_cell = \
                self.recurrent_memory_unit(encoded_features,
                                           h_0=previous_hidden,
                                           c_0=previous_cell)
        # Same state features are shared by every action candidate.
        return torch.stack([encoded_features] * num_actions, dim=1), \
            previous_hidden, previous_cell

    def encode(self, states: BatchedStates,
               admissible_actions: List[Actions],
               previous_hidden: torch.Tensor,
               previous_cell: torch.Tensor) -> torch.Tensor:
        """Encode states and admissible actions into per-action features.

        @returns:
            encoded_features: torch Tensor of size
                [batch-size, num-action-candidates, embedding-size]
            mask: torch Tensor of size [batch-size, num-action-candidates]
                used to mask the padded actions from the action network
        """
        encoded_obs, obs_mask, encoded_bg, bg_mask, encoded_ltl, ltl_mask = \
            tuple([None] * 6)
        if self.concat_strings:
            # One "[OBS] ... [LTL] ... [ACTION] ..." string per action slot.
            # encoded_obs, obs_mask = self.text_encoder([
            obs = None
            ltl = None
            batch_size = len(admissible_actions)
            if self.use_observations:
                obs = [
                    ' '.join(['[OBS]', obs]) for
                    obs in states.observations]
            if self.use_ltl:
                ltl = [
                    ' '.join(['[LTL]', ltl.tokenize()]) for
                    ltl in states.ltl_formulas]
            max_num_action = max_len(admissible_actions)
            inputs = list()
            final_mask = list()
            for i, actions in enumerate(admissible_actions):
                # Pad every action list to the same length with pad tokens.
                pad = [self.text_encoder.vocab.pad_token for _ in range(
                    max_num_action - len(actions))]
                final_mask.extend([1] * len(actions) + [0] * len(pad))
                actions += pad
                for action in actions:
                    if obs and ltl:
                        inputs.append(
                            ' '.join([obs[i], ltl[i], '[ACTION]', action]))
                    elif obs:
                        inputs.append(
                            ' '.join([obs[i], '[ACTION]', action]))
                    elif ltl:
                        inputs.append(
                            ' '.join([ltl[i], '[ACTION]', action]))
            encodings, mask = self.text_encoder(inputs)
            # Masked mean over the word dimension.
            _mask = torch.sum(mask, -1)
            if not self.text_encoder.lstm_backbone:
                encodings = torch.sum(encodings, dim=1)  # batch x hid
            tmp = torch.eq(_mask, 0).float().to(self.device)
            _mask = _mask + tmp
            encodings = encodings / \
                _mask.unsqueeze(-1)  # batch x hid
            encodings = encodings.reshape((batch_size, max_num_action,
                                           self.action_net_hidden_size))
            final_mask = torch.tensor(final_mask, device=self.device)
            final_mask = final_mask.view(batch_size, max_num_action)
            if self.recurrent_memory:
                previous_hidden, previous_cell = \
                    self.recurrent_memory_unit(encodings,
                                               h_0=previous_hidden,
                                               c_0=previous_cell)
            return encodings, final_mask, previous_hidden, previous_cell
        else:
            obs = list()
            ltls = list()
            if self.context_length == 1:
                if self.use_ltl:
                    ltls = [ltl.tokenize() for ltl in states.ltl_formulas]
                obs = states.observations
            else:
                # Prepend the last (context_length - 1) past observations and
                # LTL formulas, separated by <obs> / <ltl> markers.
                for state in states:
                    obs.append('<obs> ' + ' <obs> '.join(
                        [s.observation for s in
                         state.past[-(self.context_length - 1):] + [state]]
                    ))
                    ltls.append('<ltl> ' + ' <ltl> '.join(
                        [s.ltl.tokenize() for s in
                         state.past[-(self.context_length - 1):] + [state]]
                    ))
            if self.use_observations:
                encoded_obs, obs_mask = self.text_encoder(obs)
            if self.use_ltl:
                encoded_ltl, ltl_mask = self.ltl_encoder(ltls)
            if self.use_belief_graph:
                if self.graph_encoder.real_valued_graph:
                    encoded_bg, bg_mask = self.graph_encoder.encode_graph(
                        torch.stack([bg._facts if bg is not None else [] for
                                     bg in states.belief_graphs]))
                else:
                    encoded_bg, bg_mask = self.graph_encoder.encode_graph(
                        [bg.facts_as_triplets if bg is not None else [] for
                         bg in states.belief_graphs])
            encoded_actions, actions_mask = \
                self.encode_actions(admissible_actions)
            batch_size, num_actions, _ = encoded_actions.shape
            encoded_features, previous_hidden, previous_cell = \
                self.combine_features(
                    num_actions, batch_size,
                    encoded_obs, obs_mask, encoded_bg, bg_mask,
                    encoded_ltl, ltl_mask,
                    previous_hidden, previous_cell)
            return (torch.cat([encoded_actions, encoded_features], dim=-1),
                    actions_mask, previous_hidden, previous_cell)

    def compute_inverse_dynamics_loss(self, states: BatchedStates):
        """Inverse-dynamics pretraining loss: predict the action taken
        between consecutive observations.  Returns None when fewer than two
        observations are available."""
        obs = states.observations
        if len(obs) <= 1:
            return None
        loss = self.text_encoder.compute_inverse_dynamics_loss(
            obs[:-1], obs[1:], states.actions)
        return loss

    def mlm_loss(self, observations: List[str]) -> torch.Tensor:
        """Masked-language-model pretraining loss over the observation
        strings; None when no text encoder exists."""
        loss = None
        if self.text_encoder is not None:
            loss = self.text_encoder.compute_mlm_loss(observations)
        # if self.ltl_encoder is not None:
        #     loss += self.ltl_encoder.mlm_loss(observations)
        return loss

    def forward(self, states: BatchedStates, admissible_actions: List[Actions],
                previous_hidden: torch.Tensor = None,
                previous_cell: torch.Tensor = None
                ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Score every admissible action for every state in the batch.

        Returns (action_scores, action_mask, hidden, cell)."""
        inputs, mask, previous_hidden, previous_cell = self.encode(
            states, admissible_actions, previous_hidden, previous_cell)
        return self.action_network(inputs, mask), mask, \
            previous_hidden, previous_cell
| 21,064 | 44.301075 | 79 | py |
period_graph | period_graph-master/period_graph/src/sage/mac_mp_queue.py |
# WARNING: Random code copied from off the internet.
# Code copied from https://github.com/keras-team/autokeras/issues/368
import multiprocessing
import multiprocessing.queues
class SharedCounter(object):
    """A process-safe integer counter.

    ``multiprocessing.Value`` serializes individual reads and writes, but a
    bare ``n += 1`` is a read followed by a write, so two processes can race
    between the two steps.  Holding the Value's lock around the update makes
    the increment atomic.

    Adapted from Eli Bendersky's blog:
    http://eli.thegreenplace.net/2012/01/04/shared-counter-with-pythons-multiprocessing/
    """

    def __init__(self, n = 0):
        # 'i' -> C signed int shared across processes.
        self.count = multiprocessing.Value('i', n)

    def increment(self, n = 1):
        """Atomically add n (default 1, may be negative) to the counter."""
        with self.count.get_lock():
            self.count.value += n

    @property
    def value(self):
        """Current value of the counter."""
        return self.count.value
class MacQueue(multiprocessing.queues.Queue):
    """ A portable implementation of multiprocessing.Queue.

    Because of multithreading / multiprocessing semantics, Queue.qsize() may
    raise the NotImplementedError exception on Unix platforms like Mac OS X
    where sem_getvalue() is not implemented. This subclass addresses this
    problem by using a synchronized shared counter (initialized to zero) and
    increasing / decreasing its value every time the put() and get() methods
    are called, respectively. This not only prevents NotImplementedError from
    being raised, but also allows us to implement a reliable version of both
    qsize() and empty().
    """

    def __init__(self, *args, **kwargs):
        # BUGFIX: on Python 3, multiprocessing.queues.Queue.__init__ requires
        # an explicit `ctx` keyword; supply the default context unless the
        # caller passed one.
        kwargs.setdefault('ctx', multiprocessing.get_context())
        super(MacQueue, self).__init__(*args, **kwargs)
        # NOTE(review): the counter lives on this (parent-side) wrapper;
        # whether it survives pickling of the queue into a child process is
        # unchanged from the original implementation -- verify if children
        # call qsize().
        self.size = SharedCounter(0)

    def put(self, *args, **kwargs):
        # BUGFIX: count the item only after the underlying put() succeeded,
        # so a raised queue.Full does not inflate the counter.
        super(MacQueue, self).put(*args, **kwargs)
        self.size.increment(1)

    def get(self, *args, **kwargs):
        # BUGFIX: decrement only after a successful get(), so a raised
        # queue.Empty (e.g. with a timeout) does not deflate the counter.
        item = super(MacQueue, self).get(*args, **kwargs)
        self.size.increment(-1)
        return item

    def qsize(self):
        """ Reliable implementation of multiprocessing.Queue.qsize() """
        return self.size.value

    def empty(self):
        """ Reliable implementation of multiprocessing.Queue.empty() """
        return not self.qsize()
| 2,659 | 35.944444 | 108 | py |
period_graph | period_graph-master/period_graph/src/neural-network/model_bundle.py |
import os
import pickle as pk
from keras.models import load_model
class trivialPCA:
    """Identity stand-in used when no fitted PCA object is available.

    Exposes the same ``transform`` interface as a scikit-learn PCA but
    returns its input unchanged.
    """

    def __init__(self):
        pass

    def transform(self, x):
        """Return x unchanged (identity projection)."""
        return x
class ModelBundle:
    """Container for one trained model triple (PCA preprocessor, MLP, CNN)
    together with its identifier and optional base-network provenance.

    Persistence layout (under ``<path>/<model_id>/``):
        PCs<model_id>.pkl   pickled PCA (absent for models without PCA)
        MLP<model_id>.h5    Keras MLP
        CNN<model_id>.h5    Keras CNN
    """

    def __init__(self, *args, **kwds):
        # Two construction modes:
        #   ModelBundle(model_id)                 -- empty shell, filled by _load()
        #   ModelBundle(model_id, PCA, MLP, CNN)  -- wraps freshly trained parts
        if len(args) == 1:
            model_id = args[0]
            PCA, MLP, CNN = None, None, None
        elif len(args) == 4:
            model_id, PCA, MLP, CNN = args
        else:
            raise NotImplementedError
        # Optional keyword `base_network`: the bundle this one was
        # fine-tuned from; only its name is recorded.
        try:
            self._base_network_name = kwds['base_network'].name()
        except KeyError:
            self._base_network_name = None
        self.model_id = model_id
        self.PCA = PCA
        self.MLP = MLP
        self.CNN = CNN

    def name(self):
        """Return this bundle's identifier string."""
        return self.model_id

    def base_network_name(self):
        """Name of the bundle this one was fine-tuned from, or None."""
        return self._base_network_name

    def components(self):
        """Return the (PCA, MLP, CNN) triple."""
        return self.PCA, self.MLP, self.CNN

    def _load(self, path):
        """Populate PCA/MLP/CNN from the files under ``path/<model_id>/``."""
        spath = os.path.join(path, self.model_id, '')
        try:
            self.PCA = pk.load(open(spath+'PCs'+ self.model_id +'.pkl','rb'))
        except IOError:
            # No pickled PCA on disk: fall back to the identity transform.
            self.PCA = trivialPCA()
        self.MLP = load_model(spath+'MLP'+self.model_id+'.h5')
        self.CNN = load_model(spath+'CNN'+self.model_id+'.h5')

    def save(self, path, also_to_newest=False):
        """Write the three components to ``path/<model_id>/``; when
        `also_to_newest` is set, duplicate them under ``path/_newest/``."""
        if also_to_newest:
            names = [self.model_id, "_newest"]
        else:
            names = [self.model_id]
        for name in names:
            spath = os.path.join(path, name, '')
            try:
                os.mkdir(spath)
            except FileExistsError:
                pass
            pk.dump(self.PCA, open(spath+'PCs'+ name +'.pkl',"wb"))
            self.MLP.save(spath+'MLP'+name+'.h5')
            self.CNN.save(spath+'CNN'+name+'.h5')

    def save_parameters(self, path, setup_dic, params_dic, also_to_newest=False):
        """Write a human-readable Params<name>.txt describing the setup and
        hyperparameters next to the saved model files."""
        # IDEA: Model bundles should perhaps have a header where a dictionary of
        #       some of these params are kept (such as if it is a finetuned model).
        # IDEA: Model bundles could also pickle the parameter dictionary for later.
        if also_to_newest:
            names = [self.model_id, "_newest"]
        else:
            names = [self.model_id]
        for name in names:
            fname = os.path.join(path, name, "Params"+name+".txt")
            with open(fname,"w+") as f:
                f.write("\n*****************\n")
                f.write("Network Training Params for '{}'".format(self.model_id))
                f.write("\n\n")
                # Print key-value pairs according to special formatting instructions
                # Determined by dictionary keys.
                B = ["Base network (None if new): " + str(self.base_network_name()),
                     "",
                     "Setup parameters:",
                     tall_dic_str(setup_dic),
                     "",
                     "Network architecture hyperparameters:",
                     tall_dic_str(params_dic), "\n"]
                f.write('\n'.join(B))
            # strg = ["Network permanent name:              ",
            #         "Fresh network? Else finetuned:       ",
            #         "New network name:                    ",
            #         "Num cohomology matrices / pair:      ",
            #         "Balance the training set:            ",
            #         "PCA preprocessing with 23 PCs:       ",
            #         "Training set filename:               ",
            #         "Total time elapsed:                  ",
            #         "Reference network (if finetuning):   ",
            #         "Random seed:                         "]
            # B = [s+str(n) for s,n in list(zip(strg,paramsOther))]
            print("\nNetwork parameters written to: ",fname,"\n")
        return

    def save_training_data_info(self, path, data_dic):
        """
        Writes information regarding the preparation of the training data to the model folder.
        """
        fname = os.path.join(path, self.name(), "TrainDataInfo" + self.name() + ".txt")
        notice_msg = ("NOTE: the random seed has no effect on RandomSampler, as there is a "
                      + "separate seed set in that sampler. Future improvements might remove "
                      + "this issue. For the time being, we will be untroubled by this "
                      + "non-critical loss of generality.")
        B = ["Data info:", tall_dic_str(data_dic), '', notice_msg, '\n']
        with open(fname, 'w') as f:
            f.write('\n'.join(B))
        return

    def evaluate_models(self, data):
        """Run MLP, CNN and their product ensemble on `data` and return the
        probabilities plus the input rows ranked by each score.

        Returns (pCN, rankCN, pNN, rankNN, pEN, rankEN)."""
        test_x,test_y,test_M = data
        print("PC PROJECTIONS STARTED")
        test_x0 = self.PCA.transform(test_x)
        print("PC PROJECTIONS COMPLETE")
        # batch size
        BSTEST = 10
        pNN = self.MLP.predict(test_x0).ravel()
        pCN = self.CNN.predict(test_M, batch_size=BSTEST, verbose=1).flatten()
        ## COMBINED: ENSEMBLE METHOD OF MLP + CNN
        # TODO: Is this equivalent after thresholding?
        pEN = pCN*pNN
        # ranking from highest prob to lowest prob.
        ranking = (lambda v : (test_x[(-v).argsort()]).astype(int))
        return pCN, ranking(pCN), pNN, ranking(pNN), pEN, ranking(pEN)
# Loader function to reconstruct object.
def load_model_bundle(path, model_id):
    """Reconstruct a ModelBundle by loading its saved components from disk."""
    bundle = ModelBundle(model_id)
    bundle._load(path)
    return bundle
def tall_dic_str(D):
    """Pretty-print a dict one ``key : value,`` pair per line, with the keys
    left-padded to a common width, wrapped in '{' ... '}' lines.

    Fixes/generalizations over the original:
    - an empty dict no longer raises ValueError from max() on an empty
      iterable (it yields an empty body);
    - keys are stringified before measuring, so non-string keys work.
    """
    if not D:
        return "\n".join(['{', '', '}'])
    max_key_len = max(len(str(k)) for k in D)
    format_string = "{0:" + str(max_key_len) + "} : {1},"
    s = "\n".join(format_string.format(str(k), str(v)) for k, v in D.items())
    return "\n".join(['{', s, '}'])
# Acquire the correct model given the parameters.
def fetch_model(NN_PATH, ReadNewest, UseModel):
    """
    Returns the model bundle specified by the input parameters.

    NN_PATH   : root directory containing 'SavedModels' and 'SpecialModels'.
    ReadNewest: when True, load the most recently modified bundle from
                'SavedModels' and ignore `UseModel`.
    UseModel  : bundle name to look up in 'SpecialModels' then 'SavedModels'.

    Raises IOError when no suitable model can be found.
    """
    MODEL_DIRS = ["SpecialModels", "SavedModels"]
    if ReadNewest:
        model_path = 'SavedModels'
        fname_list = os.listdir(os.path.join(NN_PATH, model_path))
        if not fname_list:
            error_msg = ("No models present in the 'SavedModels' directory. Please either train "
                         + "A network using the provided utilities, or use one of the "
                         + "presupplied models in the 'SpecialModels' directory.")
            raise IOError(error_msg)
        # Newest = largest modification time.
        key_func = lambda fname: os.path.getmtime(os.path.join(NN_PATH, model_path, fname))
        model_name = max(fname_list, key=key_func)
    else:
        model_name = UseModel
        for dir in MODEL_DIRS:
            # BUGFIX: use os.path.join instead of string concatenation
            # (`NN_PATH + dir`), consistent with the rest of this function.
            if model_name in os.listdir(os.path.join(NN_PATH, dir)):
                model_path = dir
                break
        else:
            error_msg = "No model corresponding to '{}' found.".format(UseModel)
            raise IOError(error_msg)
    return load_model_bundle(os.path.join(NN_PATH, model_path), model_name)
| 7,104 | 32.833333 | 98 | py |
period_graph | period_graph-master/period_graph/src/neural-network/AI_eval.py | # Python 3.7.3.
## THIS FILE saves only to TestingOutputs
import os, sys, scipy.io, scipy.linalg, random, numpy as np
from time import time
###
# In CONFIG
# -- paths
# -- balance
# -- PCA (how many components
# -- number cohomology mats
# -- max-data-size : Read files until file sizes exceeds max-data-size
# -- output : Saved models
# -- output : Saved predictions
# -- hyperparameter config.
from NNCONFIG import *
#######
# Keras import
# We need to do sketchy path stuff when called from sage.
os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # don't display warnings; only errors
try:
from keras.models import load_model
except ModuleNotFoundError:
sys.path.insert(0, PYTHON3_LOCAL_SITE_PKG)
from keras.models import load_model
from util import *
from model_bundle import *
from AI_functions import *
from data_handling import *
#**************************************************
# Setup input parameters.
# File containing pretrained networks.
# ModelNum = '_newest' if ReadNewest else UseModel
#**************************************************
# Read in evaluation data.
fnames = sorted(list(dataShape.keys()))
sampler = BasicSampler
data_set = DataSet(SAGE_INPUT_DIR, dataShape, ratio=None)
data_gruppe, is_data_labelled = data_set.read_all()
test_all = ReformatData(data_gruppe,is_data_labelled, NumMats)
test_x,test_y,test_M = test_all # test_y is 'None' if the data is unlabelled.
#**************************************************
# load and evaluate models.
MB = fetch_model(NN_PATH, ReadNewest, UseModel)
dataout = MB.evaluate_models(test_all)
WritePredictionsToSagePipe(NN_PATH, dataout)
| 1,640 | 25.047619 | 77 | py |
period_graph | period_graph-master/period_graph/src/neural-network/AI_functions.py | ###############################################################################################
###############################################################################################
###############################################################################################
###############################################################################################
# Utilities for creating and fine-tuning neural networks in Keras.
import os, sys, scipy.io, scipy.linalg, time, random, pickle
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL']='2' #don't display warnings; only errors
import numpy as np, tensorflow as tf, matplotlib.pylab as plt
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from numpy import genfromtxt
#from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix
from time import time, asctime
import pickle as pk
import tensorflow as tf
from keras.models import Sequential,load_model
from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
from keras.utils import to_categorical
from keras import optimizers
from util import *
from model_bundle import *
from data_handling import *
def generate_network_name(reference_network=None):
    """Create a model name from the current time, e.g. 'Feb_10_12,34,56'.

    When `reference_network` is given, mark the result as a fine-tuned
    descendant: '<reference>+<timestamp>_FT'.
    """
    if reference_network is not None:
        return reference_network + '+' + generate_network_name() + '_FT'
    # asctime() -> 'Wed Feb 10 12:34:56 2021'; colons are replaced by commas
    # so the name is filesystem-safe, then month/day/time are joined by '_'.
    stamp_parts = asctime().replace(':', ',').split()
    return '_'.join(stamp_parts[1:4])
def finetune_bundle(old_model_bundle, data, **kwds):
    """Fine-tune the MLP and CNN of an existing ModelBundle on new data.

    old_model_bundle : ModelBundle whose PCA/MLP/CNN components are reused.
    data             : (train_x, train_y, train_M) training triple.
    Recognized keyword options (defaults): EpochNum (5), BatchSize (32),
    Balancing (True).

    All layers of the saved networks except the last two are frozen, and a
    fresh trainable head (1024 units + sigmoid output) is stacked on top via
    MLPFineTune/CNNFineTune.  Returns a new ModelBundle named
    '<old-name>+<timestamp>_FT' that records the old bundle as its base.
    """
    # kwds.get(...) replaces the old try/except-KeyError boilerplate.
    EpochNum = kwds.get('EpochNum', 5)
    BatchSize = kwds.get('BatchSize', 32)
    Balancing = kwds.get('Balancing', True)

    train_x, train_y, train_M = data
    if Balancing:
        train_x, train_y, train_M = UpSampleToBalance(train_x, train_y, train_M)

    saved_pca, saved_NN, saved_CN = old_model_bundle.components()
    train_x = saved_pca.transform(train_x)

    # Freeze everything except the last 2 layers of each saved network.
    for layer in saved_NN.layers[:-2]:
        layer.trainable = False
    for layer in saved_CN.layers[:-2]:
        layer.trainable = False

    bs, ep = BatchSize, EpochNum
    additional_layer, act = 1024, "relu"
    finetuned_NN = MLPFineTune(saved_NN, additional_layer, act)
    finetuned_CN = CNNFineTune(saved_CN, additional_layer, act)

    print("\n\nSTEP 3f: Fine-tuning Filter 1 (MLP using X,Y)... ")
    finetuned_NN.fit(train_x, train_y, batch_size=bs, epochs=ep, verbose=1)  # Main MLP-fine-tuning.
    print(" ...done.\n")

    print("\n\nSTEP 4f: Fine-tuning Filter 2 (CNN using X,Y)... ")
    finetuned_CN.fit(train_M, train_y, batch_size=bs, epochs=ep, verbose=1)  # Main CNN-fine-tuning
    print(" ...done.\n")

    return ModelBundle(generate_network_name(old_model_bundle.name()),
                       saved_pca, finetuned_NN, finetuned_CN,
                       base_network=old_model_bundle)
####
def train_model_bundle(data, NumMats,
                       DoPCA = True,
                       PCAk = 23,
                       BatchSize = 2000,
                       EpochNum = 100,
                       StepSizeMLP = 1e-5,
                       StepSizeCNN = 1e-5,
                       Balancing = True):
    """Train a fresh (PCA, MLP, CNN) bundle from scratch.

    data    : (train_x, train_y, train_M) training triple.
    NumMats : number of cohomology matrices per pair (CNN input channels).
    Other keyword arguments control PCA preprocessing, class balancing and
    the training hyperparameters.

    Returns (ModelBundle, paramsNN, paramsCN) where the params lists record
    the hyperparameters for later bookkeeping.
    """
    train_x, train_y, train_M = data
    bs, ep = BatchSize, EpochNum

    if Balancing:
        train_x,train_y,train_M = UpSampleToBalance(train_x,train_y,train_M)

    # Substantial data processing.
    if DoPCA:
        train_x,pca = PerformPCA(PCAk, train_x)
    else:
        # BUGFIX: use the identity transform instead of None so that
        # ModelBundle.evaluate_models (which calls self.PCA.transform) also
        # works for bundles trained without PCA.
        pca = trivialPCA()

    # ** SUPERVISED: MULTILAYER PERCEPTRON
    print("\n\nSTEP 3: Training Filter 1 (MLP using X,Y)... ")
    # hlsizes,numiters,act = (100,1000,1000,1000,1000,100,100), 100, "relu"
    hlsizes,numiters,act = (100,1000,1000), 100, "relu"
    NN = MLPClassifier0(hlsizes,StepSizeMLP,act,train_x.shape[1])
    NN.fit(train_x, train_y, batch_size=bs, epochs=ep, verbose=1)  # Main MLP-Training.
    print(" ...done.")

    # ** SUPERVISED: CONVNET
    # hyperparameters are contained in util.py
    print("\n\nSTEP 4: Training Filter 2 (CNN using M,Y)... ")
    CN = CNNClassifier(NumMats,train_M.shape[1],StepSizeCNN)
    CN.fit(train_M, train_y, batch_size=bs, epochs=ep, verbose=1)  # Main CNN-Training
    print(" ...done.\n")

    # ** SAVE WEIGHTS & MODELS
    paramsNN,paramsCN = [hlsizes,StepSizeMLP,StepSizeCNN,numiters],[bs,ep]
    return ModelBundle(generate_network_name(), pca, NN, CN), paramsNN, paramsCN
###############################################################################################
# Classifier constructors.
def CNNClassifier(k,l,ss):
    """Build and compile a small CNN binary classifier.

    k  : number of cohomology matrices per pair (input channels when l == 21).
    l  : spatial size of the input matrices; only 5 and 21 add an input layer
         here (other values would produce a model without an input Conv2D --
         presumably never happens; confirm against callers).
    ss : step size -- NOTE(review): unused; the model is compiled with Adam
         at its default learning rate.  Kept for interface stability.
    """
    model = Sequential()
    if l==5:
        # NOTE(review): for l == 5 the channel count is l (5), not k --
        # looks intentional for the 5x5x5 case, but verify.
        model.add(Conv2D(64, kernel_size=3, activation='relu',input_shape=(l, l, l)))
    elif l==21:
        model.add(Conv2D(64, kernel_size=3, activation='relu',input_shape=(l, l, k)))
    model.add(Conv2D(16, kernel_size=3, activation='relu'))
    model.add(Flatten())  # converts 2D feature maps to 1D feature vectors
    model.add(Dense(100, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # BUGFIX/cleanup: removed the unused SGD optimizer that was constructed
    # from `ss` but never passed to compile(); Adam was always used.
    opti = optimizers.Adam()
    model.compile(loss='binary_crossentropy',
                  optimizer=opti,
                  metrics=['accuracy'])
    return model
def MLPClassifier0(hlsizes,ss,act,insz):
    """Build and compile an MLP binary classifier.

    hlsizes : sequence of hidden-layer widths (at least one entry).
    ss      : step size; not used for compilation (Adam defaults apply).
    act     : activation name for the hidden layers.
    insz    : input feature dimension.
    """
    model = Sequential()
    first_width, *other_widths = hlsizes
    model.add(Dense(first_width, input_dim=insz, kernel_initializer="uniform",
                    activation=act))
    for width in other_widths:
        model.add(Dense(width, kernel_initializer="uniform", activation=act))
    model.add(Dense(1, kernel_initializer="uniform", activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'])
    return model
def CNNFineTune(oldmodel,numlay,act):
    """Stack a fresh trainable head on top of a (partially frozen) CNN.

    oldmodel : existing Keras model used as the feature extractor.
    numlay   : width of the new dense layer.
    act      : activation of the new dense layer.
    """
    wrapper = Sequential()
    wrapper.add(oldmodel)
    # New trainable head: dense projection + sigmoid output.
    wrapper.add(Dense(numlay, activation=act))
    wrapper.add(Dense(1, activation='sigmoid'))
    wrapper.compile(loss='binary_crossentropy',
                    optimizer=optimizers.Adam(),
                    metrics=['accuracy'])
    return wrapper
def MLPFineTune(oldmodel,numlay,act):
    """Stack a fresh trainable head on top of a (partially frozen) MLP.

    oldmodel : existing Keras model used as the feature extractor.
    numlay   : width of the new dense layer.
    act      : activation of the new dense layer.
    """
    wrapper = Sequential()
    wrapper.add(oldmodel)
    # New trainable head: dense projection + sigmoid output.
    wrapper.add(Dense(numlay, activation=act))
    wrapper.add(Dense(1, activation='sigmoid'))
    wrapper.compile(loss='binary_crossentropy',
                    optimizer=optimizers.Adam(),
                    metrics=['accuracy'])
    return wrapper
| 7,500 | 33.726852 | 99 | py |
evaluate | evaluate-main/setup.py | # Lint as: python3
""" HuggingFace/Evaluate is an open library for evaluation.
Note:
VERSION needs to be formatted following the MAJOR.MINOR.PATCH convention
(we need to follow this convention to be able to retrieve versioned scripts)
Simple check list for release from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
0. Prerequisites:
- Dependencies:
- twine: "pip install twine"
- Create an account in (and join the 'evaluate' project):
- PyPI: https://pypi.org/
- Test PyPI: https://test.pypi.org/
1. Change the version in:
- __init__.py
- setup.py
2. Commit these changes: "git commit -m 'Release: VERSION'"
3. Add a tag in git to mark the release: "git tag VERSION -m 'Add tag VERSION for pypi'"
Push the tag to remote: git push --tags origin main
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
First, delete any "build" directory that may exist from previous builds.
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv/notebook by running:
pip install huggingface_hub fsspec aiohttp
pip install -U tqdm
pip install -i https://testpypi.python.org/pypi evaluate
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Fill release notes in the tag in github once everything is looking hunky-dory.
8. Change the version in __init__.py and setup.py to X.X.X+1.dev0 (e.g. VERSION=1.18.3 -> 1.18.4.dev0).
Then push the change with a message 'set dev version'
"""
import os
from setuptools import find_packages, setup
REQUIRED_PKGS = [
# We need datasets as a backend
"datasets>=2.0.0",
# We use numpy>=1.17 to have np.random.Generator (Dataset shuffling)
"numpy>=1.17",
# For smart caching dataset processing
"dill",
# For performance gains with apache arrow
"pandas",
# for downloading datasets over HTTPS
"requests>=2.19.0",
# progress bars in download and scripts
"tqdm>=4.62.1",
# for fast hashing
"xxhash",
# for better multiprocessing
"multiprocess",
# to get metadata of optional dependencies such as torch or tensorflow for Python versions that don't have it
"importlib_metadata;python_version<'3.8'",
# to save datasets locally or on any filesystem
# minimum 2021.05.0 to have the AbstractArchiveFileSystem
"fsspec[http]>=2021.05.0",
# To get datasets from the Datasets Hub on huggingface.co
"huggingface-hub>=0.7.0",
# Utilities from PyPA to e.g., compare versions
"packaging",
"responses<0.19",
]
TEMPLATE_REQUIRE = [
# to populate metric template
"cookiecutter",
# for the gradio widget
"gradio>=3.0.0"
]
EVALUATOR_REQUIRE = [
"transformers",
# for bootstrap computations in Evaluator
"scipy>=1.7.1",
]
TESTS_REQUIRE = [
# test dependencies
"absl-py",
"charcut>=1.1.1", # for charcut_mt
"cer>=1.2.0", # for characTER
"nltk", # for NIST and probably others
"pytest",
"pytest-datadir",
"pytest-xdist",
# optional dependencies
"tensorflow>=2.3,!=2.6.0,!=2.6.1, <=2.10",
"torch",
# metrics dependencies
"accelerate", # for frugalscore (calls transformers' Trainer)
"bert_score>=0.3.6",
"rouge_score>=0.1.2",
"sacrebleu",
"sacremoses",
"scipy",
"seqeval",
"scikit-learn",
"jiwer",
"sentencepiece", # for bleurt
"transformers", # for evaluator
"mauve-text",
"trectools",
# to speed up pip backtracking
"toml>=0.10.1",
"requests_file>=1.5.1",
"tldextract>=3.1.0",
"texttable>=1.6.3",
"unidecode>=1.3.4",
"Werkzeug>=1.0.1",
"six~=1.15.0",
]
QUALITY_REQUIRE = ["black~=22.0", "flake8>=3.8.3", "isort>=5.0.0", "pyyaml>=5.3.1"]
EXTRAS_REQUIRE = {
"tensorflow": ["tensorflow>=2.2.0,!=2.6.0,!=2.6.1"],
"tensorflow_gpu": ["tensorflow-gpu>=2.2.0,!=2.6.0,!=2.6.1"],
"torch": ["torch"],
"dev": TESTS_REQUIRE + QUALITY_REQUIRE,
"tests": TESTS_REQUIRE,
"quality": QUALITY_REQUIRE,
"docs": [
# Might need to add doc-builder and some specific deps in the future
"s3fs",
],
"template": TEMPLATE_REQUIRE,
"evaluator": EVALUATOR_REQUIRE
}
setup(
name="evaluate",
version="0.4.1.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
description="HuggingFace community-driven open-source library of evaluation",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
author="HuggingFace Inc.",
author_email="leandro@huggingface.co",
url="https://github.com/huggingface/evaluate",
download_url="https://github.com/huggingface/evaluate/tags",
license="Apache 2.0",
package_dir={"": "src"},
packages=find_packages("src"),
entry_points={"console_scripts": ["evaluate-cli=evaluate.commands.evaluate_cli:main"]},
install_requires=REQUIRED_PKGS,
extras_require=EXTRAS_REQUIRE,
python_requires=">=3.7.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="metrics machine learning evaluate evaluation",
zip_safe=False, # Required for mypy to find the py.typed file
)
| 6,346 | 31.88601 | 116 | py |
evaluate | evaluate-main/src/evaluate/config.py | import importlib
import os
import platform
from pathlib import Path
from packaging import version
from .utils.logging import get_logger
logger = get_logger(__name__)
# Metrics
S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/metrics/{path}/{name}"
REPO_MEASUREMENTS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/measurements/{path}/{name}"
REPO_COMPARISONS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/comparisons/{path}/{name}"
# Evaluation module types
EVALUATION_MODULE_TYPES = ["metric", "comparison", "measurement"]
# Hub
HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
HF_LIST_ENDPOINT = HF_ENDPOINT + "/api/spaces?filter={type}"
HUB_EVALUATE_URL = HF_ENDPOINT + "/spaces/{path}/resolve/{revision}/{name}"
HUB_DEFAULT_VERSION = "main"
PY_VERSION = version.parse(platform.python_version())
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# General environment variables accepted values for booleans
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
# Imports
PANDAS_VERSION = version.parse(importlib_metadata.version("pandas"))
PYARROW_VERSION = version.parse(importlib_metadata.version("pyarrow"))
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
TORCH_VERSION = "N/A"
TORCH_AVAILABLE = False
if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
if TORCH_AVAILABLE:
try:
TORCH_VERSION = version.parse(importlib_metadata.version("torch"))
logger.info(f"PyTorch version {TORCH_VERSION} available.")
except importlib_metadata.PackageNotFoundError:
pass
else:
logger.info("Disabling PyTorch because USE_TF is set")
TF_VERSION = "N/A"
TF_AVAILABLE = False
if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
if TF_AVAILABLE:
# For the metadata, we have to look for both tensorflow and tensorflow-cpu
for package in [
"tensorflow",
"tensorflow-cpu",
"tensorflow-gpu",
"tf-nightly",
"tf-nightly-cpu",
"tf-nightly-gpu",
"intel-tensorflow",
"tensorflow-rocm",
"tensorflow-macos",
]:
try:
TF_VERSION = version.parse(importlib_metadata.version(package))
except importlib_metadata.PackageNotFoundError:
continue
else:
break
else:
TF_AVAILABLE = False
if TF_AVAILABLE:
if TF_VERSION.major < 2:
logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.")
TF_AVAILABLE = False
else:
logger.info(f"TensorFlow version {TF_VERSION} available.")
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
JAX_VERSION = "N/A"
JAX_AVAILABLE = False
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
JAX_AVAILABLE = importlib.util.find_spec("jax") is not None
if JAX_AVAILABLE:
try:
JAX_VERSION = version.parse(importlib_metadata.version("jax"))
logger.info(f"JAX version {JAX_VERSION} available.")
except importlib_metadata.PackageNotFoundError:
pass
else:
logger.info("Disabling JAX because USE_JAX is set to False")
# Cache location
DEFAULT_XDG_CACHE_HOME = "~/.cache"
XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
DEFAULT_HF_EVALUATE_CACHE = os.path.join(HF_CACHE_HOME, "evaluate")
HF_EVALUATE_CACHE = Path(os.getenv("HF_EVALUATE_CACHE", DEFAULT_HF_EVALUATE_CACHE))
DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
DOWNLOADED_DATASETS_DIR = "downloads"
DEFAULT_DOWNLOADED_EVALUATE_PATH = os.path.join(HF_EVALUATE_CACHE, DOWNLOADED_DATASETS_DIR)
DOWNLOADED_EVALUATE_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_EVALUATE_PATH", DEFAULT_DOWNLOADED_EVALUATE_PATH))
EXTRACTED_EVALUATE_DIR = "extracted"
DEFAULT_EXTRACTED_EVALUATE_PATH = os.path.join(DEFAULT_DOWNLOADED_EVALUATE_PATH, EXTRACTED_EVALUATE_DIR)
EXTRACTED_EVALUATE_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_EVALUATE_PATH", DEFAULT_EXTRACTED_EVALUATE_PATH))
# Download count for the website
HF_UPDATE_DOWNLOAD_COUNTS = (
os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
)
# Offline mode
HF_EVALUATE_OFFLINE = os.environ.get("HF_EVALUATE_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
# File names
LICENSE_FILENAME = "LICENSE"
METRIC_INFO_FILENAME = "metric_info.json"
DATASETDICT_JSON_FILENAME = "dataset_dict.json"
MODULE_NAME_FOR_DYNAMIC_MODULES = "evaluate_modules"
HF_HUB_ALLOWED_TASKS = [
"image-classification",
"translation",
"image-segmentation",
"fill-mask",
"automatic-speech-recognition",
"token-classification",
"sentence-similarity",
"audio-classification",
"question-answering",
"summarization",
"zero-shot-classification",
"table-to-text",
"feature-extraction",
"other",
"multiple-choice",
"text-classification",
"text-to-image",
"text2text-generation",
"zero-shot-image-classification",
"tabular-classification",
"tabular-regression",
"image-to-image",
"tabular-to-text",
"unconditional-image-generation",
"text-retrieval",
"text-to-speech",
"object-detection",
"audio-to-audio",
"text-generation",
"conversational",
"table-question-answering",
"visual-question-answering",
"image-to-text",
"reinforcement-learning",
"voice-activity-detection",
"time-series-forecasting",
"document-question-answering",
]
| 6,648 | 33.450777 | 118 | py |
evaluate | evaluate-main/src/evaluate/evaluator/base.py | # Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from numbers import Number
from typing import Any, Callable, Dict, List, Optional, Union
# Lint as: python3
from datasets import Dataset, load_dataset
from evaluate.evaluator.utils import choose_split
try:
from scipy.stats import bootstrap
SCIPY_AVAILABLE = True
except ImportError:
SCIPY_AVAILABLE = False
try:
import transformers
from transformers import Pipeline, pipeline
TRANSFORMERS_AVAILABLE = True
except ImportError:
TRANSFORMERS_AVAILABLE = False
from time import perf_counter
from typing_extensions import Literal
from ..loading import load
from ..module import EvaluationModule
from ..utils.logging import get_logger
from .utils import DatasetColumn
logger = get_logger(__name__)
EVALUTOR_COMPUTE_START_DOCSTRING = r"""
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
If the argument in not specified, we initialize the default pipeline for the task (in this case
`text-classification` or its alias - `sentiment-analysis`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
"""
EVALUATOR_COMPUTE_RETURN_DOCSTRING = r"""
Return:
A `Dict`. The keys represent metric keys calculated for the `metric` spefied in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
"""
class Evaluator(ABC):
    """
    The [`Evaluator`] class is the class from which all evaluators inherit. Refer to this class for methods shared across
    different evaluators.
    Base class implementing evaluator operations.
    """
    # Extra keyword arguments merged into every pipeline call; task-specific subclasses override this.
    PIPELINE_KWARGS = {}
    # Extra keyword arguments merged into every `metric.compute` call; task-specific subclasses override this.
    METRIC_KWARGS = {}
    def __init__(self, task: str, default_metric_name: str = None):
        """Store the task name and optional default metric name.

        Raises:
            ImportError: if `transformers` or `scipy>=1.7.1` is not installed; both are
                required by every concrete evaluator, so we fail fast at construction time
                rather than mid-evaluation.
        """
        if not TRANSFORMERS_AVAILABLE:
            raise ImportError(
                "If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[evaluator]`."
            )
        if not SCIPY_AVAILABLE:
            raise ImportError(
                "If you want to use the `Evaluator` you need `scipy>=1.7.1`. Run `pip install evaluate[evaluator]`."
            )
        self.task = task
        self.default_metric_name = default_metric_name
    @staticmethod
    def _compute_confidence_interval(
        metric,
        metric_inputs,
        metric_keys: List[str],
        confidence_level: float = 0.95,
        n_resamples: int = 9999,
        random_state: Optional[int] = None,
    ) -> Dict[str, Any]:
        """
        Compute a bootstrap confidence interval and standard error for every key in
        `metric_keys`, based on `scipy.stats.bootstrap` over the columns of `metric_inputs`.

        Returns:
            `Dict[str, Any]`: maps each metric key to
            `{"confidence_interval": (low, high), "standard_error": float}`.
        """
        # bootstrap only works with functions that use args and no kwargs
        def build_args_metric(metric, key, **kwargs):
            # Adapt `metric.compute(**kwargs)` into a positional-args callable; the
            # positional order mirrors `kwargs.keys()`, which is the same insertion order
            # as `metric_inputs.values()` passed to `bootstrap` below.
            def args_metric(*args):
                return metric.compute(**{k: v for k, v in zip(kwargs.keys(), args)})[key]
            return args_metric
        bootstrap_dict = {}
        for key in metric_keys:
            # paired=True resamples the input columns jointly (reference/prediction pairs
            # stay aligned); vectorized=False because the statistic is a plain Python callable.
            bs = bootstrap(
                data=list(metric_inputs.values()),
                statistic=build_args_metric(metric, key, **metric_inputs),
                paired=True,
                vectorized=False,
                confidence_level=confidence_level,
                n_resamples=n_resamples,
                random_state=random_state,
            )
            bootstrap_dict[key] = {
                "confidence_interval": (bs.confidence_interval.low, bs.confidence_interval.high),
                "standard_error": bs.standard_error,
            }
        return bootstrap_dict
@staticmethod
def _compute_time_perf(start_time: float, end_time: float, num_samples: int) -> Dict[str, Any]:
"""
A utility function computing time performance metrics:
- `total_time_in_seconds` - pipeline inference runtime for the evaluation data in seconds,
- `samples_per_second` - pipeline throughput in the number of samples per second.
- `latency_in_seconds` - pipeline inference runtime for the evaluation data in seconds per sample,
"""
latency = end_time - start_time
throughput = num_samples / latency
latency_sample = 1.0 / throughput
return {
"total_time_in_seconds": latency,
"samples_per_second": throughput,
"latency_in_seconds": latency_sample,
}
@staticmethod
def _infer_device() -> int:
"""Helper function to check if GPU or CPU is available for inference."""
# try infer with torch first
try:
import torch
if torch.cuda.is_available():
device = 0 # first GPU
else:
device = -1 # CPU
except ImportError:
# if not available try TF
try:
import tensorflow as tf
if len(tf.config.list_physical_devices("GPU")) > 0:
device = 0 # first GPU
else:
device = -1 # CPU
except ImportError:
device = -1
if device == -1:
logger.info("No GPU found. The default device for pipeline inference is set to CPU.")
else:
logger.info("GPU found. The default device for pipeline inference is set to GPU (CUDA:0).")
return device
    @abstractmethod
    def predictions_processor(self, *args, **kwargs):
        """
        Convert raw pipeline outputs into the keyword inputs the metric expects
        (e.g. a `{"predictions": [...]}` dict). Every task-specific evaluator must
        implement this.
        """
        raise NotImplementedError()
    def compute(
        self,
        model_or_pipeline: Union[
            str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
        ] = None,
        data: Union[str, Dataset] = None,
        subset: Optional[str] = None,
        split: Optional[str] = None,
        metric: Union[str, EvaluationModule] = None,
        tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
        feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
        strategy: Literal["simple", "bootstrap"] = "simple",
        confidence_level: float = 0.95,
        n_resamples: int = 9999,
        device: int = None,
        random_state: Optional[int] = None,
        input_column: str = "text",
        label_column: str = "label",
        label_mapping: Optional[Dict[str, Number]] = None,
    ) -> Dict[str, float]:
        """Run the full evaluation loop: load data, build the pipeline, run inference,
        and compute the metric (plus timing stats).

        See `EVALUTOR_COMPUTE_START_DOCSTRING` / `EVALUATOR_COMPUTE_RETURN_DOCSTRING`
        at module level for the argument and return contracts.
        """
        result = {}
        # Fail early if the caller's `device` contradicts a pre-built pipeline's placement.
        self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
        # Prepare inputs
        data = self.load_data(data=data, subset=subset, split=split)
        metric_inputs, pipe_inputs = self.prepare_data(data=data, input_column=input_column, label_column=label_column)
        pipe = self.prepare_pipeline(
            model_or_pipeline=model_or_pipeline,
            tokenizer=tokenizer,
            feature_extractor=feature_extractor,
            device=device,
        )
        metric = self.prepare_metric(metric)
        # Compute predictions
        predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
        predictions = self.predictions_processor(predictions, label_mapping)
        metric_inputs.update(predictions)
        # Compute metrics from references and predictions
        metric_results = self.compute_metric(
            metric=metric,
            metric_inputs=metric_inputs,
            strategy=strategy,
            confidence_level=confidence_level,
            n_resamples=n_resamples,
            random_state=random_state,
        )
        # TODO: To clarify why `wer` and `cer` return float
        # even though metric.compute contract says that it
        # returns Optional[dict].
        if type(metric_results) == float:
            metric_results = {metric.name: metric_results}
        result.update(metric_results)
        result.update(perf_results)
        return result
    @staticmethod
    def check_for_mismatch_in_device_setup(device, model_or_pipeline):
        """Raise ValueError when an explicit accelerator `device` disagrees with the
        device a pre-instantiated `Pipeline` was built on.

        Only applies when `device` requests an accelerator (not None and not -1) and
        `model_or_pipeline` is already a `Pipeline`; strings/models are handled later
        by `prepare_pipeline`.
        """
        if device is not None and device != -1 and isinstance(model_or_pipeline, Pipeline):
            if model_or_pipeline.device.type == "cpu":
                raise ValueError(
                    "The value of the `device` kwarg passed to `compute` suggests that this pipe should be run on an "
                    "accelerator, but the pipe was instantiated on CPU. Pass `device` to the pipeline during "
                    "initialization to use an accelerator, or pass `device=None` to `compute`. "
                )
            elif device != model_or_pipeline.device.index:
                raise ValueError(
                    f"This pipeline was instantiated on device {model_or_pipeline.device.index} but device={device} was passed to `compute`."
                )
def check_required_columns(self, data: Union[str, Dataset], columns_names: Dict[str, str]):
"""
Ensure the columns required for the evaluation are present in the dataset.
Args:
data (`str` or [`Dataset`]):
Specifies the dataset we will run evaluation on.
columns_names (`List[str]`):
List of column names to check in the dataset. The keys are the arguments to the [`evaluate.EvaluationModule.compute`] method,
while the values are the column names to check.
Example:
```py
>>> from datasets import load_dataset
>>> from evaluate import evaluator
>>> data = load_dataset("rotten_tomatoes', split="train")
>>> evaluator.check_required_columns(data, {"input_column": "text", "label_column": "label"})
```
"""
for input_name, column_name in columns_names.items():
if column_name not in data.column_names:
raise ValueError(
f"Invalid `{input_name}` {column_name} specified. The dataset contains the following columns: {data.column_names}."
)
@staticmethod
def get_dataset_split(data, subset=None, split=None):
"""
Infers which split to use if `None` is given.
Args:
data (`str`):
Name of dataset.
subset (`str`):
Name of config for datasets with multiple configurations (e.g. 'glue/cola').
split (`str`, defaults to `None`):
Split to use.
Returns:
`split`: `str` containing which split to use
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").get_dataset_split(data="rotten_tomatoes")
WARNING:evaluate.evaluator.base:Dataset split not defined! Automatically evaluating with split: TEST
'test'
```
"""
if split is None:
split = choose_split(data, subset)
logger.warning(f"Dataset split not defined! Automatically evaluating with split: {split.upper()}")
return split
def load_data(self, data: Union[str, Dataset], subset: str = None, split: str = None):
"""
Load dataset with given subset and split.
Args:
data ([`Dataset`] or `str`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of
type `str`, we treat it as the dataset name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Specifies dataset subset to be passed to `name` in `load_dataset`. To be
used with datasets with several configurations (e.g. glue/sst2).
split (`str`, defaults to `None`):
User-defined dataset split by name (e.g. train, validation, test). Supports slice-split (`test[:n]`).
If not defined and data is a `str` type, will automatically select the best one via `choose_split()`.
Returns:
data ([`Dataset`]): Loaded dataset which will be used for evaluation.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").load_data(data="rotten_tomatoes", split="train")
Dataset({
features: ['text', 'label'],
num_rows: 8530
})
```
"""
if isinstance(data, str):
split = self.get_dataset_split(data, subset, split)
data = load_dataset(data, name=subset, split=split)
return data
elif isinstance(data, Dataset):
if split is not None or subset is not None:
logger.warning("`data` is a preloaded Dataset! Ignoring `subset` and `split`.")
return data
else:
raise ValueError(
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
)
def prepare_data(self, data: Dataset, input_column: str, label_column: str, *args, **kwargs):
"""
Prepare data.
Args:
data ([`Dataset`]):
Specifies the dataset we will run evaluation on.
input_column (`str`, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
second_input_column(`str`, *optional*):
The name of the column containing the second text feature if there is one. Otherwise, set to `None`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
Returns:
`dict`: metric inputs.
`list`: pipeline inputs.
Example:
```py
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> evaluator("text-classification").prepare_data(ds, input_column="text", second_input_column=None, label_column="label")
```
"""
self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
return {"references": data[label_column]}, DatasetColumn(data, input_column)
    def prepare_pipeline(
        self,
        model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
        tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
        feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
        device: int = None,
    ):
        """
        Prepare pipeline.
        Args:
            model_or_pipeline (`str` or [`~transformers.Pipeline`] or `Callable` or [`~transformers.PreTrainedModel`] or [`~transformers.TFPreTrainedModel`], defaults to `None`):
                If the argument in not specified, we initialize the default pipeline for the task. If the argument is of the type `str` or
                is a model instance, we use it to initialize a new [`~transformers.Pipeline`] with the given model. Otherwise we assume the
                argument specifies a pre-initialized pipeline.
            preprocessor ([`~transformers.PreTrainedTokenizerBase`] or [`~transformers.FeatureExtractionMixin`], *optional*, defaults to `None`):
                Argument can be used to overwrite a default preprocessor if `model_or_pipeline` represents a model for
                which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
                this argument.
        Returns:
            The initialized pipeline.
        Example:
        ```py
        >>> from evaluate import evaluator
        >>> evaluator("text-classification").prepare_pipeline(model_or_pipeline="distilbert-base-uncased")
        ```
        """
        if device is None:
            device = self._infer_device()
        # Model name or model instance: build a fresh pipeline around it.
        if (
            isinstance(model_or_pipeline, str)
            or isinstance(model_or_pipeline, transformers.PreTrainedModel)
            or isinstance(model_or_pipeline, transformers.TFPreTrainedModel)
        ):
            pipe = pipeline(
                self.task,
                model=model_or_pipeline,
                tokenizer=tokenizer,
                feature_extractor=feature_extractor,
                device=device,
            )
        else:
            # None -> default pipeline for the task; anything else is assumed pre-built.
            if model_or_pipeline is None:
                pipe = pipeline(self.task, device=device)
            else:
                pipe = model_or_pipeline
            # NOTE(review): this warns only when BOTH preprocessors were passed; an `or`
            # would catch the (more likely) single-argument case — confirm intent.
            if tokenizer is not None and feature_extractor is not None:
                logger.warning("Ignoring the value of the preprocessor argument (`tokenizer` or `feature_extractor`).")
        # `translation_xx_to_yy` subtasks are accepted for the generic "translation" task.
        if (pipe.task != self.task) and not (self.task == "translation" and pipe.task.startswith("translation")):
            raise ValueError(
                f"Incompatible `model_or_pipeline`. Please specify `model_or_pipeline` compatible with the `{self.task}` task."
            )
        return pipe
def prepare_metric(self, metric: Union[str, EvaluationModule]):
"""
Prepare metric.
Args:
metric (`str` or [`EvaluationModule`], defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
Returns:
The loaded metric.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").prepare_metric("accuracy")
```
"""
# Prepare metric.
if metric is None:
if self.default_metric_name is None:
raise ValueError(
"`Evaluator` doesn't specify a default metric. Please specify a valid `metric` argument."
)
metric = load(self.default_metric_name)
elif isinstance(metric, str):
metric = load(metric)
return metric
def call_pipeline(self, pipe, *args, **kwargs):
start_time = perf_counter()
pipe_output = pipe(*args, **kwargs, **self.PIPELINE_KWARGS)
end_time = perf_counter()
return pipe_output, self._compute_time_perf(start_time, end_time, len(pipe_output))
def compute_metric(
self,
metric: EvaluationModule,
metric_inputs: Dict,
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
random_state: Optional[int] = None,
):
"""Compute and return metrics."""
result = metric.compute(**metric_inputs, **self.METRIC_KWARGS)
if strategy == "bootstrap":
metric_keys = result.keys()
bootstrap_dict = self._compute_confidence_interval(
metric,
metric_inputs,
metric_keys,
confidence_level,
n_resamples,
random_state,
)
for key in metric_keys:
bootstrap_dict[key]["score"] = result[key]
return bootstrap_dict
return result
| 22,881 | 40.985321 | 178 | py |
evaluate | evaluate-main/src/evaluate/utils/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import copy
import io
import json
import os
import posixpath
import re
import shutil
import sys
import tempfile
import time
import urllib
from contextlib import closing, contextmanager
from functools import partial
from hashlib import sha256
from pathlib import Path
from typing import List, Optional, Type, TypeVar, Union
from urllib.parse import urljoin, urlparse
import requests
from datasets import DownloadConfig
from datasets.utils.extract import ExtractManager
from datasets.utils.filelock import FileLock
from .. import __version__, config
from . import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
INCOMPLETE_SUFFIX = ".incomplete"
T = TypeVar("T", str, Path)
def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:
    """
    Ensure the dynamic-modules cache directory exists, is a Python package, and is
    on `sys.path`, so modules such as `datasets_modules` can be imported from it.
    By default hf_modules_cache='~/.cache/huggingface/modules' (overridable via the
    HF_MODULES_CACHE environment variable or the argument).
    """
    cache = str(hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE)
    if cache not in sys.path:
        sys.path.append(cache)
    os.makedirs(cache, exist_ok=True)
    init_file = os.path.join(cache, "__init__.py")
    if not os.path.exists(init_file):
        # Touch the file so the directory is importable as a package.
        with open(init_file, "w"):
            pass
    return cache
def is_remote_url(url_or_filename: str) -> bool:
    """Return True when *url_or_filename* uses a recognized remote scheme."""
    return urlparse(url_or_filename).scheme in ("http", "https", "s3", "gs", "hdfs", "ftp")
def is_local_path(url_or_filename: str) -> bool:
    """Return True when *url_or_filename* looks like a local filesystem path.

    On unix the scheme of a local path is empty (for both absolute and relative),
    while on windows the "scheme" of an absolute path is the drive letter (ex: "c");
    see https://bugs.python.org/issue42215 for details on the windows behavior.
    """
    scheme = urlparse(url_or_filename).scheme
    return scheme == "" or os.path.ismount(scheme + ":/")
def is_relative_path(url_or_filename: str) -> bool:
    """Return True for a scheme-less, non-absolute filesystem path."""
    if urlparse(url_or_filename).scheme != "":
        return False
    return not os.path.isabs(url_or_filename)
def relative_to_absolute_path(path: T) -> T:
    """Expand env vars and `~` in *path* and return it as an absolute path,
    preserving the input type (str in, str out; Path in, Path out)."""
    expanded = os.path.expanduser(os.path.expandvars(str(path)))
    absolute = os.path.abspath(expanded)
    if isinstance(path, Path):
        return Path(absolute)
    return absolute
def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
    """Build the S3 (or CloudFront, when `use_cdn`) URL for a dataset or metric file.

    NOTE(review): the config module visible in this repo only defines the *METRICS*
    prefixes, so the `dataset=True` branch looks like it would raise AttributeError —
    confirm against `evaluate.config`.
    """
    if dataset:
        endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX
    else:
        endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX
    return f"{endpoint}/{identifier}/{filename}"
def head_hf_s3(
    identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0
) -> Union[requests.Response, Exception]:
    """HEAD the bucket URL for *identifier*/*filename*, retrying up to *max_retries* times."""
    target_url = hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset)
    return http_head(target_url, max_retries=max_retries)
def hf_hub_url(path: str, name: str, revision: Optional[str] = None) -> str:
    """Resolve the Hub URL for file *name* inside Space *path* at *revision*
    (falls back to the default branch when *revision* is falsy)."""
    if not revision:
        revision = config.HUB_DEFAULT_VERSION
    return config.HUB_EVALUATE_URL.format(path=path, name=name, revision=revision)
def url_or_path_join(base_name: str, *pathnames: str) -> str:
    """Join path components onto *base_name*, using POSIX-style joining for remote URLs
    and `Path` semantics for local paths (always returning a POSIX-style string)."""
    if not is_remote_url(base_name):
        return Path(base_name, *pathnames).as_posix()
    # Normalize each component to forward slashes and strip leading '/' so that
    # posixpath.join never discards the base URL.
    normalized = (str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames)
    return posixpath.join(base_name, *normalized)
def url_or_path_parent(url_or_path: str) -> str:
    """Return the parent of a URL (everything before the last "/") or of a local path."""
    if not is_remote_url(url_or_path):
        return os.path.dirname(url_or_path)
    return url_or_path[: url_or_path.rindex("/")]
def hash_url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    If the url ends with .py, '.py' is appended to the name so that the
    downloaded module script keeps an importable extension.

    (Fixed docstring: the previous version described appending '.h5' for
    Keras/TF weights, which does not match the code — only '.py' is handled.)
    """
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".py"):
        filename += ".py"
    return filename
def cached_path(
    url_or_filename,
    download_config=None,
    **download_kwargs,
) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.

    Return:
        Local path (string)

    Raises:
        FileNotFoundError: in case of non-recoverable file
            (non-existent or no cache on disk)
        ConnectionError: in case of unreachable url
            and no cache on disk
        ValueError: if it couldn't parse the url or filename correctly
        requests.exceptions.ConnectionError: in case of internet connection issue
    """
    if download_config is None:
        # Stray keyword arguments become DownloadConfig fields.
        download_config = DownloadConfig(**download_kwargs)

    cache_dir = download_config.cache_dir or config.DOWNLOADED_EVALUATE_PATH
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=download_config.force_download,
            proxies=download_config.proxies,
            resume_download=download_config.resume_download,
            user_agent=download_config.user_agent,
            local_files_only=download_config.local_files_only,
            use_etag=download_config.use_etag,
            max_retries=download_config.max_retries,
            use_auth_token=download_config.use_auth_token,
            ignore_url_params=download_config.ignore_url_params,
            download_desc=download_config.download_desc,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif is_local_path(url_or_filename):
        # File, but it doesn't exist.
        raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist")
    else:
        # Something unknown
        raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")

    if output_path is None:
        return output_path
    if download_config.extract_compressed_file:
        # Optionally decompress / extract archives next to the cached entry.
        output_path = ExtractManager(cache_dir=download_config.cache_dir).extract(
            output_path, force_extract=download_config.force_extract
        )
    return output_path
def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
    """Build the HTTP user-agent string.

    Starts with the datasets/python/pyarrow versions, appends torch / tensorflow /
    jax versions for the frameworks reported available by `config`, then appends
    the caller-supplied `user_agent` (a dict is rendered as "key/value" pairs).
    """
    ua = f"datasets/{__version__}; python/{config.PY_VERSION}"
    ua += f"; pyarrow/{config.PYARROW_VERSION}"
    if config.TORCH_AVAILABLE:
        ua += f"; torch/{config.TORCH_VERSION}"
    if config.TF_AVAILABLE:
        ua += f"; tensorflow/{config.TF_VERSION}"
    if config.JAX_AVAILABLE:
        ua += f"; jax/{config.JAX_VERSION}"
    if isinstance(user_agent, dict):
        ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}"
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_authentication_headers_for_url(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> dict:
    """Handle the HF authentication.

    Only URLs on the configured HF endpoint get an ``authorization`` header:
    `use_auth_token` may be an explicit token string, or truthy to read the token
    stored locally by ``huggingface-cli login``. Other URLs get an empty dict.
    """
    headers = {}
    if url.startswith(config.HF_ENDPOINT):
        token = None
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif bool(use_auth_token):
            # Lazy import: only needed when reading the locally stored token.
            from huggingface_hub import hf_api

            token = hf_api.HfFolder.get_token()
        if token:
            headers["authorization"] = f"Bearer {token}"
    return headers
class OfflineModeIsEnabled(ConnectionError):
    """Raised when network access is attempted while HF_EVALUATE_OFFLINE mode is active."""

    pass
def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):
    """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_EVALUATE_OFFLINE is True."""
    if not config.HF_EVALUATE_OFFLINE:
        return
    message = "Offline mode is enabled."
    if msg is not None:
        message += " " + str(msg)
    raise OfflineModeIsEnabled(message)
def _retry(
    func,
    func_args: Optional[tuple] = None,
    func_kwargs: Optional[dict] = None,
    exceptions: Type[requests.exceptions.RequestException] = requests.exceptions.RequestException,
    status_codes: Optional[List[int]] = None,
    max_retries: int = 0,
    base_wait_time: float = 0.5,
    max_wait_time: float = 2,
):
    """Call ``func(*func_args, **func_kwargs)``, retrying on `exceptions` with exponential backoff.

    Args:
        func: callable to invoke.
        func_args / func_kwargs: positional / keyword arguments forwarded to `func`.
        exceptions: exception type(s) that trigger a retry.
        status_codes: if given, only retry when the error's HTTP response status code is in this list.
        max_retries (int): maximum number of retries; 0 re-raises on the first error.
        base_wait_time (float): initial wait in seconds; doubled on each retry.
        max_wait_time (float): upper bound on the wait between two retries, in seconds.
    """
    func_args = func_args or ()
    func_kwargs = func_kwargs or {}
    retry = 0
    while True:
        try:
            return func(*func_args, **func_kwargs)
        except exceptions as err:
            if retry >= max_retries or (status_codes and err.response.status_code not in status_codes):
                raise err
            sleep_time = min(max_wait_time, base_wait_time * 2**retry)  # Exponential backoff
            # BUG FIX: the original f-string used `{retry/max_retries}`, which
            # performs arithmetic division; log the "retry/max" counter pair instead.
            logger.info(f"{func} timed out, retrying in {sleep_time}s... [{retry}/{max_retries}]")
            time.sleep(sleep_time)
            retry += 1
def _request_with_retry(
    method: str,
    url: str,
    max_retries: int = 0,
    base_wait_time: float = 0.5,
    max_wait_time: float = 2,
    timeout: float = 10.0,
    **params,
) -> requests.Response:
    """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff.

    Note that if the environment variable HF_EVALUATE_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised.

    Args:
        method (str): HTTP method, such as 'GET' or 'HEAD'.
        url (str): The URL of the resource to fetch.
        max_retries (int): Maximum number of retries, defaults to 0 (no retries).
        base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between
            retries then grows exponentially, capped by max_wait_time.
        max_wait_time (float): Maximum amount of time between two retries, in seconds.
        timeout (float): Per-request timeout in seconds, forwarded to ``requests.request``.
        **params: Params to pass to :obj:`requests.request`.
    """
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    tries, success = 0, False
    while not success:
        tries += 1
        try:
            response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
            success = True
        except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err:
            if tries > max_retries:
                raise err
            # BUG FIX: the original f-string used `{tries/max_retries}`, which
            # performs arithmetic division; log the "tries/max" counter pair instead.
            logger.info(f"{method} request to {url} timed out, retrying... [{tries}/{max_retries}]")
            sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1))  # Exponential backoff
            time.sleep(sleep_time)
    return response
def ftp_head(url, timeout=10.0):
    """Return True when *url* is reachable over FTP, i.e. at least one byte can be read."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    try:
        connection = urllib.request.urlopen(url, timeout=timeout)
        with closing(connection) as remote:
            remote.read(1)
        return True
    except Exception:
        return False
def ftp_get(url, temp_file, timeout=10.0):
    """Download *url* over FTP into the already-open file object *temp_file*."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    logger.info(f"Getting through FTP {url} into {temp_file.name}")
    try:
        with closing(urllib.request.urlopen(url, timeout=timeout)) as remote:
            shutil.copyfileobj(remote, temp_file)
    except urllib.error.URLError as e:
        # Surface FTP failures as ConnectionError, dropping the urllib context.
        raise ConnectionError(e) from None
def http_get(
    url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None
):
    """Stream-download `url` into the open file object `temp_file`, with a progress bar.

    When `resume_size` > 0, a Range header is sent so a partial download can be
    continued; a 416 response (range not satisfiable) means there is nothing left
    to fetch and the function returns without writing.
    """
    headers = copy.deepcopy(headers) or {}
    headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
    if resume_size > 0:
        headers["Range"] = f"bytes={resume_size:d}-"
    response = _request_with_retry(
        method="GET",
        url=url,
        stream=True,
        proxies=proxies,
        headers=headers,
        cookies=cookies,
        max_retries=max_retries,
        timeout=timeout,
    )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    # Total for the progress bar includes the part already on disk.
    total = resume_size + int(content_length) if content_length is not None else None
    with logging.tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc=desc or "Downloading",
        disable=not logging.is_progress_bar_enabled(),
    ) as progress:
        for chunk in response.iter_content(chunk_size=1024):
            progress.update(len(chunk))
            temp_file.write(chunk)
def http_head(
    url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0
) -> requests.Response:
    """Perform an HTTP HEAD request on *url*, injecting the datasets user-agent, with retries."""
    request_headers = copy.deepcopy(headers) or {}
    request_headers["user-agent"] = get_datasets_user_agent(user_agent=request_headers.get("user-agent"))
    return _request_with_retry(
        method="HEAD",
        url=url,
        proxies=proxies,
        headers=request_headers,
        cookies=cookies,
        allow_redirects=allow_redirects,
        timeout=timeout,
        max_retries=max_retries,
    )
def request_etag(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> Optional[str]:
    """Return the ETag header of `url`, or None when the server does not send one.

    NOTE(review): ``raise_for_status()`` raises for every non-ok status, so the
    ``response.ok`` guard below can only ever be True at that point — it is
    effectively dead but kept for safety.
    """
    headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
    response = http_head(url, headers=headers, max_retries=3)
    response.raise_for_status()
    etag = response.headers.get("ETag") if response.ok else None
    return etag
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=100,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
    use_etag=True,
    max_retries=0,
    use_auth_token=None,
    ignore_url_params=False,
    download_desc=None,
) -> str:
    """
    Given a URL, look for the corresponding file in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Return:
        Local path (string)

    Raises:
        FileNotFoundError: in case of non-recoverable file
            (non-existent or no cache on disk)
        ConnectionError: in case of unreachable url
            and no cache on disk
    """
    if cache_dir is None:
        cache_dir = config.HF_EVALUATE_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)

    if ignore_url_params:
        # strip all query parameters and #fragments from the URL
        cached_url = urljoin(url, urlparse(url).path)
    else:
        cached_url = url  # additional parameters may be added to the given URL

    connected = False
    response = None
    cookies = None
    etag = None
    head_error = None

    # Try a first time to find the file on the local file system without eTag (None);
    # if we don't ask for 'force_download' then we spare a request
    filename = hash_url_to_filename(cached_url, etag=None)
    cache_path = os.path.join(cache_dir, filename)

    if os.path.exists(cache_path) and not force_download and not use_etag:
        return cache_path

    # Prepare headers for authentication
    headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
    if user_agent is not None:
        headers["user-agent"] = user_agent

    # We don't have the file locally or we need an eTag
    if not local_files_only:
        if url.startswith("ftp://"):
            connected = ftp_head(url)
        try:
            response = http_head(
                url,
                allow_redirects=True,
                proxies=proxies,
                timeout=etag_timeout,
                max_retries=max_retries,
                headers=headers,
            )
            if response.status_code == 200:  # ok
                etag = response.headers.get("ETag") if use_etag else None
                for k, v in response.cookies.items():
                    # In some edge cases, we need to get a confirmation token
                    if k.startswith("download_warning") and "drive.google.com" in url:
                        url += "&confirm=" + v
                        cookies = response.cookies
                connected = True
                # Fix Google Drive URL to avoid Virus scan warning
                if "drive.google.com" in url and "confirm=" not in url:
                    url += "&confirm=t"
            # In some edge cases, head request returns 400 but the connection is actually ok
            elif (
                (response.status_code == 400 and "firebasestorage.googleapis.com" in url)
                or (response.status_code == 405 and "drive.google.com" in url)
                or (
                    response.status_code == 403
                    and (
                        re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
                        or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url)
                    )
                )
                or (response.status_code == 403 and "ndownloader.figstatic.com" in url)
            ):
                connected = True
                logger.info(f"Couldn't get ETag version for url {url}")
            elif response.status_code == 401 and config.HF_ENDPOINT in url and use_auth_token is None:
                raise ConnectionError(
                    f"Unauthorized for URL {url}. Please use the parameter ``use_auth_token=True`` after logging in with ``huggingface-cli login``"
                )
        except (OSError, requests.exceptions.Timeout) as e:
            # not connected
            head_error = e
            pass

    # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if not connected:
        if os.path.exists(cache_path) and not force_download:
            return cache_path
        if local_files_only:
            raise FileNotFoundError(
                f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
                " disabled. To enable file online look-ups, set 'local_files_only' to False."
            )
        elif response is not None and response.status_code == 404:
            raise FileNotFoundError(f"Couldn't find file at {url}")
        _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
        if head_error is not None:
            raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})")
        elif response is not None:
            raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})")
        else:
            raise ConnectionError(f"Couldn't reach {url}")

    # Try a second time, now with the eTag folded into the cache filename.
    filename = hash_url_to_filename(cached_url, etag)
    cache_path = os.path.join(cache_dir, filename)

    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # From now on, connected is True.
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        if resume_download:
            # Resume appends to a persistent ".incomplete" side file.
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
            # GET file object
            if url.startswith("ftp://"):
                ftp_get(url, temp_file)
            else:
                http_get(
                    url,
                    temp_file,
                    proxies=proxies,
                    resume_size=resume_size,
                    headers=headers,
                    cookies=cookies,
                    max_retries=max_retries,
                    desc=download_desc,
                )

        logger.info(f"storing {url} in cache at {cache_path}")
        shutil.move(temp_file.name, cache_path)

        logger.info(f"creating metadata file for {cache_path}")
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w", encoding="utf-8") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def add_start_docstrings(*docstr):
    """Decorator factory that prepends the given strings to a function's docstring."""
    prefix = "".join(docstr)

    def docstring_decorator(fn):
        current = fn.__doc__ if fn.__doc__ is not None else ""
        fn.__doc__ = prefix + "\n\n" + current
        return fn

    return docstring_decorator
def add_end_docstrings(*docstr):
    """Decorator factory that appends the given strings to a function's docstring."""
    suffix = "".join(docstr)

    def docstring_decorator(fn):
        current = fn.__doc__ if fn.__doc__ is not None else ""
        fn.__doc__ = current + "\n\n" + suffix
        return fn

    return docstring_decorator
def estimate_dataset_size(paths):
    """Sum the on-disk sizes (in bytes) of the given path objects."""
    total = 0
    for path in paths:
        total += path.stat().st_size
    return total
def readline(f: io.RawIOBase):
    """Read one line (up to and including b"\\n") from a raw binary stream, one byte at a time."""
    # Adapted from CPython's _pyio:
    # https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525
    buf = bytearray()
    while True:
        byte = f.read(1)
        if not byte:
            break  # EOF reached before a newline
        buf += byte
        if buf.endswith(b"\n"):
            break
    return bytes(buf)
| 22,602 | 35.515347 | 147 | py |
evaluate | evaluate-main/metrics/perplexity/perplexity.py | # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perplexity Metric."""
import datasets
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import evaluate
from evaluate import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence, calculated with exponent base `e`.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
predictions (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = evaluate.load("perplexity", module_type="metric")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... predictions=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results["mean_perplexity"], 0))
647.0
>>> print(round(results["perplexities"][0], 0))
32.0
Example 2:
>>> from datasets import load_dataset
>>> perplexity = evaluate.load("perplexity", module_type="metric")
>>> input_texts = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")["text"][:10] # doctest: +SKIP
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... predictions=input_texts)
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results["mean_perplexity"], 2)) # doctest: +SKIP
576.76
>>> print(round(results["perplexities"][0], 2)) # doctest: +SKIP
889.28
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(evaluate.Metric):
    def _info(self):
        # Metric metadata: a single "predictions" string column is expected.
        return evaluate.MetricInfo(
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(
        self, predictions, model_id, batch_size: int = 16, add_start_token: bool = True, device=None, max_length=None
    ):
        """Compute per-text and mean perplexity of `predictions` under the causal LM `model_id`."""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token and max_length:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = max_length - 1
        else:
            max_tokenized_len = max_length

        encodings = tokenizer(
            predictions,
            add_special_tokens=False,
            padding=True,
            truncation=True if max_tokenized_len else False,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                # Prepend the BOS token to every sequence and extend the mask to match.
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # Shift so that the logits at position t predict token t+1, and mask padding.
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # Perplexity per sequence: exp(mean NLL over non-padded positions).
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 8,460 | 42.839378 | 200 | py |
evaluate | evaluate-main/metrics/frugalscore/frugalscore.py | # Copyright 2022 The HuggingFace Datasets Authors and the current metric script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FrugalScore metric."""
import datasets
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
import evaluate
_CITATION = """\
@article{eddine2021frugalscore,
title={FrugalScore: Learning Cheaper, Lighter and Faster Evaluation Metrics for Automatic Text Generation},
author={Eddine, Moussa Kamal and Shang, Guokan and Tixier, Antoine J-P and Vazirgiannis, Michalis},
journal={arXiv preprint arXiv:2110.08559},
year={2021}
}
"""
_DESCRIPTION = """\
FrugalScore is a reference-based metric for NLG models evaluation. It is based on a distillation approach that allows to learn a fixed, low cost version of any expensive NLG metric, while retaining most of its original performance.
"""
_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores.
Args:
predictions (list of str): list of predictions to score. Each predictions
should be a string.
references (list of str): list of reference for each prediction. Each
reference should be a string.
batch_size (int): the batch size for predictions.
max_length (int): maximum sequence length.
device (str): either gpu or cpu
Returns:
scores (list of int): list of scores.
Examples:
>>> frugalscore = evaluate.load("frugalscore")
>>> results = frugalscore.compute(predictions=['hello there', 'huggingface'], references=['hello world', 'hugging face'])
>>> print([round(s, 3) for s in results["scores"]])
[0.631, 0.645]
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class FRUGALSCORE(evaluate.Metric):
    def _info(self):
        # Metric metadata: paired "predictions"/"references" string columns.
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/moussaKam/FrugalScore",
        )

    def _download_and_prepare(self, dl_manager):
        """Load the FrugalScore model/tokenizer named by `config_name`
        (default: the tiny BERT checkpoint distilled from BERTScore)."""
        if self.config_name == "default":
            checkpoint = "moussaKam/frugalscore_tiny_bert-base_bert-score"
        else:
            checkpoint = self.config_name
        self.model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
        self.tokenizer = AutoTokenizer.from_pretrained(checkpoint)

    def _compute(
        self,
        predictions,
        references,
        batch_size=32,
        max_length=128,
        device=None,
    ):
        """Returns the scores"""
        assert len(predictions) == len(
            references
        ), "predictions and references should have the same number of sentences."
        if device is not None:
            assert device in ["gpu", "cpu"], "device should be either gpu or cpu."
        else:
            device = "gpu" if torch.cuda.is_available() else "cpu"
        # Run inference through a transformers Trainer so batching and fp16
        # are handled by the library.
        training_args = TrainingArguments(
            "trainer",
            fp16=(device == "gpu"),
            per_device_eval_batch_size=batch_size,
            report_to="all",
            no_cuda=(device == "cpu"),
            log_level="warning",
        )
        dataset = {"sentence1": predictions, "sentence2": references}
        raw_datasets = datasets.Dataset.from_dict(dataset)

        def tokenize_function(data):
            # Tokenize each prediction/reference pair jointly.
            return self.tokenizer(
                data["sentence1"], data["sentence2"], max_length=max_length, truncation=True, padding=True
            )

        tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
        tokenized_datasets.remove_columns(["sentence1", "sentence2"])
        trainer = Trainer(self.model, training_args, tokenizer=self.tokenizer)
        predictions = trainer.predict(tokenized_datasets)
        # The regression head outputs one score per pair.
        return {"scores": list(predictions.predictions.squeeze(-1))}
| 4,613 | 38.101695 | 231 | py |
evaluate | evaluate-main/metrics/comet/comet.py | # Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" COMET metric.
Requirements:
pip install unbabel-comet
Usage:
```python
from evaluate import load
comet_metric = load('metrics/comet/comet.py')
#comet_metric = load('comet')
#comet_metric = load('comet', 'Unbabel/wmt20-comet-da')
source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
predictions = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
predictions['scores']
```
"""
import comet # From: unbabel-comet
import datasets
import torch
from packaging import version
import evaluate
logger = evaluate.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{rei-etal-2022-comet,
title = "{COMET}-22: Unbabel-{IST} 2022 Submission for the Metrics Shared Task",
author = "Rei, Ricardo and
C. de Souza, Jos{\'e} G. and
Alves, Duarte and
Zerva, Chrysoula and
Farinha, Ana C and
Glushkova, Taisiya and
Lavie, Alon and
Coheur, Luisa and
Martins, Andr{\'e} F. T.",
booktitle = "Proceedings of the Seventh Conference on Machine Translation (WMT)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.wmt-1.52",
pages = "578--585",
}
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
"""
_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
_KWARGS_DESCRIPTION = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`gpus` (bool): Number of GPUs to use. 0 for CPU
`progress_bar` (bool): Flag that turns on and off the predict progress bar. Defaults to True
Returns:
Dict with all sentence-level scores (`scores` key) a system-level score (`mean_score` key).
Examples:
>>> comet_metric = evaluate.load('comet')
>>> # comet_metric = load('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 3) for v in results["scores"]])
[0.839, 0.972]
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(evaluate.Metric):
    def _info(self):
        # Metric metadata: triplets of source / prediction / reference strings.
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://aclanthology.org/2022.wmt-1.52/",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Download and load the COMET checkpoint selected by `config_name`.

        The "default" config chooses per installed unbabel-comet version:
        wmt22-comet-da for >= 2.0.0, wmt20-comet-da otherwise.
        """
        if self.config_name == "default":
            if version.parse(comet.__version__) >= version.parse("2.0.0"):
                self.scorer = comet.load_from_checkpoint(comet.download_model("Unbabel/wmt22-comet-da"))
            else:
                self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score each (source, prediction, reference) triplet; returns sentence scores and the system mean."""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        # Re-shape the three parallel lists into a list of {"src", "mt", "ref"} dicts.
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        # predict()'s return type changed in unbabel-comet 2.x.
        if version.parse(comet.__version__) >= version.parse("2.0.0"):
            output = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
            scores, mean_score = output.scores, output.system_score
        else:
            scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 6,967 | 39.511628 | 238 | py |
evaluate | evaluate-main/tests/test_evaluator.py | # Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from time import sleep
from unittest import TestCase, mock
import numpy as np
from datasets import ClassLabel, Dataset, Features, Sequence, Value
from PIL import Image
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
AutoModelForImageClassification,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoTokenizer,
pipeline,
)
from evaluate import (
AudioClassificationEvaluator,
AutomaticSpeechRecognitionEvaluator,
Evaluator,
ImageClassificationEvaluator,
QuestionAnsweringEvaluator,
Text2TextGenerationEvaluator,
TextClassificationEvaluator,
TextGenerationEvaluator,
TokenClassificationEvaluator,
evaluator,
load,
)
from .utils import slow
class DummyTextGenerationPipeline:
    """Stand-in for a transformers text-generation pipeline.

    Every input produces ``num_return_sequences`` candidate generations,
    each being the fixed string "Lorem ipsum".
    """

    def __init__(self, prefix="generated", task="text-generation", num_return_sequences=1):
        self.task = task
        self.prefix = prefix
        self.num_return_sequences = num_return_sequences

    def __call__(self, inputs, **kwargs):
        key = f"{self.prefix}_text"
        outputs = []
        for _ in inputs:
            outputs.append([{key: "Lorem ipsum"} for _ in range(self.num_return_sequences)])
        return outputs
class DummyText2TextGenerationPipeline:
    """Stand-in text2text pipeline: every input maps to "Lorem ipsum"."""

    def __init__(self, prefix="generated", task="text2text-generation"):
        self.task = task
        self.prefix = prefix

    def __call__(self, inputs, **kwargs):
        results = []
        for _ in inputs:
            results.append({f"{self.prefix}_text": "Lorem ipsum"})
        return results
class DummyTextClassificationPipeline:
    """Stand-in text-classification pipeline.

    Predictions alternate deterministically: even-indexed inputs are
    labelled POSITIVE, odd-indexed ones NEGATIVE.  The optional
    ``sleep_time`` delay supports the performance-measurement tests.
    """

    def __init__(self, sleep_time=None):
        self.task = "text-classification"
        self.sleep_time = sleep_time

    def __call__(self, inputs, **kwargs):
        if self.sleep_time is not None:
            sleep(self.sleep_time)
        predictions = []
        for idx, _ in enumerate(inputs):
            label = "POSITIVE" if idx % 2 == 0 else "NEGATIVE"
            predictions.append({"label": label})
        return predictions
class DummyImageClassificationPipeline:
    """Stand-in image-classification pipeline with a fixed label ranking."""

    def __init__(self):
        self.task = "image-classification"

    def __call__(self, images, **kwargs):
        ranking = [{"score": 0.9, "label": "yurt"}, {"score": 0.1, "label": "umbrella"}]
        # Return a fresh copy of the ranking per image.
        return [[dict(entry) for entry in ranking] for _ in images]
class DummyQuestionAnsweringPipeline:
    """Stand-in question-answering pipeline.

    In v1 mode every question is answered with "Felix".  In v2
    (squad_v2-style) mode, odd-indexed questions receive an empty answer
    to emulate "no answer" predictions.
    """

    def __init__(self, v2: bool):
        self.task = "question-answering"
        self.v2 = v2

    def __call__(self, question, context, **kwargs):
        answered = {"score": 0.95, "start": 31, "end": 39, "answer": "Felix"}
        unanswered = {"score": 0.95, "start": 0, "end": 0, "answer": ""}
        predictions = []
        for idx in range(len(question)):
            if self.v2 and idx % 2 == 1:
                predictions.append(dict(unanswered))
            else:
                predictions.append(dict(answered))
        return predictions
class DummyTokenClassificationPipeline:
    """Stand-in NER pipeline returning one fixed entity sequence.

    Note: a single prediction list is returned regardless of how many
    inputs are passed, matching how the evaluator tests consume it.
    """

    def __init__(self):
        self.task = "token-classification"

    def __call__(self, inputs, **kwargs):
        starts = [0, 2, 4, 9, 11, 16, 21]
        entities = ["B-LOC", "I-LOC", "I-LOC", "O", "O", "B-LOC", "O"]
        prediction = [{"start": s, "entity": e} for s, e in zip(starts, entities)]
        return [prediction]
class DummyAutomaticSpeechRecognitionPipeline:
    """Stand-in ASR pipeline: every audio input transcribes to "Lorem ipsum"."""

    def __init__(self) -> None:
        self.task = "automatic-speech-recognition"

    def __call__(self, inputs, **kwargs):
        transcripts = []
        for _ in inputs:
            transcripts.append({"text": "Lorem ipsum"})
        return transcripts
class DummyAudioClassificationPipeline:
    """Stand-in audio-classification pipeline with a fixed label ranking."""

    def __init__(self):
        self.task = "audio-classification"

    def __call__(self, audio, **kwargs):
        ranking = [{"score": 0.9, "label": "yes"}, {"score": 0.1, "label": "no"}]
        # Fresh dicts per clip, mirroring the original literal construction.
        return [[dict(entry) for entry in ranking] for _ in audio]
class TestEvaluator(TestCase):
    """Generic `Evaluator` behaviour: task lookup, device inference and the
    three supported ways of passing a model to `compute` (pipeline object,
    model + tokenizer, checkpoint string)."""

    def setUp(self):
        # Fixed: `self.data` was previously assigned twice with the
        # identical value; the dead duplicate has been removed.
        self.data = Dataset.from_dict({"label": [1, 0], "text": ["great movie", "horrible movie"]})
        self.default_ckpt = "hf-internal-testing/tiny-random-bert"
        self.default_model = AutoModelForSequenceClassification.from_pretrained(self.default_ckpt, num_labels=2)
        self.default_tokenizer = AutoTokenizer.from_pretrained(self.default_ckpt)
        self.pipe = pipeline("text-classification", model=self.default_model, tokenizer=self.default_tokenizer)
        self.evaluator = evaluator("text-classification")
        self.label_mapping = {"LABEL_0": 0.0, "LABEL_1": 1.0}

    def test_wrong_task(self):
        self.assertRaises(KeyError, evaluator, "bad_task")

    def test_device_placement(self):
        orig_import = __import__
        pt_mock = mock.Mock()
        tf_mock = mock.Mock()

        # mock import of torch and tensorflow; the nonlocal flags below are
        # flipped between assertions to simulate different environments.
        def import_pt_tf_mock(name, *args):
            if name == "torch":
                if pt_available:
                    return pt_mock
                else:
                    raise ImportError
            if name == "tensorflow":
                if tf_available:
                    return tf_mock
                else:
                    raise ImportError
            return orig_import(name, *args)

        with mock.patch("builtins.__import__", side_effect=import_pt_tf_mock):
            # neither pt or tf are available
            pt_available = False
            tf_available = False
            self.assertEqual(Evaluator._infer_device(), -1)
            # pt available but no GPU
            pt_available = True
            pt_mock.cuda.is_available.return_value = False
            self.assertEqual(Evaluator._infer_device(), -1)
            # pt available and GPU found
            pt_mock.cuda.is_available.return_value = True
            self.assertEqual(Evaluator._infer_device(), 0)
            # tf available but no GPU
            pt_available = False
            tf_available = True
            tf_mock.config.list_physical_devices.return_value = []
            self.assertEqual(Evaluator._infer_device(), -1)
            # tf available and GPU found
            tf_mock.config.list_physical_devices.return_value = ["GPU:0", "GPU:1"]
            self.assertEqual(Evaluator._infer_device(), 0)
            # pt accelerator found and pipeline instantiated on CPU
            pt_mock.cuda.is_available.return_value = True
            self.assertRaises(
                ValueError, Evaluator.check_for_mismatch_in_device_setup, Evaluator._infer_device(), self.pipe
            )
            # tf accelerator found and pipeline instantiated on CPU
            pt_available = False
            tf_available = True
            self.assertRaises(
                ValueError, Evaluator.check_for_mismatch_in_device_setup, Evaluator._infer_device(), self.pipe
            )

    def test_pipe_init(self):
        self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            input_column="text",
            label_column="label",
            label_mapping=self.label_mapping,
        )

    def test_model_init(self):
        self.evaluator.compute(
            model_or_pipeline=self.default_model,
            tokenizer=self.default_tokenizer,
            data=self.data,
            input_column="text",
            label_column="label",
            label_mapping=self.label_mapping,
        )

    def test_model_str_init(self):
        self.evaluator.compute(
            model_or_pipeline=self.default_ckpt,
            data=self.data,
            input_column="text",
            label_column="label",
            label_mapping=self.label_mapping,
        )
class TestTextClassificationEvaluator(TestCase):
    """End-to-end tests for the text-classification evaluator: pipeline /
    model / default initialisation, custom data loading, metric override,
    bootstrap confidence intervals and timing (perf) measurements."""
    def setUp(self):
        self.data = Dataset.from_dict({"label": [1, 0], "text": ["great movie", "horrible movie"]})
        self.default_model = "lvwerra/distilbert-imdb"
        self.input_column = "text"
        self.label_column = "label"
        self.pipe = DummyTextClassificationPipeline()
        # perf_pipe sleeps 0.1s per call so timing assertions have a known floor.
        self.perf_pipe = DummyTextClassificationPipeline(sleep_time=0.1)
        self.evaluator = evaluator("text-classification")
        self.label_mapping = {"NEGATIVE": 0.0, "POSITIVE": 1.0}
    def test_pipe_init(self):
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            input_column="text",
            label_column="label",
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 1.0)
    @slow
    def test_model_init(self):
        # Exercises both the checkpoint-string and model+tokenizer paths.
        results = self.evaluator.compute(
            model_or_pipeline=self.default_model,
            data=self.data,
            metric="accuracy",
            input_column=self.input_column,
            label_column=self.label_column,
            label_mapping=self.label_mapping,
        )
        model = AutoModelForSequenceClassification.from_pretrained(self.default_model)
        tokenizer = AutoTokenizer.from_pretrained(self.default_model)
        self.assertEqual(results["accuracy"], 1.0)
        results = self.evaluator.compute(
            model_or_pipeline=model,
            data=self.data,
            metric="accuracy",
            tokenizer=tokenizer,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 1.0)
    def test_class_init(self):
        evaluator = TextClassificationEvaluator()
        self.assertEqual(evaluator.task, "text-classification")
        self.assertIsNone(evaluator.default_metric_name)
        results = evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="f1",
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["f1"], 1.0)
    @slow
    def test_default_pipe_init(self):
        results = self.evaluator.compute(
            data=self.data,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 1.0)
    def test_data_loading(self):
        # Test passing in dataset by name with split
        data = self.evaluator.load_data("evaluate/imdb-ci", split="test[:1]")
        self.evaluator.prepare_data(data=data, input_column="text", label_column="label", second_input_column=None)
        # Test passing in dataset by name without split and inferring the optimal split
        data = self.evaluator.load_data("evaluate/imdb-ci")
        self.evaluator.prepare_data(data=data, input_column="text", label_column="label", second_input_column=None)
        # Test that it chooses the correct one (e.g. imdb only has train and test, but no validation)
        self.assertEqual(data.split, "test")
        # Test that the data point returned is correct; this maps to the first example in the dataset
        self.assertEqual(data[0]["text"], "I love movies about whales!")
        # Test loading subset of a dataset with the `name` field
        data = self.evaluator.load_data("evaluate/glue-ci", subset="cola", split="test")
        self.assertEqual(isinstance(data, Dataset), True)
        # Test loading subset of a dataset with the `name` field and having it infer the split
        data = self.evaluator.load_data("evaluate/glue-ci", subset="cola")
        self.assertEqual(isinstance(data, Dataset), True)
    def test_overwrite_default_metric(self):
        # The metric can be supplied either as a loaded module or a name.
        accuracy = load("accuracy")
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric=accuracy,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 1.0)
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="accuracy",
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 1.0)
    def test_bootstrap(self):
        # With strategy="bootstrap" each metric maps to score / CI / stderr.
        data = Dataset.from_dict({"label": [1, 0, 0], "text": ["great movie", "great movie", "horrible movie"]})
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=data,
            metric="accuracy",
            label_mapping=self.label_mapping,
            strategy="bootstrap",
            n_resamples=10,
            random_state=0,
        )
        self.assertAlmostEqual(results["accuracy"]["score"], 0.666666, 5)
        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][0], 0.33333, 5)
        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][1], 0.666666, 5)
        self.assertAlmostEqual(results["accuracy"]["standard_error"], 0.22498, 5)
    def test_perf(self):
        # Timing keys are derived from the 0.1s sleep in perf_pipe.
        results = self.evaluator.compute(
            model_or_pipeline=self.perf_pipe,
            data=self.data,
            metric="accuracy",
            input_column=self.input_column,
            label_column=self.label_column,
            label_mapping=self.label_mapping,
            n_resamples=10,
            random_state=0,
        )
        self.assertEqual(results["accuracy"], 1.0)
        self.assertAlmostEqual(results["total_time_in_seconds"], 0.1, 1)
        self.assertAlmostEqual(results["samples_per_second"], len(self.data) / results["total_time_in_seconds"], 5)
        self.assertAlmostEqual(results["latency_in_seconds"], results["total_time_in_seconds"] / len(self.data), 5)
    def test_bootstrap_and_perf(self):
        data = Dataset.from_dict({"label": [1, 0, 0], "text": ["great movie", "great movie", "horrible movie"]})
        results = self.evaluator.compute(
            model_or_pipeline=self.perf_pipe,
            data=data,
            metric="accuracy",
            input_column=self.input_column,
            label_column=self.label_column,
            label_mapping=self.label_mapping,
            strategy="bootstrap",
            n_resamples=10,
            random_state=0,
        )
        self.assertAlmostEqual(results["accuracy"]["score"], 0.666666, 5)
        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][0], 0.333333, 5)
        self.assertAlmostEqual(results["accuracy"]["confidence_interval"][1], 0.666666, 5)
        self.assertAlmostEqual(results["accuracy"]["standard_error"], 0.22498285, 5)
        self.assertAlmostEqual(results["total_time_in_seconds"], 0.1, 1)
        self.assertAlmostEqual(results["samples_per_second"], len(data) / results["total_time_in_seconds"], 5)
        self.assertAlmostEqual(results["latency_in_seconds"], results["total_time_in_seconds"] / len(data), 5)
class TestTextClassificationEvaluatorTwoColumns(TestCase):
    """Text-classification evaluator with paired inputs (premise/hypothesis),
    exercising the `second_input_column` argument."""
    def setUp(self):
        self.data = Dataset.from_dict(
            {
                "label": [1, 0],
                "premise": ["great car", "great movie"],
                "hypothesis": ["great vehicle", "horrible movie"],
            }
        )
        self.default_model = "prajjwal1/bert-tiny-mnli"
        self.input_column = "premise"
        self.second_input_column = "hypothesis"
        self.label_column = "label"
        self.pipe = DummyTextClassificationPipeline()
        self.evaluator = evaluator("text-classification")
        # label_mapping is for the dummy pipe; label_mapping2 matches the
        # three-way MNLI labels of the real model.
        self.label_mapping = {"NEGATIVE": 0.0, "POSITIVE": 1.0}
        self.label_mapping2 = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
    def test_pipe_init(self):
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            input_column=self.input_column,
            second_input_column=self.second_input_column,
            label_column="label",
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 1.0)
    @slow
    def test_model_init(self):
        results = self.evaluator.compute(
            model_or_pipeline=self.default_model,
            data=self.data,
            metric="accuracy",
            input_column=self.input_column,
            second_input_column=self.second_input_column,
            label_column=self.label_column,
            label_mapping=self.label_mapping2,
        )
        self.assertEqual(results["accuracy"], 1.0)
        model = AutoModelForSequenceClassification.from_pretrained(self.default_model)
        tokenizer = AutoTokenizer.from_pretrained(self.default_model)
        results = self.evaluator.compute(
            model_or_pipeline=model,
            data=self.data,
            metric="accuracy",
            input_column=self.input_column,
            second_input_column=self.second_input_column,
            tokenizer=tokenizer,
            label_mapping=self.label_mapping2,
        )
        self.assertEqual(results["accuracy"], 1.0)
class TestImageClassificationEvaluator(TestCase):
    """Image-classification evaluator tests using two synthetic PIL images;
    the dummy/real models never predict label 2, so accuracy is always 0."""
    def setUp(self):
        self.data = Dataset.from_dict(
            {
                "label": [2, 2],
                "image": [Image.new("RGB", (500, 500), (255, 255, 255)), Image.new("RGB", (500, 500), (170, 95, 170))],
            }
        )
        self.default_model = "lysandre/tiny-vit-random"
        self.pipe = DummyImageClassificationPipeline()
        self.evaluator = evaluator("image-classification")
        # Map the model's string labels back to the dataset's integer ids.
        self.label_mapping = AutoConfig.from_pretrained(self.default_model).label2id
    def test_pipe_init(self):
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
    @slow
    def test_model_init(self):
        results = self.evaluator.compute(
            model_or_pipeline=self.default_model,
            data=self.data,
            metric="accuracy",
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
        model = AutoModelForImageClassification.from_pretrained(self.default_model)
        feature_extractor = AutoFeatureExtractor.from_pretrained(self.default_model)
        results = self.evaluator.compute(
            model_or_pipeline=model,
            data=self.data,
            metric="accuracy",
            feature_extractor=feature_extractor,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
    def test_class_init(self):
        evaluator = ImageClassificationEvaluator()
        self.assertEqual(evaluator.task, "image-classification")
        self.assertIsNone(evaluator.default_metric_name)
        results = evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="accuracy",
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
    @slow
    def test_default_pipe_init(self):
        results = self.evaluator.compute(
            data=self.data,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
    def test_overwrite_default_metric(self):
        accuracy = load("accuracy")
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric=accuracy,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="accuracy",
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
class TestQuestionAnsweringEvaluator(TestCase):
    """Question-answering evaluator tests covering both squad (v1) data and
    squad_v2-style data containing unanswerable questions."""
    def setUp(self):
        self.data = Dataset.from_dict(
            {
                "id": ["56be4db0acb8001400a502ec", "56be4db0acb8001400a502ed"],
                "context": ["My name is Felix and I love cookies!", "Misa name is Felix and misa love cookies!"],
                "answers": [{"text": ["Felix"], "answer_start": [11]}, {"text": ["Felix"], "answer_start": [13]}],
                "question": ["What is my name?", "What is my name?"],
            }
        )
        # data_v2's second example has no gold answer (squad_v2 format).
        self.data_v2 = Dataset.from_dict(
            {
                "id": ["56be4db0acb8001400a502ec", "56be4db0acb8001400a502ed"],
                "context": ["My name is Felix and I love cookies!", "Let's explore the city!"],
                "answers": [{"text": ["Felix"], "answer_start": [11]}, {"text": [], "answer_start": []}],
                "question": ["What is my name?", "What is my name?"],
            }
        )
        self.default_model = "mrm8488/bert-tiny-finetuned-squadv2"
        self.pipe = DummyQuestionAnsweringPipeline(v2=False)
        self.pipe_v2 = DummyQuestionAnsweringPipeline(v2=True)
        self.evaluator = evaluator("question-answering")
    def test_pipe_init(self):
        # squad_v1-like dataset
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
        )
        self.assertEqual(results["exact_match"], 100.0)
        self.assertEqual(results["f1"], 100.0)
    @slow
    def test_model_init(self):
        # squad_v1-like dataset
        results = self.evaluator.compute(
            model_or_pipeline=self.default_model,
            data=self.data,
            metric="squad",
        )
        self.assertEqual(results["exact_match"], 0)
        self.assertEqual(results["f1"], 100 / 3)
        model = AutoModelForQuestionAnswering.from_pretrained(self.default_model)
        tokenizer = AutoTokenizer.from_pretrained(self.default_model)
        results = self.evaluator.compute(
            model_or_pipeline=model,
            data=self.data,
            metric="squad",
            tokenizer=tokenizer,
        )
        self.assertEqual(results["exact_match"], 0)
        self.assertEqual(results["f1"], 100 / 3)
    def test_class_init(self):
        # squad_v1-like dataset
        evaluator = QuestionAnsweringEvaluator()
        self.assertEqual(evaluator.task, "question-answering")
        self.assertIsNone(evaluator.default_metric_name)
        results = evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="squad",
        )
        self.assertEqual(results["exact_match"], 100.0)
        self.assertEqual(results["f1"], 100.0)
        # squad_v2-like dataset
        evaluator = QuestionAnsweringEvaluator()
        self.assertEqual(evaluator.task, "question-answering")
        self.assertIsNone(evaluator.default_metric_name)
        results = evaluator.compute(
            model_or_pipeline=self.pipe_v2,
            data=self.data_v2,
            metric="squad_v2",
        )
        self.assertDictEqual(
            {key: results[key] for key in ["HasAns_f1", "NoAns_f1"]}, {"HasAns_f1": 100.0, "NoAns_f1": 100.0}
        )
    @slow
    def test_default_pipe_init(self):
        # squad_v1-like dataset
        results = self.evaluator.compute(
            data=self.data,
        )
        self.assertEqual(results["exact_match"], 100.0)
        self.assertEqual(results["f1"], 100.0)
        # squad_v2-like dataset
        results = self.evaluator.compute(
            data=self.data_v2,
            metric="squad_v2",
        )
        self.assertDictEqual(
            {key: results[key] for key in ["HasAns_f1", "NoAns_f1"]}, {"HasAns_f1": 100.0, "NoAns_f1": 0.0}
        )
    def test_data_loading(self):
        # Test passing in dataset by name with data_split
        data = self.evaluator.load_data("evaluate/squad-ci", split="validation[:1]")
        self.evaluator.prepare_data(
            data=data, question_column="question", context_column="context", id_column="id", label_column="answers"
        )
        # Test passing in dataset by name without data_split and inferring the optimal split
        data = self.evaluator.load_data("evaluate/squad-ci")
        self.evaluator.prepare_data(
            data=data, question_column="question", context_column="context", id_column="id", label_column="answers"
        )
        # Test that it chooses the correct one (e.g. squad only has train and validation, but no test)
        self.assertEqual(data.split, "validation")
        # Test that the data point returned is correct; this maps to the first example in the squad-ci dataset
        self.assertEqual(data[0]["id"], "56be4db0acb8001400a502ec")
    def test_overwrite_default_metric(self):
        # squad_v1-like dataset
        squad = load("squad")
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric=squad,
        )
        self.assertEqual(results["exact_match"], 100.0)
        self.assertEqual(results["f1"], 100.0)
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="squad",
        )
        self.assertEqual(results["exact_match"], 100.0)
        self.assertEqual(results["f1"], 100.0)
class TestTokenClassificationEvaluator(TestCase):
    """Token-classification (NER) evaluator tests, including the
    word-to-character-offset helper and the alignment logic that maps
    pipeline predictions (keyed by char start) back onto words."""
    def setUp(self):
        features = Features(
            {
                "tokens": Sequence(feature=Value(dtype="string")),
                "ner_tags": Sequence(feature=ClassLabel(names=["O", "B-LOC", "I-LOC"])),
            }
        )
        self.data = Dataset.from_dict(
            {
                "tokens": [["New", "York", "a", "nice", "City", "."]],
                "ner_tags": [[1, 2, 0, 0, 1, 0]],
            },
            features=features,
        )
        self.default_model = "hf-internal-testing/tiny-bert-for-token-classification"
        self.pipe = DummyTokenClassificationPipeline()
        self.evaluator = evaluator("token-classification")
    @slow
    def test_model_init(self):
        results = self.evaluator.compute(
            model_or_pipeline=self.default_model,
            data=self.data,
            metric="seqeval",
        )
        self.assertEqual(results["overall_accuracy"], 0.5)
        model = AutoModelForTokenClassification.from_pretrained(self.default_model)
        tokenizer = AutoTokenizer.from_pretrained(self.default_model)
        results = self.evaluator.compute(
            model_or_pipeline=model,
            data=self.data,
            metric="seqeval",
            tokenizer=tokenizer,
        )
        self.assertEqual(results["overall_accuracy"], 0.5)
    def test_class_init(self):
        evaluator = TokenClassificationEvaluator()
        self.assertEqual(evaluator.task, "token-classification")
        self.assertIsNone(evaluator.default_metric_name)
        results = evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="seqeval",
        )
        self.assertEqual(results["overall_accuracy"], 1.0)
    @slow
    def test_default_pipe_init(self):
        results = self.evaluator.compute(
            data=self.data,
        )
        self.assertEqual(results["overall_accuracy"], 2 / 3)
    def test_overwrite_default_metric(self):
        accuracy = load("seqeval")
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric=accuracy,
        )
        self.assertEqual(results["overall_accuracy"], 1.0)
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="seqeval",
        )
        self.assertEqual(results["overall_accuracy"], 1.0)
    def test_data_loading(self):
        # Test passing in dataset by name with data_split
        data = self.evaluator.load_data("evaluate/conll2003-ci", split="validation[:1]")
        self.evaluator.prepare_data(
            data=data,
            input_column="tokens",
            label_column="ner_tags",
            join_by=" ",
        )
        # Test passing in dataset by name without data_split and inferring the optimal split
        data = self.evaluator.load_data("evaluate/conll2003-ci")
        self.evaluator.prepare_data(
            data=data,
            input_column="tokens",
            label_column="ner_tags",
            join_by=" ",
        )
        # Test that it chooses the correct one (e.g. conll2003 has train, validation, test but should select test)
        self.assertEqual(data.split, "test")
        # Test that the data point returned is correct; this maps to the first example in the dataset
        self.assertEqual(data[0]["id"], "0")
    def test_wrong_task(self):
        self.assertRaises(KeyError, evaluator, "bad_task")
    def test_words_to_offsets(self):
        task_evaluator = evaluator("token-classification")
        words = ["This", "is", "a", "test", "."]
        join_by = " "
        offsets = task_evaluator.words_to_offsets(words, join_by)
        self.assertListEqual([(0, 3), (5, 6), (8, 8), (10, 13), (15, 15)], offsets)
        # Multi-byte characters and empty join string.
        words = ["日", "本", "語", "はなせるの?"]
        join_by = ""
        offsets = task_evaluator.words_to_offsets(words, join_by)
        self.assertListEqual([(0, 0), (1, 1), (2, 2), (3, 8)], offsets)
    def test_predictions_processor(self):
        task_evaluator = evaluator("token-classification")
        join_by = " "
        words = [["New", "York", "a", "nice", "City", "."]]
        # aligned start and words
        predictions = [
            [
                {"start": 0, "entity": "B-LOC"},
                {"start": 2, "entity": "I-LOC"},
                {"start": 4, "entity": "I-LOC"},
                {"start": 9, "entity": "O"},
                {"start": 11, "entity": "O"},
                {"start": 16, "entity": "B-LOC"},
                {"start": 21, "entity": "O"},
            ]
        ]
        predictions = task_evaluator.predictions_processor(predictions, words, join_by)
        self.assertListEqual(predictions["predictions"][0], ["B-LOC", "I-LOC", "O", "O", "B-LOC", "O"])
        # non-aligned start and words
        predictions = [
            [
                {"start": 0, "entity": "B-LOC"},
                {"start": 2, "entity": "I-LOC"},
                {"start": 9, "entity": "O"},
                {"start": 11, "entity": "O"},
                {"start": 16, "entity": "B-LOC"},
                {"start": 21, "entity": "O"},
            ]
        ]
        predictions = task_evaluator.predictions_processor(predictions, words, join_by)
        self.assertListEqual(predictions["predictions"][0], ["B-LOC", "O", "O", "O", "B-LOC", "O"])
        # non-aligned start and words
        predictions = [
            [
                {"start": 0, "entity": "B-LOC"},
                {"start": 6, "entity": "I-LOC"},
                {"start": 9, "entity": "O"},
                {"start": 11, "entity": "O"},
                {"start": 16, "entity": "B-LOC"},
                {"start": 21, "entity": "O"},
            ]
        ]
        predictions = task_evaluator.predictions_processor(predictions, words, join_by)
        self.assertListEqual(predictions["predictions"][0], ["B-LOC", "O", "O", "O", "B-LOC", "O"])
        # non-aligned start and words
        predictions = [
            [
                {"start": 0, "entity": "B-LOC"},
                {"start": 9, "entity": "O"},
                {"start": 11, "entity": "O"},
                {"start": 16, "entity": "B-LOC"},
                {"start": 21, "entity": "O"},
            ]
        ]
        predictions = task_evaluator.predictions_processor(predictions, words, join_by)
        self.assertListEqual(predictions["predictions"][0], ["B-LOC", "O", "O", "O", "B-LOC", "O"])
class TestTextGenerationEvaluator(TestCase):
    """Text-generation evaluator tests, including flattening of multiple
    return sequences into a single prediction list."""
    def setUp(self):
        self.data = Dataset.from_dict({"text": ["Lorem ipsum"]})
        self.pipe = DummyTextGenerationPipeline(num_return_sequences=4)
        self.evaluator = evaluator("text-generation")
    def test_class_init(self):
        evaluator = TextGenerationEvaluator()
        self.assertEqual(evaluator.task, "text-generation")
        self.assertIsNone(evaluator.default_metric_name)
        results = evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="word_count",
        )
        self.assertIsInstance(results["unique_words"], int)
    @slow
    def test_default_pipe_init(self):
        results = self.evaluator.compute(data=self.data)
        self.assertIsInstance(results["unique_words"], int)
    def test_overwrite_default_metric(self):
        word_length = load("word_length")
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric=word_length,
        )
        self.assertIsInstance(results["average_word_length"], int)
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="word_length",
        )
        self.assertIsInstance(results["average_word_length"], int)
    def test_process_predictions_multiple_return_sequences(self):
        # Multiple candidates per input are flattened into one flat list.
        processed_predictions = self.evaluator.predictions_processor(
            [
                [{"generated_text": "A"}, {"generated_text": "B"}],
                [{"generated_text": "C"}, {"generated_text": "D"}],
            ]
        )
        self.assertEqual(processed_predictions, {"data": ["A", "B", "C", "D"]})
class TestText2TextGenerationEvaluator(TestCase):
    """Text2text-generation evaluator tests, plus the summarization and
    translation task aliases that reuse the same machinery."""
    def setUp(self):
        self.data = Dataset.from_dict(
            {
                "text": ["Lorem ipsum"] * 4,
                "label": ["Ipsum Lorem"] * 4,
            }
        )
        self.pipe = DummyText2TextGenerationPipeline()
        self.evaluator = evaluator("text2text-generation")
    def test_pipe_init(self):
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
        )
        self.assertEqual(results["bleu"], 0)
    def test_class_init(self):
        evaluator = Text2TextGenerationEvaluator()
        self.assertEqual(evaluator.task, "text2text-generation")
        self.assertIsNone(evaluator.default_metric_name)
        results = evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="bleu",
        )
        self.assertEqual(results["bleu"], 0)
    @slow
    def test_default_pipe_init(self):
        results = self.evaluator.compute(data=self.data)
        self.assertEqual(results["bleu"], 0)
    def test_overwrite_default_metric(self):
        rouge = load("rouge")
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric=rouge,
        )
        self.assertEqual(results["rouge1"], 1.0)
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="rouge",
        )
        self.assertEqual(results["rouge1"], 1.0)
    def test_summarization(self):
        # The summarization pipeline returns "summary_text" keys.
        pipe = DummyText2TextGenerationPipeline(task="summarization", prefix="summary")
        e = evaluator("summarization")
        results = e.compute(
            model_or_pipeline=pipe,
            data=self.data,
        )
        self.assertEqual(results["rouge1"], 1.0)
    def test_translation(self):
        # The translation pipeline returns "translation_text" keys.
        pipe = DummyText2TextGenerationPipeline(task="translation", prefix="translation")
        e = evaluator("translation")
        results = e.compute(
            model_or_pipeline=pipe,
            data=self.data,
        )
        self.assertEqual(results["bleu"], 0)
class TestAutomaticSpeechRecognitionEvaluator(TestCase):
    """Automatic-speech-recognition evaluator tests (WER and CER metrics)."""

    def setUp(self):
        self.data = Dataset.from_dict(
            {
                "path": [
                    # Examples copied from default speech model of
                    # `automic-speech-recognition` pipeline:
                    # https://huggingface.co/facebook/wav2vec2-base-960h
                    # https://github.com/huggingface/transformers/blob/main/src/transformers/pipelines/__init__.py#L161
                    "https://cdn-media.huggingface.co/speech_samples/sample1.flac",
                    "https://cdn-media.huggingface.co/speech_samples/sample2.flac",
                ],
                "sentence": ["Ipsum Lorem"] * 2,
            }
        )
        self.pipe = DummyAutomaticSpeechRecognitionPipeline()
        self.evaluator = evaluator("automatic-speech-recognition")

    def test_pipe_init(self):
        # Fixed: removed leftover debug `print()` calls that cluttered
        # the test output without asserting anything.
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
        )
        self.assertEqual(results["wer"], 1.0)

    def test_class_init(self):
        evaluator = AutomaticSpeechRecognitionEvaluator()
        self.assertEqual(evaluator.task, "automatic-speech-recognition")
        self.assertIsNone(evaluator.default_metric_name)
        results = evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="wer",
        )
        self.assertEqual(results["wer"], 1.0)

    @slow
    def test_default_pipe_init(self):
        results = self.evaluator.compute(data=self.data)
        self.assertGreater(results["wer"], 1.0)

    def test_overwrite_default_metric(self):
        cer = load("cer")
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric=cer,
        )
        self.assertEqual(results["cer"], 0.7272727272727273)
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="cer",
        )
        self.assertEqual(results["cer"], 0.7272727272727273)
class TestAudioClassificationEvaluator(TestCase):
    """Audio-classification evaluator tests, covering both file-path inputs
    and raw numpy waveform inputs (via `input_column="audio"`)."""
    def setUp(self):
        self.data = Dataset.from_dict(
            {"file": ["https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac"], "label": [11]}
        )
        # Raw-waveform variant of the same task: a short float32 sample.
        self.raw_data = Dataset.from_dict(
            {
                "audio": [
                    np.array(
                        [-0.00048828, -0.00018311, -0.00137329, 0.00079346, 0.00091553, 0.00085449], dtype=np.float32
                    )
                ],
                "label": [11],
            }
        )
        self.default_model = "superb/wav2vec2-base-superb-ks"
        self.pipe = DummyAudioClassificationPipeline()
        self.evaluator = evaluator("audio-classification")
        self.label_mapping = AutoConfig.from_pretrained(self.default_model).label2id
    def test_pipe_init(self):
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
    def test_raw_pipe_init(self):
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe, data=self.raw_data, label_mapping=self.label_mapping, input_column="audio"
        )
        self.assertEqual(results["accuracy"], 0)
    @slow
    def test_model_init(self):
        results = self.evaluator.compute(
            model_or_pipeline=self.default_model,
            data=self.data,
            metric="accuracy",
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
        model = AutoModelForAudioClassification.from_pretrained(self.default_model)
        feature_extractor = AutoFeatureExtractor.from_pretrained(self.default_model)
        results = self.evaluator.compute(
            model_or_pipeline=model,
            data=self.data,
            metric="accuracy",
            feature_extractor=feature_extractor,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
    def test_class_init(self):
        evaluator = AudioClassificationEvaluator()
        self.assertEqual(evaluator.task, "audio-classification")
        self.assertIsNone(evaluator.default_metric_name)
        results = evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="accuracy",
            label_mapping=self.label_mapping,
        )
        results_raw = evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.raw_data,
            label_mapping=self.label_mapping,
            metric="accuracy",
            input_column="audio",
        )
        self.assertEqual(results_raw["accuracy"], 0)
        self.assertEqual(results["accuracy"], 0)
    @slow
    def test_default_pipe_init(self):
        results = self.evaluator.compute(
            data=self.data,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
    def test_overwrite_default_metric(self):
        accuracy = load("accuracy")
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric=accuracy,
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
        results = self.evaluator.compute(
            model_or_pipeline=self.pipe,
            data=self.data,
            metric="accuracy",
            label_mapping=self.label_mapping,
        )
        self.assertEqual(results["accuracy"], 0)
| 41,784 | 35.461606 | 119 | py |
evaluate | evaluate-main/tests/utils.py | import os
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from pathlib import Path
from unittest.mock import patch
from evaluate import config
def parse_flag_from_env(key, default=False):
    """Parse a boolean flag from the environment variable *key*.

    Returns *default* when the variable is unset. When set, the value is
    interpreted with the same truth table as ``distutils.util.strtobool``
    (1 for "y"/"yes"/"t"/"true"/"on"/"1"; 0 for "n"/"no"/"f"/"false"/
    "off"/"0", case-insensitive); any other value raises ``ValueError``.
    The parsing is inlined here because ``distutils`` was removed from the
    standard library in Python 3.12 (PEP 632).
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    # KEY is set, convert it to 1 or 0 (strtobool-compatible).
    normalized = value.lower()
    if normalized in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if normalized in ("n", "no", "f", "false", "off", "0"):
        return 0
    # More values are supported, but let's keep the message simple.
    raise ValueError(f"If set, {key} must be yes or no.")
# Opt-in/opt-out switches for test categories, read once at import time from
# the environment. Slow/remote tests are off by default; local/packaged on.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
def require_beam(test_case):
    """
    Decorator marking a test that requires Apache Beam.

    These tests are skipped when Apache Beam isn't installed.
    """
    # NOTE(review): despite the name and docstring, this checks
    # config.TORCH_AVAILABLE and skips with a PyTorch message — it looks
    # copy-pasted from require_torch. Confirm whether a Beam availability
    # flag was intended before relying on this decorator.
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case
def require_faiss(test_case):
    """Skip *test_case* unless the ``faiss`` package can be imported."""
    try:
        import faiss  # noqa
    except ImportError:
        return unittest.skip("test requires faiss")(test_case)
    return test_case
def require_regex(test_case):
    """Skip *test_case* unless the ``regex`` package can be imported."""
    try:
        import regex  # noqa
    except ImportError:
        return unittest.skip("test requires regex")(test_case)
    return test_case
def require_elasticsearch(test_case):
    """Skip *test_case* unless the ``elasticsearch`` package can be imported."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        return unittest.skip("test requires elasticsearch")(test_case)
    return test_case
def require_torch(test_case):
    """Skip *test_case* unless PyTorch is available per ``evaluate.config``."""
    if config.TORCH_AVAILABLE:
        return test_case
    return unittest.skip("test requires PyTorch")(test_case)
def require_tf(test_case):
    """Skip *test_case* unless TensorFlow is available per ``evaluate.config``."""
    if config.TF_AVAILABLE:
        return test_case
    return unittest.skip("test requires TensorFlow")(test_case)
def require_jax(test_case):
    """Skip *test_case* unless JAX is available per ``evaluate.config``."""
    if config.JAX_AVAILABLE:
        return test_case
    return unittest.skip("test requires JAX")(test_case)
def require_pil(test_case):
    """Skip *test_case* unless Pillow is available per ``evaluate.config``."""
    if config.PIL_AVAILABLE:
        return test_case
    return unittest.skip("test requires Pillow")(test_case)
def require_transformers(test_case):
    """Skip *test_case* unless ``transformers`` can be imported."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    return test_case
def slow(test_case):
    """
    Decorator marking a test as slow.

    Slow tests are skipped by default. Set the RUN_SLOW environment variable
    to a truthy value to run them.
    """
    if _run_slow_tests and _run_slow_tests != 0:
        return test_case
    return unittest.skip("test is slow")(test_case)
def local(test_case):
    """
    Decorator marking a test as local.

    Local tests are run by default. Set the RUN_LOCAL environment variable
    to a falsy value to not run them.
    """
    if _run_local_tests and _run_local_tests != 0:
        return test_case
    return unittest.skip("test is local")(test_case)
def packaged(test_case):
    """
    Decorator marking a test as packaged.

    Packaged tests are run by default. Set the RUN_PACKAGED environment
    variable to a falsy value to not run them.
    """
    if _run_packaged_tests and _run_packaged_tests != 0:
        return test_case
    return unittest.skip("test is packaged")(test_case)
def remote(test_case):
    """
    Decorator marking a test that relies on GitHub or the Hugging Face Hub.

    Remote tests are skipped by default. Set the RUN_REMOTE environment
    variable to a truthy value to run them.
    """
    if _run_remote_tests and _run_remote_tests != 0:
        return test_case
    return unittest.skip("test requires remote")(test_case)
def for_all_test_methods(*decorators):
def decorate(cls):
for name, fn in cls.__dict__.items():
if callable(fn) and name.startswith("test"):
for decorator in decorators:
fn = decorator(fn)
setattr(cls, name, fn)
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """Raised by the offline simulator when a request is made without a timeout."""

    pass
class OfflineSimulationMode(Enum):
    """How the ``offline`` context manager simulates loss of connectivity."""

    # Every network call raises (socket.socket is mocked).
    CONNECTION_FAILS = 0
    # Connections hang until they hit a (tiny) timeout (requests.request is mocked).
    CONNECTION_TIMES_OUT = 1
    # The library's HF_EVALUATE_OFFLINE switch is patched to True.
    HF_EVALUATE_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """
    Simulate offline mode.

    There are three offline simulation modes:

    CONNECTION_FAILS (default mode): a ConnectionError is raised for each network call.
    Connection errors are created by mocking socket.socket
    CONNECTION_TIMES_OUT: the connection hangs until it times out.
    The default timeout value is low (1e-16) to speed up the tests.
    Timeout errors are created by mocking requests.request
    HF_EVALUATE_OFFLINE_SET_TO_1: the HF_EVALUATE_OFFLINE environment variable is set to 1.
    This makes the http/ftp calls of the library instantly fail and raise an offline-mode error.
    """
    from requests import request as online_request

    def timeout_request(method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def offline_socket(*args, **kwargs):
        # Any socket creation fails immediately, as if the host had no network.
        raise OSError("Offline mode is enabled.")

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        # inspired from https://stackoverflow.com/a/18601897
        with patch("socket.socket", offline_socket):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.request", timeout_request):
            with patch("requests.api.request", timeout_request):
                yield
    elif mode is OfflineSimulationMode.HF_EVALUATE_OFFLINE_SET_TO_1:
        with patch("evaluate.config.HF_EVALUATE_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Temporarily chdir into a fresh temporary directory.

    Extra arguments are forwarded to ``tempfile.TemporaryDirectory``. The
    previous working directory is restored (and the temp dir deleted) on
    exit, even if the body raises.
    """
    previous_cwd = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(previous_cwd)
def is_rng_equal(rng1, rng2):
    """Return True if two numpy ``Generator`` objects are in the same state.

    Compares the next ten draws taken from deep copies, so neither
    argument's state is consumed.
    """
    first, second = (deepcopy(rng).integers(0, 100, 10).tolist() for rng in (rng1, rng2))
    return first == second
| 8,548 | 28.378007 | 108 | py |
evaluate | evaluate-main/tests/test_metric_common.py | # Copyright 2020 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import evaluate
from evaluate import load
from .utils import _run_slow_tests, for_all_test_methods, local, slow
# Modules whose doctests need fairseq installed.
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

# Modules whose doctests cannot run on Windows.
UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

# Modules whose doctests are slow (skipped unless RUN_SLOW is set).
SLOW_METRIC = {"perplexity", "regard", "toxicity"}
def skip_if_metric_requires_fairseq(test_case):
    """Skip fairseq-dependent modules when fairseq is not installed."""

    @wraps(test_case)
    def wrapper(self, evaluation_module_name, evaluation_module_type):
        if _has_fairseq or evaluation_module_name not in REQUIRE_FAIRSEQ:
            test_case(self, evaluation_module_name, evaluation_module_type)
        else:
            self.skipTest('"test requires Fairseq"')

    return wrapper
def skip_on_windows_if_not_windows_compatible(test_case):
    """Skip Windows-incompatible modules when running on Windows."""

    @wraps(test_case)
    def wrapper(self, evaluation_module_name, evaluation_module_type):
        if not _on_windows or evaluation_module_name not in UNSUPPORTED_ON_WINDOWS:
            test_case(self, evaluation_module_name, evaluation_module_type)
        else:
            self.skipTest('"test not supported on Windows"')

    return wrapper
def skip_slow_metrics(test_case):
    """Skip slow modules unless slow tests were enabled via RUN_SLOW."""

    @wraps(test_case)
    def wrapper(self, evaluation_module_name, evaluation_module_type):
        if _run_slow_tests or evaluation_module_name not in SLOW_METRIC:
            test_case(self, evaluation_module_name, evaluation_module_type)
        else:
            self.skipTest('"test is slow"')

    return wrapper
def get_local_module_names():
    """Enumerate locally checked-out metrics/comparisons/measurements.

    Returns a list of parameter dicts for ``parameterized.named_parameters``,
    skipping the unfinished ``gleu`` module.
    """
    cases = []
    for module_type, pattern in (
        ("metric", "./metrics/*/"),
        ("comparison", "./comparisons/*/"),
        ("measurement", "./measurements/*/"),
    ):
        for module_dir in glob.glob(pattern):
            name = module_dir.split(os.sep)[-2]
            if name == "gleu":  # gleu is unfinished
                continue
            cases.append(
                {
                    "testcase_name": f"{module_type}_{name}",
                    "evaluation_module_name": name,
                    "evaluation_module_type": module_type,
                }
            )
    return cases
@parameterized.named_parameters(get_local_module_names())
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_on_windows_if_not_windows_compatible, skip_slow_metrics)
@local
class LocalModuleTest(parameterized.TestCase):
    """Runs each local metric/comparison/measurement module's doctests.

    Parameterized over every module found by ``get_local_module_names``;
    heavyweight model calls are stubbed via ``INTENSIVE_CALLS_PATCHER``.
    """

    # Maps evaluation_module_name -> context-manager factory that stubs the
    # module's expensive model calls (registered via
    # ``register_intensive_calls_patcher`` below).
    INTENSIVE_CALLS_PATCHER = {}
    evaluation_module_name = None
    evaluation_module_type = None

    def test_load(self, evaluation_module_name, evaluation_module_type):
        """Import the module, validate its signature, and run its doctests."""
        doctest.ELLIPSIS_MARKER = "[...]"
        evaluation_module = importlib.import_module(
            evaluate.loading.evaluation_module_factory(
                os.path.join(evaluation_module_type + "s", evaluation_module_name), module_type=evaluation_module_type
            ).module_path
        )
        evaluation_instance = evaluate.loading.import_main_class(evaluation_module.__name__)
        # check parameters
        parameters = inspect.signature(evaluation_instance._compute).parameters
        self.assertTrue(all([p.kind != p.VAR_KEYWORD for p in parameters.values()]))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(evaluation_module_name, evaluation_module.__name__):
            with self.use_local_metrics(evaluation_module_type):
                try:
                    results = doctest.testmod(evaluation_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, evaluation_module_name, evaluation_module_type):
        """Same as ``test_load`` but without stubbing the intensive calls."""
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            evaluate.loading.evaluation_module_factory(
                os.path.join(evaluation_module_type, evaluation_module_name)
            ).module_path
        )
        # run doctest
        # NOTE(review): use_local_metrics() is called without the
        # evaluation_module_type argument its signature requires — this slow
        # path would raise TypeError if executed; confirm intended usage.
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, evaluation_module_name, module_name):
        """Apply the registered patcher for this module, if any."""
        if evaluation_module_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[evaluation_module_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self, evaluation_module_type):
        """Redirect ``evaluate.load`` to the local module checkouts."""

        def load_local_metric(evaluation_module_name, *args, **kwargs):
            return load(os.path.join(evaluation_module_type + "s", evaluation_module_name), *args, **kwargs)

        with patch("evaluate.load") as mock_load:
            mock_load.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, evaluation_module_name):
        """Decorator registering a generator as this module's call patcher."""

        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[evaluation_module_name] = patcher
            return patcher

        return wrapper
# Metrics intensive calls patchers
# --------------------------------
@LocalModuleTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    """Stub BLEURT's TF predictor so its doctests run without a model download."""
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalModuleTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    """Stub BERTScore's model download and forward pass for doctests."""
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalModuleTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    """Stub COMET's checkpoint download and model prediction for doctests."""

    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a comet checkpoint
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        # mock load_from_checkpoint which is supposed to load that checkpoint
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    """seqeval must reject unknown tagging schemes with a descriptive error."""
    metric = load(os.path.join("metrics", "seqeval"))
    bad_scheme = "ERROR"
    expected_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {bad_scheme}"
    with pytest.raises(ValueError, match=re.escape(expected_message)):
        metric.compute(predictions=[], references=[], scheme=bad_scheme)
| 8,676 | 37.057018 | 118 | py |
evaluate | evaluate-main/tests/test_trainer_evaluator_parity.py | import json
import os
import shutil
import subprocess
import tempfile
import unittest
import numpy as np
import torch
import transformers
from datasets import load_dataset
from transformers import AutoFeatureExtractor, AutoModelForImageClassification, Trainer, TrainingArguments, pipeline
from evaluate import evaluator, load
from .utils import slow
class TestEvaluatorTrainerParity(unittest.TestCase):
    """Checks that ``evaluate.evaluator`` reproduces the metrics reported by
    the reference ``transformers`` example scripts / ``Trainer``.

    Each test runs the official example script (or ``Trainer.evaluate``) on a
    small slice of a dataset, then computes the same metric through the task
    evaluator and asserts exact equality. Requires network access (clones
    the transformers repo, downloads models and datasets).
    """

    def setUp(self):
        # Shallow, sparse clone of the transformers repo at the tag matching
        # the installed version so the example scripts stay compatible.
        self.dir_path = tempfile.mkdtemp("evaluator_trainer_parity_test")

        transformers_version = transformers.__version__
        branch = ""
        if not transformers_version.endswith(".dev0"):
            branch = f"--branch v{transformers_version}"
        subprocess.run(
            f"git clone --depth 3 --filter=blob:none --sparse {branch} https://github.com/huggingface/transformers",
            shell=True,
            cwd=self.dir_path,
        )

    def tearDown(self):
        shutil.rmtree(self.dir_path, ignore_errors=True)

    def test_text_classification_parity(self):
        """run_glue.py on SST-2 and the text-classification evaluator must agree."""
        model_name = "philschmid/tiny-bert-sst2-distilled"

        subprocess.run(
            "git sparse-checkout set examples/pytorch/text-classification",
            shell=True,
            cwd=os.path.join(self.dir_path, "transformers"),
        )

        subprocess.run(
            f"python examples/pytorch/text-classification/run_glue.py"
            f" --model_name_or_path {model_name}"
            f" --task_name sst2"
            f" --do_eval"
            f" --max_seq_length 9999999999"  # rely on tokenizer.model_max_length for max_length
            f" --output_dir {os.path.join(self.dir_path, 'textclassification_sst2_transformers')}"
            f" --max_eval_samples 80",
            shell=True,
            cwd=os.path.join(self.dir_path, "transformers"),
        )

        with open(
            f"{os.path.join(self.dir_path, 'textclassification_sst2_transformers', 'eval_results.json')}", "r"
        ) as f:
            transformers_results = json.load(f)

        eval_dataset = load_dataset("glue", "sst2", split="validation[:80]")

        pipe = pipeline(task="text-classification", model=model_name, tokenizer=model_name)

        task_evaluator = evaluator(task="text-classification")
        evaluator_results = task_evaluator.compute(
            model_or_pipeline=pipe,
            data=eval_dataset,
            metric="accuracy",
            input_column="sentence",
            label_column="label",
            label_mapping={"negative": 0, "positive": 1},
            strategy="simple",
        )

        self.assertEqual(transformers_results["eval_accuracy"], evaluator_results["accuracy"])

    @slow
    def test_text_classification_parity_two_columns(self):
        """Parity on MNLI, which feeds two input columns (premise/hypothesis)."""
        model_name = "prajjwal1/bert-tiny-mnli"
        max_eval_samples = 150

        subprocess.run(
            "git sparse-checkout set examples/pytorch/text-classification",
            shell=True,
            cwd=os.path.join(self.dir_path, "transformers"),
        )

        subprocess.run(
            f"python examples/pytorch/text-classification/run_glue.py"
            f" --model_name_or_path {model_name}"
            f" --task_name mnli"
            f" --do_eval"
            f" --max_seq_length 256"
            f" --output_dir {os.path.join(self.dir_path, 'textclassification_mnli_transformers')}"
            f" --max_eval_samples {max_eval_samples}",
            shell=True,
            cwd=os.path.join(self.dir_path, "transformers"),
        )

        with open(
            f"{os.path.join(self.dir_path, 'textclassification_mnli_transformers', 'eval_results.json')}", "r"
        ) as f:
            transformers_results = json.load(f)

        eval_dataset = load_dataset("glue", "mnli", split=f"validation_matched[:{max_eval_samples}]")

        pipe = pipeline(task="text-classification", model=model_name, tokenizer=model_name, max_length=256)

        task_evaluator = evaluator(task="text-classification")
        evaluator_results = task_evaluator.compute(
            model_or_pipeline=pipe,
            data=eval_dataset,
            metric="accuracy",
            input_column="premise",
            second_input_column="hypothesis",
            label_column="label",
            label_mapping={"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2},
        )

        self.assertEqual(transformers_results["eval_accuracy"], evaluator_results["accuracy"])

    def test_image_classification_parity(self):
        """``Trainer.evaluate`` and the image-classification evaluator must agree."""
        # we can not compare to the Pytorch transformers example, that uses custom preprocessing on the images
        model_name = "douwekiela/resnet-18-finetuned-dogfood"
        dataset_name = "beans"
        max_eval_samples = 120

        raw_dataset = load_dataset(dataset_name, split="validation")
        eval_dataset = raw_dataset.select(range(max_eval_samples))

        feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
        model = AutoModelForImageClassification.from_pretrained(model_name)

        def collate_fn(examples):
            # Preprocess each image with the model's feature extractor and
            # batch pixel values / labels into tensors for the Trainer.
            pixel_values = torch.stack(
                [torch.tensor(feature_extractor(example["image"])["pixel_values"][0]) for example in examples]
            )
            labels = torch.tensor([example["labels"] for example in examples])
            return {"pixel_values": pixel_values, "labels": labels}

        metric = load("accuracy")
        trainer = Trainer(
            model=model,
            args=TrainingArguments(
                output_dir=os.path.join(self.dir_path, "imageclassification_beans_transformers"),
                remove_unused_columns=False,
            ),
            train_dataset=None,
            eval_dataset=eval_dataset,
            compute_metrics=lambda p: metric.compute(
                predictions=np.argmax(p.predictions, axis=1), references=p.label_ids
            ),
            tokenizer=None,
            data_collator=collate_fn,
        )

        metrics = trainer.evaluate()
        trainer.save_metrics("eval", metrics)

        with open(
            f"{os.path.join(self.dir_path, 'imageclassification_beans_transformers', 'eval_results.json')}", "r"
        ) as f:
            transformers_results = json.load(f)

        pipe = pipeline(task="image-classification", model=model_name, feature_extractor=model_name)

        task_evaluator = evaluator(task="image-classification")
        evaluator_results = task_evaluator.compute(
            model_or_pipeline=pipe,
            data=eval_dataset,
            metric="accuracy",
            input_column="image",
            label_column="labels",
            label_mapping=model.config.label2id,
            strategy="simple",
        )

        self.assertEqual(transformers_results["eval_accuracy"], evaluator_results["accuracy"])

    def test_question_answering_parity(self):
        """run_qa.py and the question-answering evaluator must agree on SQuAD v1 and v2."""
        model_name_v1 = "anas-awadalla/bert-tiny-finetuned-squad"
        model_name_v2 = "mrm8488/bert-tiny-finetuned-squadv2"

        subprocess.run(
            "git sparse-checkout set examples/pytorch/question-answering",
            shell=True,
            cwd=os.path.join(self.dir_path, "transformers"),
        )

        # test squad_v1-like dataset
        subprocess.run(
            f"python examples/pytorch/question-answering/run_qa.py"
            f" --model_name_or_path {model_name_v1}"
            f" --dataset_name squad"
            f" --do_eval"
            f" --output_dir {os.path.join(self.dir_path, 'questionanswering_squad_transformers')}"
            f" --max_eval_samples 100"
            f" --max_seq_length 384",
            shell=True,
            cwd=os.path.join(self.dir_path, "transformers"),
        )

        with open(
            f"{os.path.join(self.dir_path, 'questionanswering_squad_transformers', 'eval_results.json')}", "r"
        ) as f:
            transformers_results = json.load(f)

        eval_dataset = load_dataset("squad", split="validation[:100]")

        pipe = pipeline(
            task="question-answering",
            model=model_name_v1,
            tokenizer=model_name_v1,
            max_answer_len=30,
            padding="max_length",
        )
        task_evaluator = evaluator(task="question-answering")
        evaluator_results = task_evaluator.compute(
            model_or_pipeline=pipe,
            data=eval_dataset,
            metric="squad",
            strategy="simple",
        )

        self.assertEqual(transformers_results["eval_f1"], evaluator_results["f1"])
        self.assertEqual(transformers_results["eval_exact_match"], evaluator_results["exact_match"])

        # test squad_v2-like dataset
        subprocess.run(
            f"python examples/pytorch/question-answering/run_qa.py"
            f" --model_name_or_path {model_name_v2}"
            f" --dataset_name squad_v2"
            f" --version_2_with_negative"
            f" --do_eval"
            f" --output_dir {os.path.join(self.dir_path, 'questionanswering_squadv2_transformers')}"
            f" --max_eval_samples 100"
            f" --max_seq_length 384",
            shell=True,
            cwd=os.path.join(self.dir_path, "transformers"),
        )

        with open(
            f"{os.path.join(self.dir_path, 'questionanswering_squadv2_transformers', 'eval_results.json')}", "r"
        ) as f:
            transformers_results = json.load(f)

        eval_dataset = load_dataset("squad_v2", split="validation[:100]")

        pipe = pipeline(
            task="question-answering",
            model=model_name_v2,
            tokenizer=model_name_v2,
            max_answer_len=30,
        )
        task_evaluator = evaluator(task="question-answering")
        evaluator_results = task_evaluator.compute(
            model_or_pipeline=pipe,
            data=eval_dataset,
            metric="squad_v2",
            strategy="simple",
            squad_v2_format=True,
        )

        self.assertEqual(transformers_results["eval_f1"], evaluator_results["f1"])
        self.assertEqual(transformers_results["eval_HasAns_f1"], evaluator_results["HasAns_f1"])
        self.assertEqual(transformers_results["eval_NoAns_f1"], evaluator_results["NoAns_f1"])

    def test_token_classification_parity(self):
        """run_ner.py and the token-classification evaluator must agree on CoNLL-2003."""
        model_name = "hf-internal-testing/tiny-bert-for-token-classification"
        n_samples = 500

        subprocess.run(
            "git sparse-checkout set examples/pytorch/token-classification",
            shell=True,
            cwd=os.path.join(self.dir_path, "transformers"),
        )

        subprocess.run(
            f"python examples/pytorch/token-classification/run_ner.py"
            f" --model_name_or_path {model_name}"
            f" --dataset_name conll2003"
            f" --do_eval"
            f" --output_dir {os.path.join(self.dir_path, 'tokenclassification_conll2003_transformers')}"
            f" --max_eval_samples {n_samples}",
            shell=True,
            cwd=os.path.join(self.dir_path, "transformers"),
        )

        with open(
            os.path.join(self.dir_path, "tokenclassification_conll2003_transformers", "eval_results.json"), "r"
        ) as f:
            transformers_results = json.load(f)

        eval_dataset = load_dataset("conll2003", split=f"validation[:{n_samples}]")

        pipe = pipeline(task="token-classification", model=model_name)

        e = evaluator(task="token-classification")
        evaluator_results = e.compute(
            model_or_pipeline=pipe,
            data=eval_dataset,
            metric="seqeval",
            input_column="tokens",
            label_column="ner_tags",
            strategy="simple",
        )

        self.assertEqual(transformers_results["eval_accuracy"], evaluator_results["overall_accuracy"])
        self.assertEqual(transformers_results["eval_f1"], evaluator_results["overall_f1"])
| 11,804 | 36.595541 | 116 | py |
evaluate | evaluate-main/tests/test_metric.py | import os
import pickle
import tempfile
import time
from multiprocessing import Pool
from unittest import TestCase, mock
import pytest
from datasets.features import Features, Sequence, Value
from evaluate.module import EvaluationModule, EvaluationModuleInfo, combine
from .utils import require_tf, require_torch
class DummyMetric(EvaluationModule):
    """Tiny metric used to exercise the EvaluationModule machinery.

    Reports exact-match accuracy plus whether predictions and references
    contain the same set of values. The classmethods supply paired fixtures
    and their expected scores for single-process, distributed, and separate
    runs.
    """

    def _info(self):
        return EvaluationModuleInfo(
            description="dummy metric for tests",
            citation="insert citation here",
            features=Features({"predictions": Value("int64"), "references": Value("int64")}),
        )

    def _compute(self, predictions, references):
        if not predictions:
            return {}
        matches = sum(1 for p, r in zip(predictions, references) if p == r)
        scores = {"accuracy": matches / len(predictions)}
        try:
            scores["set_equality"] = set(predictions) == set(references)
        except TypeError:
            # Unhashable values cannot be compared as sets.
            scores["set_equality"] = None
        return scores

    @classmethod
    def predictions_and_references(cls):
        return [1, 2, 3, 4], [1, 2, 4, 3]

    @classmethod
    def predictions_and_references_strings(cls):
        return ["a", "b", "c", "d"], ["a", "b", "d", "c"]

    @classmethod
    def expected_results(cls):
        return {"accuracy": 0.5, "set_equality": True}

    @classmethod
    def other_predictions_and_references(cls):
        return [1, 3, 4, 5], [1, 2, 3, 4]

    @classmethod
    def other_expected_results(cls):
        return {"accuracy": 0.25, "set_equality": False}

    @classmethod
    def distributed_predictions_and_references(cls):
        # One (preds, refs) pair per simulated process.
        return ([1, 2, 3, 4], [1, 2, 3, 4]), ([1, 2, 4, 5], [1, 2, 3, 4])

    @classmethod
    def distributed_expected_results(cls):
        return {"accuracy": 0.75, "set_equality": False}

    @classmethod
    def separate_predictions_and_references(cls):
        return ([1, 2, 3, 4], [1, 2, 3, 4]), ([1, 2, 4, 5], [1, 2, 3, 4])

    @classmethod
    def separate_expected_results(cls):
        return [{"accuracy": 1.0, "set_equality": True}, {"accuracy": 0.5, "set_equality": False}]
class AnotherDummyMetric(EvaluationModule):
    """Second trivial metric whose result always disagrees with DummyMetric."""

    def _info(self):
        return EvaluationModuleInfo(
            description="another dummy metric for tests",
            citation="insert citation here",
            features=Features({"predictions": Value("int64"), "references": Value("int64")}),
        )

    def _compute(self, predictions, references):
        # Constant output, independent of the inputs.
        return {"set_equality": False}

    @classmethod
    def expected_results(cls):
        return {"set_equality": False}
def properly_del_metric(metric):
    """Release a metric's locks and drop its buffers before deletion.

    Needed on Windows, where a multiprocessing worker killed mid-test can
    otherwise leave file locks held.
    """
    if metric is None:
        return
    lock = metric.filelock
    if lock is not None:
        lock.release()
    lock = metric.rendez_vous_lock
    if lock is not None:
        lock.release()
    del metric.writer
    del metric.data
    del metric
def metric_compute(arg):
    """Worker for distributed-evaluation tests: ``compute()`` in one process.

    Lives at module level so multiprocessing can pickle it.
    """
    num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg
    metric = None
    try:
        metric = DummyMetric(
            num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5
        )
        time.sleep(wait)
        return metric.compute(predictions=preds, references=refs)
    finally:
        properly_del_metric(metric)
def metric_add_batch_and_compute(arg):
    """Worker for distributed-evaluation tests: ``add_batch()`` then ``compute()``.

    Lives at module level so multiprocessing can pickle it.
    """
    num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg
    metric = None
    try:
        metric = DummyMetric(
            num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5
        )
        metric.add_batch(predictions=preds, references=refs)
        time.sleep(wait)
        return metric.compute()
    finally:
        properly_del_metric(metric)
def metric_add_and_compute(arg):
    """Worker for distributed-evaluation tests: item-wise ``add()`` then ``compute()``.

    Lives at module level so multiprocessing can pickle it.
    """
    num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg
    metric = None
    try:
        metric = DummyMetric(
            num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5
        )
        for prediction, reference in zip(preds, refs):
            metric.add(prediction=prediction, reference=reference)
        time.sleep(wait)
        return metric.compute()
    finally:
        properly_del_metric(metric)
class TestMetric(TestCase):
def test_dummy_metric(self):
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
metric = DummyMetric(experiment_id="test_dummy_metric")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_dummy_metric")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_dummy_metric")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
# With keep_in_memory
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
self.assertDictEqual({}, metric.compute(predictions=[], references=[]))
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
with self.assertRaisesRegex(ValueError, "Mismatch in the number"):
metric.add_batch(predictions=[1, 2, 3], references=[1, 2, 3, 4])
del metric
def test_metric_with_cache_dir(self):
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
with tempfile.TemporaryDirectory() as tmp_dir:
metric = DummyMetric(experiment_id="test_dummy_metric", cache_dir=tmp_dir)
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
    def test_concurrent_metrics(self):
        """Two metric instances sharing one experiment_id can run side by side.

        Exercises compute / add_batch / add on two concurrently-alive
        instances, both with on-disk caching and with keep_in_memory.
        NOTE: the creation/`del` ordering is deliberate — instances sharing an
        experiment_id interact through cache files, so do not reorder.
        """
        preds, refs = DummyMetric.predictions_and_references()
        other_preds, other_refs = DummyMetric.other_predictions_and_references()
        expected_results = DummyMetric.expected_results()
        other_expected_results = DummyMetric.other_expected_results()
        # Both instances use the same experiment_id on purpose.
        metric = DummyMetric(experiment_id="test_concurrent_metrics")
        other_metric = DummyMetric(
            experiment_id="test_concurrent_metrics",
        )
        self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
        self.assertDictEqual(
            other_expected_results, other_metric.compute(predictions=other_preds, references=other_refs)
        )
        del metric, other_metric
        # Same scenario, but streaming inputs via add_batch then add.
        metric = DummyMetric(
            experiment_id="test_concurrent_metrics",
        )
        other_metric = DummyMetric(
            experiment_id="test_concurrent_metrics",
        )
        metric.add_batch(predictions=preds, references=refs)
        other_metric.add_batch(predictions=other_preds, references=other_refs)
        self.assertDictEqual(expected_results, metric.compute())
        self.assertDictEqual(other_expected_results, other_metric.compute())
        for pred, ref, other_pred, other_ref in zip(preds, refs, other_preds, other_refs):
            metric.add(prediction=pred, reference=ref)
            other_metric.add(prediction=other_pred, reference=other_ref)
        self.assertDictEqual(expected_results, metric.compute())
        self.assertDictEqual(other_expected_results, other_metric.compute())
        del metric, other_metric
        # With keep_in_memory
        metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
        other_metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
        self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
        self.assertDictEqual(
            other_expected_results, other_metric.compute(predictions=other_preds, references=other_refs)
        )
        metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
        other_metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
        metric.add_batch(predictions=preds, references=refs)
        other_metric.add_batch(predictions=other_preds, references=other_refs)
        self.assertDictEqual(expected_results, metric.compute())
        self.assertDictEqual(other_expected_results, other_metric.compute())
        for pred, ref, other_pred, other_ref in zip(preds, refs, other_preds, other_refs):
            metric.add(prediction=pred, reference=ref)
            other_metric.add(prediction=other_pred, reference=other_ref)
        self.assertDictEqual(expected_results, metric.compute())
        self.assertDictEqual(other_expected_results, other_metric.compute())
        del metric, other_metric
    def test_separate_experiments_in_parallel(self):
        """Two independent (non-distributed) metrics can run in separate processes.

        Each worker tuple is (num_process, process_id, preds, refs,
        experiment_id, cache_dir, start_delay) — presumably matching the
        ``metric_*`` helper signatures defined elsewhere in this file; with
        num_process=1 each worker is its own single-process experiment.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            (preds_0, refs_0), (preds_1, refs_1) = DummyMetric.separate_predictions_and_references()
            expected_results = DummyMetric.separate_expected_results()
            pool = Pool(processes=2)
            results = pool.map(
                metric_compute,
                [
                    (1, 0, preds_0, refs_0, None, tmp_dir, 0),
                    (1, 0, preds_1, refs_1, None, tmp_dir, 0),
                ],
            )
            self.assertDictEqual(expected_results[0], results[0])
            self.assertDictEqual(expected_results[1], results[1])
            del results
            # more than one sec of waiting so that the second metric has to sample a new hashing name
            results = pool.map(
                metric_compute,
                [
                    (1, 0, preds_0, refs_0, None, tmp_dir, 2),
                    (1, 0, preds_1, refs_1, None, tmp_dir, 2),
                ],
            )
            self.assertDictEqual(expected_results[0], results[0])
            self.assertDictEqual(expected_results[1], results[1])
            del results
            # Same pair of experiments, fed through add / add_batch instead.
            results = pool.map(
                metric_add_and_compute,
                [
                    (1, 0, preds_0, refs_0, None, tmp_dir, 0),
                    (1, 0, preds_1, refs_1, None, tmp_dir, 0),
                ],
            )
            self.assertDictEqual(expected_results[0], results[0])
            self.assertDictEqual(expected_results[1], results[1])
            del results
            results = pool.map(
                metric_add_batch_and_compute,
                [
                    (1, 0, preds_0, refs_0, None, tmp_dir, 0),
                    (1, 0, preds_1, refs_1, None, tmp_dir, 0),
                ],
            )
            self.assertDictEqual(expected_results[0], results[0])
            self.assertDictEqual(expected_results[1], results[1])
            del results
    def test_distributed_metrics(self):
        """Distributed (num_process=2) runs: rank 0 returns the merged result,
        rank 1 returns None.

        The last tuple element is a start delay, used to exercise the
        rendezvous in both launch orders.  Reusing one experiment_id for two
        simultaneous distributed runs may raise ValueError (see below).
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            (preds_0, refs_0), (preds_1, refs_1) = DummyMetric.distributed_predictions_and_references()
            expected_results = DummyMetric.distributed_expected_results()
            pool = Pool(processes=4)
            # Rank 1 starts 0.5s after rank 0.
            results = pool.map(
                metric_compute,
                [
                    (2, 0, preds_0, refs_0, "test_distributed_metrics_0", tmp_dir, 0),
                    (2, 1, preds_1, refs_1, "test_distributed_metrics_0", tmp_dir, 0.5),
                ],
            )
            self.assertDictEqual(expected_results, results[0])
            self.assertIsNone(results[1])
            del results
            # Reversed launch order: rank 0 starts after rank 1.
            results = pool.map(
                metric_compute,
                [
                    (2, 0, preds_0, refs_0, "test_distributed_metrics_0", tmp_dir, 0.5),
                    (2, 1, preds_1, refs_1, "test_distributed_metrics_0", tmp_dir, 0),
                ],
            )
            self.assertDictEqual(expected_results, results[0])
            self.assertIsNone(results[1])
            del results
            results = pool.map(
                metric_add_and_compute,
                [
                    (2, 0, preds_0, refs_0, "test_distributed_metrics_1", tmp_dir, 0),
                    (2, 1, preds_1, refs_1, "test_distributed_metrics_1", tmp_dir, 0),
                ],
            )
            self.assertDictEqual(expected_results, results[0])
            self.assertIsNone(results[1])
            del results
            results = pool.map(
                metric_add_batch_and_compute,
                [
                    (2, 0, preds_0, refs_0, "test_distributed_metrics_2", tmp_dir, 0),
                    (2, 1, preds_1, refs_1, "test_distributed_metrics_2", tmp_dir, 0),
                ],
            )
            self.assertDictEqual(expected_results, results[0])
            self.assertIsNone(results[1])
            del results
            # To use several distributed metrics on the same local file system, need to specify an experiment_id
            try:
                results = pool.map(
                    metric_add_and_compute,
                    [
                        (2, 0, preds_0, refs_0, "test_distributed_metrics_3", tmp_dir, 0),
                        (2, 1, preds_1, refs_1, "test_distributed_metrics_3", tmp_dir, 0),
                        (2, 0, preds_0, refs_0, "test_distributed_metrics_3", tmp_dir, 0),
                        (2, 1, preds_1, refs_1, "test_distributed_metrics_3", tmp_dir, 0),
                    ],
                )
            except ValueError:
                # We are fine with either raising a ValueError or computing well the metric
                # Being sure we raise the error would means making the dummy dataset bigger
                # and the test longer...
                pass
            else:
                self.assertDictEqual(expected_results, results[0])
                self.assertDictEqual(expected_results, results[2])
                self.assertIsNone(results[1])
                self.assertIsNone(results[3])
                del results
            # Distinct experiment_ids make two simultaneous distributed runs safe.
            results = pool.map(
                metric_add_and_compute,
                [
                    (2, 0, preds_0, refs_0, "exp_0", tmp_dir, 0),
                    (2, 1, preds_1, refs_1, "exp_0", tmp_dir, 0),
                    (2, 0, preds_0, refs_0, "exp_1", tmp_dir, 0),
                    (2, 1, preds_1, refs_1, "exp_1", tmp_dir, 0),
                ],
            )
            self.assertDictEqual(expected_results, results[0])
            self.assertDictEqual(expected_results, results[2])
            self.assertIsNone(results[1])
            self.assertIsNone(results[3])
            del results
            # With keep_in_memory is not allowed
            with self.assertRaises(ValueError):
                DummyMetric(
                    experiment_id="test_distributed_metrics_4",
                    keep_in_memory=True,
                    num_process=2,
                    process_id=0,
                    cache_dir=tmp_dir,
                )
def test_dummy_metric_pickle(self):
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "metric.pt")
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
metric = DummyMetric(experiment_id="test_dummy_metric_pickle")
with open(tmp_file, "wb") as f:
pickle.dump(metric, f)
del metric
with open(tmp_file, "rb") as f:
metric = pickle.load(f)
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
def test_input_numpy(self):
import numpy as np
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
preds, refs = np.array(preds), np.array(refs)
metric = DummyMetric(experiment_id="test_input_numpy")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_input_numpy")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_input_numpy")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
@require_torch
def test_input_torch(self):
import torch
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
preds, refs = torch.tensor(preds), torch.tensor(refs)
metric = DummyMetric(experiment_id="test_input_torch")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_input_torch")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_input_torch")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
@require_tf
def test_input_tf(self):
import tensorflow as tf
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
preds, refs = tf.constant(preds), tf.constant(refs)
metric = DummyMetric(experiment_id="test_input_tf")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_input_tf")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_input_tf")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
    def test_string_casting(self):
        """Inputs that do not match the declared string features raise ValueError.

        Checks both flat Value("string") features and nested
        Sequence(Value("string")) features.
        """
        metric = DummyMetric(experiment_id="test_string_casting")
        metric.info.features = Features({"predictions": Value("string"), "references": Value("string")})
        metric.compute(predictions=["a"], references=["a"])
        with self.assertRaises(ValueError):
            metric.compute(predictions=[1], references=[1])
        # Same check with sequence-of-string features: a bare string where a
        # sequence is declared must be rejected.
        metric = DummyMetric(experiment_id="test_string_casting_2")
        metric.info.features = Features(
            {"predictions": Sequence(Value("string")), "references": Sequence(Value("string"))}
        )
        metric.compute(predictions=[["a"]], references=[["a"]])
        with self.assertRaises(ValueError):
            metric.compute(predictions=["a"], references=["a"])
    def test_string_casting_tested_once(self):
        """_enforce_nested_string_type is called a bounded number of times per
        batch, not once per element (10x10 elements would give far more than 8)."""
        self.counter = 0

        def checked_fct(fct): # wrapper function that increases a counter on each call
            def wrapped(*args, **kwargs):
                self.counter += 1
                return fct(*args, **kwargs)

            return wrapped

        with mock.patch(
            "evaluate.EvaluationModule._enforce_nested_string_type",
            checked_fct(DummyMetric._enforce_nested_string_type),
        ):
            metric = DummyMetric(experiment_id="test_string_casting_called_once")
            metric.info.features = Features(
                {"references": Sequence(Value("string")), "predictions": Sequence(Value("string"))}
            )
            refs = [["test"] * 10] * 10
            preds = [["test"] * 10] * 10
            metric.add_batch(references=refs, predictions=preds)
            metric.add_batch(references=refs, predictions=preds)

        # the function is called twice for every batch's input: once on the
        # sequence and then recursively again on the first input of the sequence
        # -> 2 batches * 2 inputs (refs, preds) * 2 calls = 8
        self.assertEqual(self.counter, 8)
    def test_multiple_features(self):
        """When info.features is a list of candidate schemas, compute succeeds
        regardless of where the matching (int64) schema appears in the list."""
        metric = DummyMetric()
        metric.info.features = [
            Features({"predictions": Value("int64"), "references": Value("int64")}),
            Features({"predictions": Value("string"), "references": Value("string")}),
        ]
        preds, refs = DummyMetric.predictions_and_references()
        expected_results = DummyMetric.expected_results()
        self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
        # Reversed order: the matching int64 schema is now listed second.
        metric.info.features = [
            Features({"predictions": Value("string"), "references": Value("string")}),
            Features({"predictions": Value("int64"), "references": Value("int64")}),
        ]
        preds, refs = DummyMetric.predictions_and_references()
        expected_results = DummyMetric.expected_results()
        self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
        del metric
class MetricWithMultiLabel(EvaluationModule):
    """Dummy accuracy-style module for tests.

    With config_name == "multilabel" the feature schema is sequences of
    int64 (one label vector per example); otherwise scalar int64 columns.
    """

    def _info(self):
        return EvaluationModuleInfo(
            description="dummy metric for tests",
            citation="insert citation here",
            features=Features(
                {"predictions": Sequence(Value("int64")), "references": Sequence(Value("int64"))}
                if self.config_name == "multilabel"
                else {"predictions": Value("int64"), "references": Value("int64")}
            ),
        )

    def _compute(self, predictions=None, references=None):
        # Per-example exact-match accuracy (whole label vectors are compared
        # with ==); empty predictions yield {} to avoid division by zero.
        return (
            {
                "accuracy": sum(i == j for i, j in zip(predictions, references)) / len(predictions),
            }
            if predictions
            else {}
        )
@pytest.mark.parametrize(
    "config_name, predictions, references, expected",
    [
        (None, [1, 2, 3, 4], [1, 2, 4, 3], 0.5), # Multiclass: Value("int64")
        (
            "multilabel",
            [[1, 0], [1, 0], [1, 0], [1, 0]],
            [[1, 0], [0, 1], [1, 1], [0, 0]],
            0.25,
        ), # Multilabel: Sequence(Value("int64"))
    ],
)
def test_metric_with_multilabel(config_name, predictions, references, expected, tmp_path):
    """MetricWithMultiLabel computes per-example exact-match accuracy for both
    the scalar (multiclass) and the sequence (multilabel) feature schemas."""
    cache_dir = tmp_path / "cache"
    metric = MetricWithMultiLabel(config_name, cache_dir=cache_dir)
    results = metric.compute(predictions=predictions, references=references)
    assert results["accuracy"] == expected
def test_safety_checks_process_vars():
    """Invalid process coordinates (negative id, id >= num_process) are rejected."""
    for bad_kwargs in ({"process_id": -2}, {"num_process": 2, "process_id": 3}):
        with pytest.raises(ValueError):
            DummyMetric(**bad_kwargs)
class AccuracyWithNonStandardFeatureNames(EvaluationModule):
    """Accuracy module whose feature columns are named "inputs"/"targets"
    instead of the usual "predictions"/"references"."""

    def _info(self):
        return EvaluationModuleInfo(
            description="dummy metric for tests",
            citation="insert citation here",
            features=Features({"inputs": Value("int64"), "targets": Value("int64")}),
        )

    def _compute(self, inputs, targets):
        # Empty targets yield an empty result dict instead of dividing by zero.
        if not targets:
            return {}
        n_correct = sum(i == j for i, j in zip(inputs, targets))
        return {"accuracy": n_correct / len(targets)}

    @classmethod
    def inputs_and_targets(cls):
        """Fixture pair: two of the four positions agree -> accuracy 0.5."""
        return ([1, 2, 3, 4], [1, 2, 4, 3])

    @classmethod
    def expected_results(cls):
        """Expected output of compute() for inputs_and_targets()."""
        return {"accuracy": 0.5}
def test_metric_with_non_standard_feature_names_add(tmp_path):
    """`add` accepts the custom "inputs"/"targets" keyword names one example at a time."""
    inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets()
    metric = AccuracyWithNonStandardFeatureNames(cache_dir=tmp_path / "cache")
    for single_input, single_target in zip(inputs, targets):
        metric.add(inputs=single_input, targets=single_target)
    assert metric.compute() == AccuracyWithNonStandardFeatureNames.expected_results()
def test_metric_with_non_standard_feature_names_add_batch(tmp_path):
    """`add_batch` accepts the custom "inputs"/"targets" keyword names."""
    inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets()
    metric = AccuracyWithNonStandardFeatureNames(cache_dir=tmp_path / "cache")
    metric.add_batch(inputs=inputs, targets=targets)
    assert metric.compute() == AccuracyWithNonStandardFeatureNames.expected_results()
def test_metric_with_non_standard_feature_names_compute(tmp_path):
    """Direct `compute` accepts the custom "inputs"/"targets" keyword names."""
    inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets()
    metric = AccuracyWithNonStandardFeatureNames(cache_dir=tmp_path / "cache")
    assert metric.compute(inputs=inputs, targets=targets) == AccuracyWithNonStandardFeatureNames.expected_results()
class TestEvaluationcombined_evaluation(TestCase):
    """Tests for `combine`, which merges several evaluation modules into one."""

    def test_single_module(self):
        """A combination of a single module behaves like that module."""
        preds, refs = DummyMetric.predictions_and_references()
        expected_results = DummyMetric.expected_results()
        combined_evaluation = combine([DummyMetric()])
        self.assertDictEqual(expected_results, combined_evaluation.compute(predictions=preds, references=refs))

    def test_add(self):
        """`add` streams one (prediction, reference) pair at a time."""
        preds, refs = DummyMetric.predictions_and_references()
        expected_results = DummyMetric.expected_results()
        combined_evaluation = combine([DummyMetric()])
        for pred, ref in zip(preds, refs):
            combined_evaluation.add(pred, ref)
        self.assertDictEqual(expected_results, combined_evaluation.compute())

    def test_add_batch(self):
        """`add_batch` streams all examples at once."""
        preds, refs = DummyMetric.predictions_and_references()
        expected_results = DummyMetric.expected_results()
        combined_evaluation = combine([DummyMetric()])
        combined_evaluation.add_batch(predictions=preds, references=refs)
        self.assertDictEqual(expected_results, combined_evaluation.compute())

    def test_force_prefix_with_dict(self):
        """With force_prefix=True every score key gets the module's dict key as prefix."""
        prefix = "test_prefix"
        preds, refs = DummyMetric.predictions_and_references()
        expected_results = DummyMetric.expected_results()
        expected_results[f"{prefix}_accuracy"] = expected_results.pop("accuracy")
        expected_results[f"{prefix}_set_equality"] = expected_results.pop("set_equality")
        combined_evaluation = combine({prefix: DummyMetric()}, force_prefix=True)
        self.assertDictEqual(expected_results, combined_evaluation.compute(predictions=preds, references=refs))

    def test_duplicate_module(self):
        """Duplicated modules are disambiguated with a `<name>_<index>_` key prefix."""
        preds, refs = DummyMetric.predictions_and_references()
        dummy_metric = DummyMetric()
        dummy_result = DummyMetric.expected_results()
        combined_evaluation = combine([dummy_metric, dummy_metric])
        expected_results = {}
        for i in range(2):
            for k in dummy_result:
                expected_results[f"{dummy_metric.name}_{i}_{k}"] = dummy_result[k]
        self.assertDictEqual(expected_results, combined_evaluation.compute(predictions=preds, references=refs))

    def test_two_modules_with_same_score_name(self):
        """Colliding score names from different modules get the module name prefixed."""
        preds, refs = DummyMetric.predictions_and_references()
        dummy_metric = DummyMetric()
        another_dummy_metric = AnotherDummyMetric()
        dummy_result_1 = DummyMetric.expected_results()
        dummy_result_2 = AnotherDummyMetric.expected_results()
        dummy_result_1[dummy_metric.name + "_set_equality"] = dummy_result_1.pop("set_equality")
        dummy_result_1[another_dummy_metric.name + "_set_equality"] = dummy_result_2["set_equality"]
        combined_evaluation = combine([dummy_metric, another_dummy_metric])
        self.assertDictEqual(dummy_result_1, combined_evaluation.compute(predictions=preds, references=refs))

    def test_modules_from_string(self):
        """Modules can be specified by name strings instead of instances."""
        expected_result = {"accuracy": 0.5, "recall": 0.5, "precision": 1.0}
        predictions = [0, 1]
        references = [1, 1]
        combined_evaluation = combine(["accuracy", "recall", "precision"])
        self.assertDictEqual(
            expected_result, combined_evaluation.compute(predictions=predictions, references=references)
        )

    def test_modules_from_string_poslabel(self):
        """Extra compute kwargs (here pos_label) are forwarded to every module."""
        expected_result = {"recall": 1.0, "precision": 0.5}
        predictions = [0, 1, 0]
        references = [1, 1, 0]
        combined_evaluation = combine(["recall", "precision"])
        self.assertDictEqual(
            expected_result, combined_evaluation.compute(predictions=predictions, references=references, pos_label=0)
        )
| 29,981 | 38.45 | 117 | py |
evaluate | evaluate-main/measurements/perplexity/perplexity.py | # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perplexity Metric."""
import datasets
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import evaluate
from evaluate import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) can be used for evaluating to what extent a dataset is similar to the distribution of text that a given model was trained on.
It is defined as the exponentiated average negative log-likelihood of a sequence, calculated with exponent base `e`.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
data (list of str): input data, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
max_length (int): the maximum length to truncate input texts to. Should be set to the maximum length the model supports. Defaults to None.
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = evaluate.load("perplexity", module_type="measurement")
>>> data = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... data=data) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results["mean_perplexity"], 0))
647.0
>>> print(round(results["perplexities"][0], 0))
32.0
Example 2:
>>> from datasets import load_dataset
>>> perplexity = evaluate.load("perplexity", module_type="measurement")
>>> data = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")["text"][:10] # doctest: +SKIP
>>> data = [s for s in data if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... data=data)
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results["mean_perplexity"], 2)) # doctest: +SKIP
576.76
>>> print(round(results["perplexities"][0], 2)) # doctest: +SKIP
889.28
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(evaluate.Measurement):
    """Measurement computing per-text and mean perplexity under a causal LM."""

    def _info(self):
        """Declare module metadata and the single "data" string feature."""
        return evaluate.MeasurementInfo(
            module_type="measurement",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "data": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(
        self, data, model_id, batch_size: int = 16, add_start_token: bool = True, device=None, max_length=None
    ):
        """Compute perplexity of each text in `data` under `model_id`.

        See the module-level _KWARGS_DESCRIPTION for argument details.
        Returns {"perplexities": [...], "mean_perplexity": float}.
        """
        # Resolve the compute device; "gpu" is accepted as an alias for "cuda".
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token and max_length:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = max_length - 1
        else:
            max_tokenized_len = max_length

        encodings = tokenizer(
            data,
            add_special_tokens=False,
            padding=True,
            truncation=True if max_tokenized_len else False,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            # Optionally prepend BOS so the first real token also gets a loss.
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # Shift so that logits at position t predict the token at t+1;
            # padded positions are masked out of the per-text average.
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # Perplexity = exp(mean token NLL) per text.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 8,604 | 43.35567 | 200 | py |
airloc | airloc-master/networks/rl_agent.py | import torch
import torch.nn as nn
import torch.nn.init as init
import math
import random
import sys
import os
import torch.nn.functional as F
from config import CONFIG
from networks.resnets import ResNet18
from torchvision.models.feature_extraction import get_graph_node_names
from torchvision.models.feature_extraction import create_feature_extractor
import json
import inspect
# During dev
#sys.path.append(os.path.abspath('..'))
#execfile(os.path.join(__file__, '../config.py'))
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m,nn.Linear) or isinstance(m,nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable so it can be used inside ``nn.Sequential``."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        # Stored callable; applied verbatim in forward().
        self.lambd = lambd

    def forward(self, x):
        return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,in_filters,filters,stride=1,option='A'):
super(BasicBlock,self).__init__()
self.conv1 = nn.Conv2d(in_filters,filters,kernel_size=3,stride=stride,padding=1,bias=False)
self.bn1 = nn.BatchNorm2d(filters)
self.conv2 = nn.Conv2d(filters,filters,kernel_size=3,stride=1,padding=1,bias=False)
self.bn2 = nn.BatchNorm2d(filters)
self.shortcut = nn.Sequential()
if stride !=1 or in_filters!=filters:
if option=='A':
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:,:,::2,::2],(0,0,0,0,filters//4,filters//4),"constant",0))
elif option =='B':
self.shortcut=nn.Sequential(
nn.Conv2d(in_filters,self.expansion*filters,kernel_size=1,stride=stride,bias=False),
nn.BatchNorm2d(self.expansion*filters)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class CustomResNet(nn.Module):
    """CIFAR-style residual network (16/32/64 filters over three stages).

    The average-pool kernel size is derived at construction time by pushing a
    dummy input of CONFIG.MISC_patch_size through the convolutional stages.
    """

    def __init__(self,block,num_blocks,num_classes=2):
        super(CustomResNet,self).__init__()
        # Running channel count, advanced by _make_layer as stages are built.
        self.in_filters = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size = 3, stride = 1, padding = 1,
            bias = True)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride = 1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride = 2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride = 2)
        # Make pool layer
        noise = torch.randn(1, 3, CONFIG.MISC_patch_size[0] , CONFIG.MISC_patch_size[1])
        # Run noise through layers to find out size for pool; the pool kernel
        # spans the full remaining spatial width so the output is 1x1.
        out = F.relu(self.bn1(self.conv1(noise)))
        out = self.layer1(out)
        out = self.layer2(out)
        shape = self.layer3(out).shape
        self.pool_layer = nn.AvgPool2d(shape[3])
        self.linear = nn.Linear(64, num_classes)
        self.apply(_weights_init)

    def _make_layer(self, block, filters, num_blocks, stride):
        """Stack ``num_blocks`` residual blocks; only the first may downsample."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_filters , filters, stride))
            self.in_filters = filters * block.expansion
        return nn.Sequential(*layers)

    def forward(self,x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.pool_layer(out)
        out = torch.flatten(out,1)
        # Final classification layer (num_classes outputs; default 2).
        out = self.linear(out)
        return out
| 3,679 | 30.724138 | 111 | py |
airloc | airloc-master/networks/agent.py | import torch
import importlib.util
import torch.nn as nn
import torch.nn.init as init
import math
import random
import sys
import os
import torch.nn.functional as F
from config import CONFIG
from networks.resnets import ResNet18
from torchvision.models.feature_extraction import get_graph_node_names
from torchvision.models.feature_extraction import create_feature_extractor
import json
from utils.utils import calculate_cnn_output_size, cosine_sim_heatmap
import inspect
import time
# During dev
#sys.path.append(os.path.abspath('..'))
#execfile(os.path.join(__file__, '../config.py'))
class Agent(nn.Module):
"Implementation of non-rnn agent"
    def __init__(self, unit_size = 256):
        """Build the agent: patch embedder, action head and optional seg net.

        Args:
            unit_size: size of each crop embedding before concatenation.
        """
        super(Agent,self).__init__()
        # Number of input channels: 3 (RGB) plus extra segmentation channels
        # depending on the dataset when privileged segmentation is enabled.
        self.n_chan = 3
        if CONFIG.RL_priv_use_seg:
            if CONFIG.MISC_dataset in ['dubai']:
                self.n_chan = 6
            elif CONFIG.MISC_dataset in ['masa_filt']:
                self.n_chan = 4
            else:
                raise(Exception("Define which type of segmentation this dataset has"))
        # Size of the embeddings before concatinating
        self.unit_size = unit_size
        # Input size of the first concatenated layer
        # NOTE(review): common_fc_inpt is computed but not used in this method.
        common_fc_inpt = 2 * unit_size

        # Define the patch embedder; both recognized settings currently map to
        # the Doerch-style embedder.
        if 'Doerch' in CONFIG.RL_patch_embedder:
            self.patch_emb = self._make_doerch_patch_embedder()
        elif 'ShareNet' in CONFIG.RL_patch_embedder: # DEFAULT
            self.patch_emb = self._make_doerch_patch_embedder()
        else:
            raise(Exception("Unknown Embedder:\t%s" % CONFIG.RL_patch_embedder) )

        # Define the final fully connected layer
        self.fc = nn.Linear(256 , 8) # Directions from current
        self.softmax = nn.Softmax( dim = 1 )

        self.AGENT_TYPE = 'REGULAR'

        # Precompute sinusoidal positional encodings if position embedding is on.
        if CONFIG.RL_LSTM_pos_emb:
            self._preallocate_pos_enc( max_len = 29)

        # If enabled load a U-net and use it to predict the building segmentation mask
        if CONFIG.RL_predict_seg_mask:
            # First check if segmentation info is enabled, it shouldn't
            if CONFIG.RL_priv_use_seg:
                raise(Exception("Prediction of segmentation mask and ground truth segmentation mask cannot be enabled at the same time."))
            self.seg_net = self._make_seg_net()
def _make_seg_net(self):
# Check that the specified segmentaion log exists
if not os.path.exists(CONFIG.RL_pretrained_segmentation_net):
raise(Exception("Segmentation log does not exist:\t%s" % (CONFIG.RL_pretrained_segmentation_net)))
networks_file_path = os.path.join(CONFIG.RL_pretrained_segmentation_net,"u_net.py")
spec = importlib.util.spec_from_file_location("u_net", networks_file_path)
segmentation_networks = importlib.util.module_from_spec(spec)
spec.loader.exec_module(segmentation_networks)
with open(os.path.join(CONFIG.RL_pretrained_segmentation_net, "info.json"),'r') as io:
info = json.load(io)
# Determine which network should be loaded for the rl agent
network_type = info['NetType']
if network_type == 'UNet':
seg_net = segmentation_networks.UNet(3,2)
else:
raise(Exception(f"Uknkown segmentation network {network_type}"))
# Load the weihgts
if not os.path.exists(os.path.join(CONFIG.RL_pretrained_segmentation_net,"final_unet")):
raise(Exception("No U-net found in:\t%s" % CONFIG.RL_pretrained_segmentation_net))
seg_net.load_state_dict(torch.load(os.path.join(CONFIG.RL_pretrained_segmentation_net, "final_unet"), map_location = torch.device('cpu')))
if True:
for param in seg_net.parameters():
param.requires_grad = False
else:
print("WARNING: No freeze selected for seg net. Training might not be possible since argmax is used")
return seg_net
def _has_visited(self,loc,locs):
return 2 in (loc == locs).sum(dim=1)
def _preallocate_pos_enc(self, dropout: float = 0.1, max_len: int = 16):
d_model = self.unit_size
position = torch.arange(max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
div_term = torch.exp(torch.arange(0, d_model, 2) * ( - 0.01 ))
self.pe = torch.zeros( 1,max_len, d_model).to(CONFIG.device)
self.pe[0, :, 0::2] = torch.sin(position * div_term)
self.pe[0, :, 1::2] = torch.cos(position * div_term)
# Also construct two separate embedding with half dmodel
div_term_half = torch.exp(torch.arange(0, d_model // 2, 2) * ( - 0.01 ))
self.pe_half = torch.zeros(1 , max_len , d_model // 2).to(CONFIG.device)
self.pe_half[0, :, 0::2] = torch.sin(position * div_term_half)
self.pe_half[0, :, 1::2] = torch.cos(position * div_term_half)
if False:
grid_size = int( CONFIG.MISC_im_size[0] / CONFIG.MISC_patch_size[0] + 1 )
cosine_sim_pos = [(0,0) , (1,2) , (4,2),(0,2) , (3,1),(0,1),(1,0),(4,4),(4,3),(5,1),(0,5),(2,5)]
os.makedirs(os.path.join(CONFIG.STATS_log_dir, "positional_embeddings"),exist_ok=True)
for pos in cosine_sim_pos:
cosine_sim_heatmap(self.pe_half , pos = pos , grid_size = grid_size )
def embedd_position(self, x, locs , goal_emb_included = False):
""" Embedds position into the sequence. """
# First get position in image (x,y) normalized to (0,1)
xy_pos = locs[:,0:2] / (torch.tensor(CONFIG.MISC_im_size))
# Append x and y position (-1,-1) for goal crop(which is the last)
if goal_emb_included:
xy_pos = torch.cat((xy_pos, torch.tensor([[-1,-1]])) , dim = 0)
# Get position in grid
xy_grid = (locs[:,0:2] / (torch.tensor(CONFIG.MISC_patch_size))).long()
# We want the goal crop to get f(0) add that to grid
if goal_emb_included:
xy_grid = torch.cat(( xy_grid , torch.tensor([[0,0]])) , dim = 0 )
# Half the positional embedding is for x other half for y
pos_embedding = torch.flatten(self.pe_half[0,xy_grid] , start_dim = 1, end_dim = 2)
x_pos_emb = x + pos_embedding
return x_pos_emb
def _construct_patch_embedder(self):
""" Constructs the embedder network. A series of cnn layers. """
# [in_chan, out_chan, kernel, stride, padding]
max_pool = [3,2]
layers = []
modules = []
layers.append([self.n_chan, 16, 3, 1, 0])
layers.append([16, 32, 3, 1, 0])
# Construct layers
for layer in layers:
modules.append(nn.Conv2d(layer[0],layer[1],layer[2],layer[3],layer[4]))
modules.append(nn.ReLU())
modules.append(nn.MaxPool2d(max_pool[0],max_pool[1]))
# Calculate output size from CNN layers
out_size = calculate_cnn_output_size(layers, CONFIG.MISC_patch_size, max_pool)
linear_input_size = int(out_size[0] * out_size[1] * out_size[2])
# Flatten and add final linear layer
modules.append(nn.Flatten(start_dim = 1))
modules.append(nn.Linear(linear_input_size , self.unit_size))
embedder = nn.Sequential(*modules)
return embedder
    def _make_doerch_patch_embedder(self):
        """Load a pretrained Doersch-style or ShareNet patch embedder.

        Dynamically imports the network definition stored in the pretraining
        log directory (CONFIG.RL_pretrained_doerch_net), instantiates the
        network type recorded in its info.json with 8 output classes, and
        optionally loads the pretrained weights on CPU
        (when CONFIG.RL_priv_pretrained is set).
        """
        if not os.path.exists(CONFIG.RL_pretrained_doerch_net):
            print("The pretrained doerch net does not exist check the file path")
            exit(1)
        # Load the json file generated during pretraining
        with open(os.path.join(CONFIG.RL_pretrained_doerch_net, "info.json"),'r') as io:
            info = json.load(io)
        # Number of output classes; matches the 8-way action space of the agent
        dim = 8
        # Determine which network should be loaded
        network_type = info['NetType']
        if network_type.startswith("Doerch"):
            networks_file_path = os.path.join(CONFIG.RL_pretrained_doerch_net, "networks.py")
            spec = importlib.util.spec_from_file_location("networks",networks_file_path)
            doerch_networks = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(doerch_networks)
            feature_net = doerch_networks.AJNet(network_type, num_classes = dim)
        elif network_type.startswith("ShareNet"):
            networks_file_path = os.path.join(CONFIG.RL_pretrained_doerch_net, "share_net.py")
            spec = importlib.util.spec_from_file_location("share_net",networks_file_path)
            doerch_networks = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(doerch_networks)
            feature_net = doerch_networks.ShareNet(num_out_classes = dim)
        else:
            raise(Exception("Unknown encoder network "))
        # NOTE(review): latentSpaceSize is read but never used in this method
        latentSpaceSize = info['LatentSpaceSize']
        if CONFIG.RL_priv_pretrained:
            feature_net.load_state_dict(torch.load(os.path.join(CONFIG.RL_pretrained_doerch_net,"doerch_embedder"), map_location=torch.device('cpu')))
        return feature_net
    def forward(self, episode):
        """One policy step: embed the current and goal crops and produce action probabilities.

        For Doerch/ShareNet embedders, returns a (probs, aux) tuple; with
        positional embedding enabled the embedder output is routed through
        self.fc, otherwise the embedder's own softmax logits are used, with
        an optional hand-crafted mask (CONFIG.MISC_priv) that forbids moves
        off the grid and discourages revisiting cells. For other embedders
        a bare probability tensor is returned.
        NOTE(review): the return arity differs between branches (tuple vs
        bare tensor) — callers presumably branch on embedder type; confirm.
        """
        step = episode.step
        # First calculate embeddings of current crop and goal crop
        x_curr = episode.crops[step , : , :,:].unsqueeze(0)
        x_goal = episode.crop_goal
        if CONFIG.RL_predict_seg_mask:
            # Predict a segmentation mask and append it as an extra channel
            seg_net_input = torch.cat((x_curr, x_goal) , dim = 0)
            seg_mask = self.seg_net(seg_net_input)
            # Output is one channel per class, i.e, need to do argmax to get it to one channel for two class problems
            # TODO add exception when not using two class seg masks
            seg_mask = torch.argmax( seg_mask , dim = 1, keepdim = True)
            x_curr = torch.cat((x_curr, seg_mask[0,:].unsqueeze(0)) , dim = 1)
            x_goal = torch.cat((x_goal, seg_mask[1,:].unsqueeze(0)) , dim = 1)
        if 'Doerch' in CONFIG.RL_patch_embedder or 'ShareNet' in CONFIG.RL_patch_embedder:
            # These embedders take both crops and return (features, logits)
            output, softmax = self.patch_emb(x_curr,x_goal)
            if CONFIG.RL_LSTM_pos_emb:
                x_curr_emb = self.embedd_position(output , episode.locs[step, :][None,:])
                x_fc = self.fc(x_curr_emb)
                return self.softmax(x_fc),softmax
            else:
                if CONFIG.MISC_priv:
                    # Build an additive mask x_o: entries set to -1 are scaled
                    # by 1e6 below, driving those actions' probabilities to ~0
                    x_o = torch.zeros([1,8]).to(CONFIG.device)
                    step_sz = int(CONFIG.RL_softmax_step_size*CONFIG.MISC_patch_size[0])
                    visited_locs = (episode.locs[:episode.step,:2]/step_sz).int()
                    loc = (episode.locs[episode.step,:2]/step_sz).int()
                    # Ensure no stepping outside the image
                    if loc[0] <= 0:
                        x_o[0,-1] = -1
                        x_o[0,:2] = -1
                    if loc[1] <= 0:
                        x_o[0, 5:] = -1
                    if loc[0] >= CONFIG.MISC_game_size - 1:
                        x_o[0, 3:6] = -1
                    if loc[1] >= CONFIG.MISC_game_size - 1:
                        x_o[0, 1:4] = -1
                    # Ensure not stepping in same location if possible
                    if episode.step == 0:
                        return self.softmax(softmax +1000000* x_o), None
                    # Actions 0..7 map to the eight neighbouring grid cells
                    if self._has_visited(loc - torch.as_tensor([1,0]),visited_locs):
                        x_o[0,0] = -1
                    if self._has_visited(loc - torch.as_tensor([1,-1]),visited_locs):
                        x_o[0,1] = -1
                    if self._has_visited(loc - torch.as_tensor([0,-1]),visited_locs):
                        x_o[0,2] = -1
                    if self._has_visited(loc - torch.as_tensor([-1,-1]),visited_locs):
                        x_o[0,3] = -1
                    if self._has_visited(loc - torch.as_tensor([-1,0]),visited_locs):
                        x_o[0,4] = -1
                    if self._has_visited(loc - torch.as_tensor([-1,1]),visited_locs):
                        x_o[0,5] = -1
                    if self._has_visited(loc - torch.as_tensor([0,1]),visited_locs):
                        x_o[0,6] = -1
                    if self._has_visited(loc - torch.as_tensor([1,1]),visited_locs):
                        x_o[0,7] = -1
                    # Every neighbour visited: allow revisits but still keep
                    # the border constraints
                    if x_o.sum() == -8:
                        x_o = torch.zeros([1,8]).to(CONFIG.device)
                        # Ensure no stepping outside the image
                        # If the vector has been reset
                        if loc[0] <= 0:
                            x_o[0,:2] = -1
                            x_o[0,-1] = -1
                        if loc[1] <= 0:
                            x_o[0, 5:] = -1
                        if loc[0] >= CONFIG.MISC_game_size - 1:
                            x_o[0, 3:6] = -1
                        if loc[1] >= CONFIG.MISC_game_size - 1:
                            x_o[0, 1:4] = -1
                    return self.softmax(softmax+1000000*x_o), None
                return self.softmax(softmax),output
        # Generic embedders: embed each crop separately and classify jointly
        x_goal_emb = self.patch_emb(x_goal)
        x_curr_emb = self.patch_emb(x_curr)
        # Concat all results
        x_fc = torch.cat((x_curr_emb ,x_goal_emb) , dim = 1)
        x_fc = self.fc(x_fc)
        x_fc = self.softmax(x_fc)
        return x_fc
| 13,032 | 40.243671 | 150 | py |
airloc | airloc-master/networks/RandomAgent.py | import torch
import importlib.util
import torch.nn as nn
import torch.nn.init as init
import math
import random
import sys
import os
import torch.nn.functional as F
from config import CONFIG
from networks.resnets import ResNet18
from torchvision.models.feature_extraction import get_graph_node_names
from torchvision.models.feature_extraction import create_feature_extractor
import json
from utils.utils import calculate_cnn_output_size
import inspect
# During dev
#sys.path.append(os.path.abspath('..'))
#execfile(os.path.join(__file__, '../config.py'))
class RandomAgent(nn.Module):
    """Baseline agent that assigns uniform probability to every allowed move.

    Moves that would leave the grid are always forbidden; moves onto an
    already-visited cell are forbidden unless every neighbour was visited.
    """

    # Offsets subtracted from the current grid location for actions 0..7
    _OFFSETS = ((1, 0), (1, -1), (0, -1), (-1, -1),
                (-1, 0), (-1, 1), (0, 1), (1, 1))

    def __init__(self, unit_size=64):
        super(RandomAgent, self).__init__()
        self.softmax = nn.Softmax(dim=1)
        self.AGENT_TYPE = 'RANDOM'

    def _has_visited(self, loc, locs):
        """True iff `loc` exactly matches some row of `locs`."""
        return 2 in (loc == locs).sum(dim=1)

    def _mask_borders(self, probs, loc):
        """Zero out actions that would step outside the grid (in place).

        The bound is hard-coded to index 4 — presumably a 5x5 game
        (cf. CONFIG.MISC_game_size - 1 elsewhere); confirm for other sizes.
        """
        if loc[0] <= 0:
            probs[0, -1] = 0
            probs[0, :2] = 0
        if loc[1] <= 0:
            probs[0, 5:] = 0
        if loc[0] >= 4:
            probs[0, 3:6] = 0
        if loc[1] >= 4:
            probs[0, 1:4] = 0

    def forward(self, episode):
        # Start from a uniform (unnormalized) distribution over the 8 moves
        probs = torch.ones([1, 8])
        step_sz = int(CONFIG.RL_softmax_step_size * CONFIG.MISC_patch_size[0])
        visited_locs = (episode.locs[:episode.step, :2] / step_sz).int()
        loc = (episode.locs[episode.step, :2] / step_sz).int()
        self._mask_borders(probs, loc)
        # On the very first step there is nothing visited yet
        if episode.step == 0:
            return probs, None
        # Forbid moves onto already-visited cells
        for action, offset in enumerate(self._OFFSETS):
            if self._has_visited(loc - torch.as_tensor(offset), visited_locs):
                probs[0, action] = 0
        if probs.sum() == 0:
            # Everything was masked: allow revisiting, but still keep the
            # border constraints
            probs = torch.ones([1, 8])
            self._mask_borders(probs, loc)
        return probs, None
| 2,861 | 30.450549 | 76 | py |
airloc | airloc-master/networks/rnn_agents.py |
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
import os
import importlib.util
import time
import json
import math
from utils.utils import calculate_cnn_output_size, cosine_sim_heatmap
from utils.agent_utils import visualize_cnn_filter, get_outside
import matplotlib
import matplotlib.pyplot as plt
from config import CONFIG
class LSTMAgent(nn.Module):
    """ AiRLoc agent: a pretrained patch embedder feeding an LSTM policy head. """
    def __init__(self, unit_size = 256):
        super(LSTMAgent, self).__init__()
        # Output size of individual units
        self.unit_size = unit_size
        # If ResNet8 is selected as embedder this option sets whether to freeze the weights or not
        self.freeze_patch_embedder = CONFIG.RL_freeze_patch_embedder
        # Determine the number of segmentation info channels to use
        if CONFIG.RL_priv_use_seg or CONFIG.RL_predict_seg_mask:
            # The number of segmentation channels available depend on the dataset
            if CONFIG.MISC_dataset == 'dubai':
                self.seg_chan = 3
            elif CONFIG.MISC_dataset == 'masa':
                self.seg_chan = 1
            elif CONFIG.MISC_dataset == 'masa_filt':
                self.seg_chan = 1
            elif CONFIG.MISC_dataset == 'masa_seven':
                self.seg_chan = 1
            elif CONFIG.MISC_dataset == 'dubai_seven':
                self.seg_chan = 1
            elif CONFIG.MISC_dataset == 'images_pre':
                self.seg_chan = 1
            else:
                raise(Exception("Use segmentation information was selected but the dataset has no segmentation info."))
        else:
            self.seg_chan = 0
        # Define the embedder
        # NOTE(review): if RL_patch_embedder is None, the `'Doerch' in ...`
        # test below raises TypeError before the `is None` branch is reached
        # — confirm the intended check order.
        if CONFIG.RL_patch_embedder == 'Segmenter':
            self.patch_emb = self._make_segmenter_patch_embedder()
        elif 'Doerch' in CONFIG.RL_patch_embedder:
            self.patch_emb = self._make_doerch_patch_embedder()
        elif 'ShareNet' in CONFIG.RL_patch_embedder: # DEFAULT
            self.patch_emb = self._make_doerch_patch_embedder()
        elif CONFIG.RL_patch_embedder is None:
            self.patch_emb = None
            self.x_curr_emb = torch.zeros(1,1,256).to(CONFIG.device)
        else:
            raise(Exception("Unknown patch embedder selected in LSTMAgent:\t%s" % CONFIG.RL_patch_embedder))
        # If enabled will also send a flattened grid location of the agent to the lstm
        if CONFIG.RL_priv_grid_location:
            im_H, im_W , patch_H , patch_W = *CONFIG.MISC_im_size , *CONFIG.MISC_patch_size
            # Times two because two grids for current and previous positions
            self.unit_size += 2 * int(im_H / patch_H) * int(im_W / patch_W)
        if self.freeze_patch_embedder:
            for param in self.patch_emb.parameters():
                param.requires_grad = False
        # Define the RNN. The input gains 8 extra dims when CONFIG.EE_temporal
        # is set (the patch embedder's softmax is appended in forward()).
        self.rnn = nn.LSTM(input_size = self.unit_size + 8 * CONFIG.EE_temporal, hidden_size = self.unit_size, num_layers = 1,
                bias = True, batch_first = True, dropout = 0, bidirectional = False)
        # If enabled, load a U-net and use it to predict the building segmentation mask
        if CONFIG.RL_predict_seg_mask:
            # First check if segmentation info is enabled, it shouldn't
            if CONFIG.RL_priv_use_seg:
                raise(Exception("Prediction of segmentation mask and ground truth segmentation mask cannot be enabled at the same time."))
            self.seg_net = self._make_seg_net()
        # TODO: Overwrites the line common_fc_input += self.unit_size above
        common_fc_input = self.unit_size
        # Define the final fully connected layer
        self.fc = nn.Linear(common_fc_input , 8) # Directions from current
        self.softmax = nn.Softmax( dim = 1 )
        # Reset the agent to initialize the hidden states
        self.reset()
        # Set agent type to be able to reset hidden states
        self.AGENT_TYPE = 'RNN'
        # If enabled the patch embeddings will be embedded with their absolute position in the image
        # The below happens with default config (CONFIG.RL_LSTM_pos_emb = 'half')
        if CONFIG.RL_LSTM_pos_emb:
            self._preallocate_pos_enc(max_len = 29)
    def _has_visited(self,loc,locs):
        """Return True iff `loc` (an x,y pair) exactly matches some row of `locs`."""
        return 2 in (loc == locs).sum(dim=1)
    def _make_seg_net(self):
        """Load and freeze the pretrained U-net used to predict segmentation masks."""
        # Check that the specified segmentation log exists
        if not os.path.exists(CONFIG.RL_pretrained_segmentation_net):
            raise(Exception("Segmentation log does not exist:\t%s" % (CONFIG.RL_pretrained_segmentation_net)))
        # Import the network definition shipped inside the pretrained log dir
        networks_file_path = os.path.join(CONFIG.RL_pretrained_segmentation_net,"u_net.py")
        spec = importlib.util.spec_from_file_location("u_net", networks_file_path)
        segmentation_networks = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(segmentation_networks)
        with open(os.path.join(CONFIG.RL_pretrained_segmentation_net, "info.json"),'r') as io:
            info = json.load(io)
        # Determine which network should be loaded for the rl agent
        network_type = info['NetType']
        if network_type == 'UNet':
            seg_net = segmentation_networks.UNet(3,2)
        else:
            raise(Exception(f"Uknkown segmentation network {network_type}"))
        # Load the weights
        if not os.path.exists(os.path.join(CONFIG.RL_pretrained_segmentation_net,"final_unet")):
            raise(Exception("No U-net found in:\t%s" % CONFIG.RL_pretrained_segmentation_net))
        seg_net.load_state_dict(torch.load(os.path.join(CONFIG.RL_pretrained_segmentation_net, "final_unet"), map_location = torch.device('cpu')))
        # The seg net is inference-only inside the agent
        for param in seg_net.parameters():
            param.requires_grad = False
        return seg_net
    def _preallocate_pos_enc(self, dropout: float = 0.1, max_len: int = 16):
        """Precompute sinusoidal positional encodings (full and half d_model).

        `dropout` is accepted for interface compatibility but unused.
        """
        d_model = self.unit_size
        # Presumably rescales 7x7-game positions onto the range used for 5x5
        if CONFIG.MISC_game_size == 7:
            position = torch.arange(max_len).unsqueeze(1) * (5 - 2) / (CONFIG.MISC_game_size - 2)
        else:
            position = torch.arange(max_len).unsqueeze(1)
        div_term_factor = -0.01
        # NOTE(review): the transformer-style div_term below is dead code —
        # it is overwritten by the flat-factor version on the next line.
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        div_term = torch.exp(torch.arange(0, d_model, 2) * div_term_factor)
        self.pe = torch.zeros( 1,max_len, d_model).to(CONFIG.device)
        self.pe[0, :, 0::2] = torch.sin(position * div_term)
        self.pe[0, :, 1::2] = torch.cos(position * div_term)
        # Also construct two separate embedding with half dmodel
        div_term_half = torch.exp(torch.arange(0, d_model // 2, 2) * div_term_factor)
        self.pe_half = torch.zeros(1 , max_len , d_model // 2).to(CONFIG.device)
        self.pe_half[0, :, 0::2] = torch.sin(position * div_term_half)
        self.pe_half[0, :, 1::2] = torch.cos(position * div_term_half)
        if False: # Plot of the similarity scores (debug only, never executed)
            grid_size = int( CONFIG.MISC_im_size[0] / CONFIG.MISC_patch_size[0] + 1 )
            # cosine_sim_pos = [(0,0) , (1,2) , (4,2),(0,2) , (3,1),(0,1),(1,0),(4,4),(4,3),(5,1),(0,5),(2,5)]
            cosine_sim_pos = [(0,0),(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),(1,0),(1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(6,0),(6,1),(6,2),(6,3),(6,4),(6,5),(6,6)]
            cosine_sim_pos = [(0,0),(0,1),(0,2),(0,3),(0,4),(1,0),(1,1),(1,2),(1,3),(1,4),(4,0),(4,1),(4,2),(4,3),(4,4)]
            os.makedirs(os.path.join(CONFIG.STATS_log_dir, "positional_embeddings"),exist_ok = True)
            for pos in cosine_sim_pos:
                cosine_sim_heatmap(self.pe_half , pos = pos , grid_size = grid_size )
            print("DONE")
            time.sleep(999)
    def embedd_position(self, x, locs , goal_emb_included = False):
        """ Embedds position into the sequence. """
        # First get position in image (x,y) normalized to (0,1)
        # NOTE(review): xy_pos is computed but not used below — confirm intent
        xy_pos = locs[:,0:2] / (torch.tensor(CONFIG.MISC_im_size))
        # Append x and y position (-1,-1) for goal crop(which is the last)
        if goal_emb_included:
            xy_pos = torch.cat((xy_pos, torch.tensor([[-1,-1]])) , dim = 0)
        # Get position in grid
        xy_grid = (locs[:,0:2] / (torch.tensor(CONFIG.MISC_patch_size))).long()
        # We want the goal crop to get f(0) add that to grid
        if goal_emb_included:
            xy_grid = torch.cat(( xy_grid , torch.tensor([[0,0]])) , dim = 0 )
        # Half the positional embedding is for x other half for y
        pos_embedding = torch.flatten(self.pe_half[0,xy_grid] , start_dim = 1, end_dim = 2)
        x_pos_emb = x + pos_embedding
        return x_pos_emb
    def _make_segmenter_patch_embedder(self):
        """
        Retrieves the encoder part of the segmentation,
        this requires only rgb input and the latent space from a segmentation
        task
        """
        if not os.path.exists(CONFIG.RL_pretrained_segmentation_net):
            print("The segmentation encoder does not exist, check that the path is correct")
            exit(1)
        # Gets the network file from the log file and load it as a module
        networks_file_path = os.path.join(CONFIG.RL_pretrained_segmentation_net,"u_net.py")
        spec = importlib.util.spec_from_file_location("u_net", networks_file_path)
        segmentation_networks = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(segmentation_networks)
        # Load the json file generated during the segmentation training
        with open(os.path.join(CONFIG.RL_pretrained_segmentation_net, "info.json"),'r') as io:
            info = json.load(io)
        # Determine which network should be loaded for the rl agent
        network_type = info['NetType']
        if network_type == 'UNet':
            feature_net = segmentation_networks.UNet(3,2)
        else:
            raise(Exception(f"Ukn segmentation network {network_type}"))
        latentSpaceSize = info['LatentSpaceSize']
        if CONFIG.RL_priv_pretrained:
            feature_net.load_state_dict(torch.load(os.path.join(CONFIG.RL_pretrained_segmentation_net, "final_unet")))
        if self.freeze_patch_embedder:
            for param in feature_net.parameters():
                param.requires_grad = False
        # Unroll the network and keep only the encoder half for embeddings
        modules = list(feature_net.children())
        middle = int(len(modules)/2)
        modules = modules[:middle]
        # Assumes 64,3,3
        # modules.append(nn.Conv2d(64,64,3))
        modules.append(nn.Flatten(start_dim = 1))
        modules.append(nn.Linear(9*64, 128))
        modules.append(nn.ReLU())
        embedder = nn.Sequential(*modules)
        return embedder
    def _make_doerch_patch_embedder(self):
        """Load a pretrained Doersch-style or ShareNet patch embedder from
        CONFIG.RL_pretrained_doerch_net, optionally loading its weights."""
        if not os.path.exists(CONFIG.RL_pretrained_doerch_net):
            print("The pretrained doerch net does not exist check the file path")
            exit(1)
        # Load the json file generated during pretraining
        with open(os.path.join(CONFIG.RL_pretrained_doerch_net, "info.json"),'r') as io:
            info = json.load(io)
        # Determine which network should be loaded
        network_type = info['NetType']
        if network_type.startswith("Doerch"):
            networks_file_path = os.path.join(CONFIG.RL_pretrained_doerch_net, "networks.py")
            spec = importlib.util.spec_from_file_location("networks",networks_file_path)
            doerch_networks = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(doerch_networks)
            feature_net = doerch_networks.AJNet(network_type)
        elif network_type.startswith("ShareNet"):
            networks_file_path = os.path.join(CONFIG.RL_pretrained_doerch_net, "share_net.py")
            spec = importlib.util.spec_from_file_location("share_net",networks_file_path)
            doerch_networks = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(doerch_networks)
            feature_net = doerch_networks.ShareNet()
        else:
            raise(Exception("Unknown encoder network "))
        # NOTE(review): latentSpaceSize is read but never used in this method
        latentSpaceSize = info['LatentSpaceSize']
        if CONFIG.RL_priv_pretrained:
            feature_net.load_state_dict(torch.load(os.path.join(CONFIG.RL_pretrained_doerch_net,"doerch_embedder"), map_location=torch.device('cpu')))
        if self.freeze_patch_embedder:
            for param in feature_net.parameters():
                param.requires_grad = False
        return feature_net
    def reset(self):
        """ Resets the hidden states of the RNN network."""
        # The size of the hidden states depend on the LSTM network
        D = 2 if self.rnn.bidirectional else 1
        self._hidden = torch.zeros(D * self.rnn.num_layers, 1, self.unit_size).to(CONFIG.device)
        self._cell = torch.zeros(D * self.rnn.num_layers , 1,self.unit_size).to(CONFIG.device)
    def forward(self, episode):
        """One policy step: embed crops, update the LSTM state, and return
        (action_probs, embedder_softmax). Mutates self._hidden/self._cell."""
        # For now only one mode, use latest available crop and step once
        step = episode.step
        x_curr = episode.crops[step , : , :,:].unsqueeze(0)
        x_goal = episode.crop_goal
        # If enabled predict segmentation mask and concat it to the patch input
        if CONFIG.RL_predict_seg_mask:
            seg_net_input = torch.cat((x_curr, x_goal) , dim = 0)
            seg_mask = self.seg_net(seg_net_input)
            # Output is one channel per class, i.e, need to do argmax to get it to one channel for two class problems
            # TODO add exception when not using two class seg masks
            seg_mask = torch.argmax( seg_mask , dim = 1, keepdim = True)
            x_curr = torch.cat((x_curr, seg_mask[0,:].unsqueeze(0)) , dim = 1)
            x_goal = torch.cat((x_goal, seg_mask[1,:].unsqueeze(0)) , dim = 1)
        # First calculate embeddings of current crop and goal crop
        if CONFIG.RL_patch_embedder is None:
            x_curr_emb = self.x_curr_emb
            x_softmax_emb = None
        elif "Doerch" in CONFIG.RL_patch_embedder or "ShareNet" in CONFIG.RL_patch_embedder:
            # DEFAULT BRANCH
            x_curr_emb , x_softmax_emb = self.patch_emb(x_curr, x_goal)
            x_curr_emb = x_curr_emb.unsqueeze(0)
        else:
            x_curr_emb = self.patch_emb(x_curr).unsqueeze(0)
            x_goal_emb = self.patch_emb(x_goal).unsqueeze(0)
            x_curr_emb = torch.cat((x_curr_emb,x_goal_emb), dim=2)
            x_softmax_emb = None
        # embedd position into result from patch_emb
        if CONFIG.RL_LSTM_pos_emb:
            x_curr_emb = self.embedd_position(x_curr_emb , episode.locs[step, :][None,:])
        # If enabled send a flattened version of the grid location of the agent
        if CONFIG.RL_priv_grid_location:
            x_curr_emb = torch.cat((x_curr_emb ,
                torch.flatten(episode.grid_loc[step,:]).unsqueeze(0).unsqueeze(0),
                torch.flatten(episode.grid_curr_loc[step,:]).unsqueeze(0).unsqueeze(0)) , dim = 2)
        # Append the patch embedders softmax embedding for further guidance
        if CONFIG.EE_temporal:
            x_curr_emb = torch.cat((x_curr_emb, x_softmax_emb.unsqueeze(0)), dim=2)
        # Run embedding of current crop through LSTM network
        x_curr_lstm , (self._hidden , self._cell) = self.rnn(x_curr_emb, (self._hidden , self._cell))
        # Squeeze away the sequence dimension since it will always be one
        x_curr_lstm = x_curr_lstm.squeeze(1)
        # If the goal patch is not in the lstm append it after the lstm
        x_fc = x_curr_lstm
        x_fc = self.fc(x_fc)
        if CONFIG.MISC_priv:
            # Additive mask: -1 entries are scaled by 1e6, driving those
            # actions' probabilities to ~0 after the softmax
            x_o = torch.zeros([1,8]).to(CONFIG.device)
            step_sz = int(CONFIG.RL_softmax_step_size*CONFIG.MISC_patch_size[0])
            visited_locs = (episode.locs[:episode.step,:2]/step_sz).int()
            loc = (episode.locs[episode.step,:2]/step_sz).int()
            # Ensure no stepping outside the image
            if loc[0] <= 0:
                x_o[0,-1] = -1
                x_o[0,:2] = -1
            if loc[1] <= 0:
                x_o[0, 5:] = -1
            if loc[0] >= CONFIG.MISC_game_size - 1:
                x_o[0, 3:6] = -1
            if loc[1] >= CONFIG.MISC_game_size - 1:
                x_o[0, 1:4] = -1
            # Ensure not stepping in same location if possible
            if episode.step == 0:
                return self.softmax(x_fc +1000000* x_o), None
            # Actions 0..7 map to the eight neighbouring grid cells
            if self._has_visited(loc - torch.as_tensor([1,0]),visited_locs):
                x_o[0,0] = -1
            if self._has_visited(loc - torch.as_tensor([1,-1]),visited_locs):
                x_o[0,1] = -1
            if self._has_visited(loc - torch.as_tensor([0,-1]),visited_locs):
                x_o[0,2] = -1
            if self._has_visited(loc - torch.as_tensor([-1,-1]),visited_locs):
                x_o[0,3] = -1
            if self._has_visited(loc - torch.as_tensor([-1,0]),visited_locs):
                x_o[0,4] = -1
            if self._has_visited(loc - torch.as_tensor([-1,1]),visited_locs):
                x_o[0,5] = -1
            if self._has_visited(loc - torch.as_tensor([0,1]),visited_locs):
                x_o[0,6] = -1
            if self._has_visited(loc - torch.as_tensor([1,1]),visited_locs):
                x_o[0,7] = -1
            # Every neighbour visited: allow revisits but keep border limits
            if x_o.sum() == -8:
                x_o = torch.zeros([1,8]).to(CONFIG.device)
                # Ensure no stepping outside the image
                # If the vector has been reset
                if loc[0] <= 0:
                    x_o[0,:2] = -1
                    x_o[0,-1] = -1
                if loc[1] <= 0:
                    x_o[0, 5:] = -1
                if loc[0] >= CONFIG.MISC_game_size - 1:
                    x_o[0, 3:6] = -1
                if loc[1] >= CONFIG.MISC_game_size - 1:
                    x_o[0, 1:4] = -1
            return self.softmax(x_fc+1000000*x_o), None
        if not CONFIG.RL_agent_allowed_outside:
            outside = get_outside(episode).to(CONFIG.device)
            x_fc = x_fc -10000* outside
        if CONFIG.EE_residual:
            x_fc += x_softmax_emb
        x_fc = self.softmax(x_fc)
        return x_fc, x_softmax_emb
| 18,173 | 43.004843 | 156 | py |
airloc | airloc-master/networks/deterministic_agent.py |
import torch
import importlib.util
import torch.nn as nn
import torch.nn.init as init
import math
import random
import sys
import os
import torch.nn.functional as F
from config import CONFIG
from networks.resnets import ResNet18
from torchvision.models.feature_extraction import get_graph_node_names
from torchvision.models.feature_extraction import create_feature_extractor
import json
from utils.utils import calculate_cnn_output_size
import inspect
# During dev
#sys.path.append(os.path.abspath('..'))
#execfile(os.path.join(__file__, '../config.py'))
class DeterministicAgent(nn.Module):
    """Scripted (non-learned) baseline agent that sweeps the grid
    deterministically; only the 'spiral' mode is implemented."""
    def __init__(self, mode='spiral'):
        super(DeterministicAgent,self).__init__()
        self.mode = mode
        self.softmax = nn.Softmax(dim=1)
        self.AGENT_TYPE = 'DETERMINISTIC'
        # Turning offset in action space (+/-2 = a 90-degree turn among the
        # 8 directions); flipped whenever the spiral reverses
        self.dir = -2
        # Set when the agent must backtrack through already-visited cells
        self.locked_in = False
    def _has_visited(self,loc,locs):
        """True if `loc` is outside the image or matches a row of `locs`.

        NOTE(review): uses MISC_im_size[0] for both axes — assumes a square
        image; confirm.
        """
        if (loc<0).any() or (loc>=CONFIG.MISC_im_size[0]).any():
            return True
        return 2 in (loc == locs).sum(dim=1)
    def _get_move(self,action):
        """Map an action index (wrapped into 0..7) to a pixel offset.

        NOTE(review): the 48-pixel step is hard-coded — presumably
        RL_softmax_step_size * patch size; confirm for other configurations.
        """
        if action < 0: action += 8
        if action > 7: action -= 8
        if action == 0:
            return torch.as_tensor([-48,0])
        if action == 1:
            return torch.as_tensor([-48,48])
        if action == 2:
            return torch.as_tensor([0,48])
        if action == 3:
            return torch.as_tensor([48,48])
        if action == 4:
            return torch.as_tensor([48,0])
        if action == 5:
            return torch.as_tensor([48,-48])
        if action == 6:
            return torch.as_tensor([0,-48])
        if action == 7:
            return torch.as_tensor([-48,-48])
    def _forward_spiral(self,episode):
        """Produce a one-hot action tensor continuing a spiral sweep."""
        # Assign zero probability to all classes
        x_fc = torch.zeros([1,8])
        # Get the current an all visited locations from the episode storage
        visited_locs = episode.locs[:episode.step,:2]
        loc = episode.locs[episode.step,:2]
        # Ensure not stepping in same location if possible
        if episode.step == 0:
            # First step: pick an axis-aligned move whose subsequent turn
            # would leave the image, so the spiral hugs the border
            for action in [0,2,4,6]:
                next_loc = loc + self._get_move(action)
                if not self._has_visited(next_loc,torch.as_tensor([[-1,-1]])):
                    if self._has_visited(next_loc + self._get_move(action+self.dir),torch.as_tensor([[-1,-1]])):
                        if self.dir == -2: self.dir=2
                        else: self.dir=-2
                    x_fc[0,action] = 1
                    return x_fc
        last_action = episode.actions[episode.step - 1,:].argmax()
        # While locked in, keep moving straight through visited cells
        if self.locked_in and self._has_visited(loc + self._get_move(last_action),visited_locs):
            x_fc[0,last_action] = 1
            return x_fc
        self.locked_in = False
        # Prefer turning inward, then straight ahead, then turning outward
        if not self._has_visited(loc + self._get_move(last_action+self.dir),visited_locs):
            a = last_action + self.dir
            if a>7: a-=8
            x_fc[0,a] = 1
            return x_fc
        elif not self._has_visited(loc + self._get_move(last_action),visited_locs):
            x_fc[0,last_action] = 1
            return x_fc
        elif not self._has_visited(loc + self._get_move(last_action-self.dir),visited_locs):
            a = last_action - self.dir
            if a>7: a-=8
            x_fc[0,a] = 1
            if self.dir == -2: self.dir=2
            else: self.dir=-2
            return x_fc
        else:
            # Dead end: reverse direction and lock in until a free cell appears
            x_fc[0,last_action-4] = 1
            self.locked_in = True
            if self.dir == -2: self.dir=2
            else: self.dir=-2
            return x_fc
    def _forward_rows(self, episode):
        # Not implemented; returns None
        pass
    def forward(self, episode):
        """Dispatch on self.mode; returns (one-hot action tensor, None)."""
        if self.mode == 'spiral':
            return self._forward_spiral(episode),None
        elif self.mode == 'rows':
            return self._forward_rows(episode),None
        else:
            raise(Exception(f"Unknown mode: \t {self.mode} selected for the DeterministicAgent"))
| 4,074 | 27.496503 | 113 | py |
airloc | airloc-master/networks/resnets.py |
import torch
import torch.nn as nn
import math
import random
import sys
import os
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
# During dev
#sys.path.append(os.path.abspath('..'))
#execfile(os.path.join(__file__, '../config.py'))
from config import CONFIG
class ResNet18(nn.Module):
    """Thin wrapper around torchvision's ResNet-18.

    The stem conv is replaced so an arbitrary number of input channels is
    accepted, and the classification head is replaced with a linear layer
    of the requested output width.
    """

    def __init__(self, channels_in=3, output_dim=512, use_pretrained_net=False):
        super(ResNet18, self).__init__()
        backbone = models.resnet18(pretrained=use_pretrained_net)
        # New stem: accept `channels_in` channels instead of RGB only
        backbone.conv1 = nn.Conv2d(in_channels=channels_in, out_channels=64,
                                   kernel_size=7, stride=2, padding=3, bias=False)
        # New head: project the 512-dim features to `output_dim`
        backbone.fc = nn.Linear(512, output_dim)
        self.resnet = backbone

    def forward(self, x):
        return self.resnet.forward(x)
| 983 | 19.5 | 87 | py |
airloc | airloc-master/training/train_agent.py | import traceback
import pdb
from datetime import datetime
import random
import signal
import time
import numpy as np
import json
import os
import sys
# matplotlib is used for debugging image inputs to networks
import matplotlib
import matplotlib.pyplot as plt
from copy import deepcopy
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import math
from config import CONFIG
from utils.utils import load_normalize_data, visualize_trajectory,\
get_deterministic_crops, _setup_fixed_games,\
visualize_batch, setupLogDir
# Import agent utils
from utils.agent_utils import take_step, get_action, run_eval_trajectory, update_net
from networks.agent import Agent
from networks.RandomAgent import RandomAgent
from networks.rnn_agents import LSTMAgent
from utils.stat_collector import StatCollector
from torch.utils.tensorboard import SummaryWriter
from utils.training_utils import BatchStorage , EpisodeStorage
# Creates logging directory for this run; the signal handlers below write
# info.json and model weights into CONFIG.STATS_log_dir
setupLogDir(CONFIG.STATS_log_dir)
def signalInterruptHandler(*args):
    """
    SIGINT handler. Asks the user whether to abort training; on abort,
    writes the run info to info.json, optionally saves the current network
    weights, and flags the main loop to terminate.
    """
    # Only the main process should interact with the user
    if CONFIG.MISC_main_pid != os.getpid():
        return
    answer = ''
    while answer not in ('y', 'n'):
        answer = input("Program interrupted, do you want to exit? (y/n)\t")
    if answer == 'n':
        print("Continuing training!")
        return
    # Aborting: record how far training got and persist the run info
    info['FinishedOnIter'] = tot_itr
    with open(os.path.join(CONFIG.STATS_log_dir, "info.json"), 'w') as info_file:
        json.dump(info, info_file, indent=4)
    answer = ''
    while answer not in ('y', 'n'):
        answer = input("Do you want to save network weights? (y/n)\t")
    if answer == 'y':
        print("Saving network weights!")
        torch.save(agent_net.state_dict(), os.path.join(CONFIG.STATS_log_dir, "final_model"))
    else:
        print("Not saving network weights!")
    # Needs to terminate from main thread. There is a check for this variable in intitalize episode
    CONFIG.TERMINATE = True
def signalSIGUSR1Handler(*args):
    """Print the current log directory when the process receives SIGUSR1.

    Only the main process responds; any worker process ignores the signal.
    """
    if CONFIG.MISC_main_pid != os.getpid():
        return
    print("\nLog directory:\t%s\n" % CONFIG.STATS_log_dir)
# Run metadata that is serialized to info.json in the log directory.
# 'Metrics' lists every statistic registered with the StatCollector below;
# entries prefixed Val/FullVal/Separated refer to the corresponding splits.
info = dict([
    ("AgentType" ,CONFIG.RL_agent_network),
    ("PatchEmbedder" , CONFIG.RL_patch_embedder),
    ("Completed" , False),
    ('Metrics' , [
        'Steps',
        'FinalDistanceOnlyFailure',
        'IoU',
        'ValSteps',
        'ValFinalDistanceOnlyFailure',
        'ValIoU',
        'StepMSE',
        'StepMax',
        'CumulativeRewardToGo',
        'HasConverged',
        'StepRatioOnlySuccess',
        'Entropy',
        'Loss',
        'Difficulty',
        'ActionsTaken',
        'ValDifficulty',
        'ValHasConverged',
        'ValStepRatioOnlySuccess',
        'ValCumulativeRewardToGo',
        'FullValSteps',
        'FullValIoU',
        'FullValDistance',
        'FullValHasConverged',
        'ValActionsTaken',
        'SeparatedSteps',
        'SeparatedIoU',
        'SeparatedCumulativeRewardToGo',
        'SeparatedHasConverged',
        'ValSeparatedSteps',
        'ValSeparatedIoU',
        'ValSeparatedCumulativeRewardToGo',
        'ValSeparatedHasConverged',
    ]),
    ("StartedTraining" , str(datetime.now())),
    ("FinishedTraining" , 0),
    ("Dataset",CONFIG.MISC_dataset),
    ("MultiplyImages" , CONFIG.RL_multiply_images),
    ("NbrOfTrainableParameters" , 0),
    ("AgentClass" , "RL"),
    ("FinishedOnIter" , 0),
    ("FullValIters", []), # At which iterations is the model evaluated on full validation
])
# Set random seed for all RNG sources so runs are reproducible
random.seed(CONFIG.MISC_random_seed)
np.random.seed(CONFIG.MISC_random_seed)
torch.manual_seed(CONFIG.MISC_random_seed)
# load the dataset
trainloader,valloader = load_normalize_data(download = False, batch_size = CONFIG.RL_batch_size , multiply_images = CONFIG.RL_multiply_images)
valloader_iterator = iter(valloader)
im_H, im_W = CONFIG.MISC_im_size
p_H,p_W = CONFIG.MISC_patch_size
max_len_batch = CONFIG.RL_max_episode_length*CONFIG.RL_batch_size * CONFIG.RL_multiply_images
ep_len = CONFIG.RL_max_episode_length
device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
# Make device globaly available
CONFIG.device = device
# Setup Agent
if CONFIG.RL_agent_network == 'LSTMAgent':
    agent_net = LSTMAgent()
elif CONFIG.RL_agent_network == 'Agent':
    agent_net = Agent()
elif CONFIG.RL_agent_network == 'RandomAgent':
    agent_net = RandomAgent()
else:
    # BUG FIX: raising a plain string is a TypeError in Python 3 -- raise a real exception
    raise ValueError("Unknown RL agent selected.")
agent_net = agent_net.to(device)
agent_parameters = filter(lambda p: p.requires_grad, agent_net.parameters())
params = sum([np.prod(p.size()) for p in agent_parameters])
# Add number of parameters to the info json
info['NbrOfTrainableParameters'] = int(params)
# Setup loss
# NOTE(review): criterion is not used anywhere visible in this script -- confirm
criterion = nn.MSELoss()
if CONFIG.RL_optimizer == 'sgd':
    optimizer = optim.SGD(agent_net.parameters() , lr = CONFIG.RL_learning_rate ,
                          weight_decay = CONFIG.RL_weight_decay ,
                          momentum = CONFIG.RL_momentum)
elif CONFIG.RL_optimizer == 'adam':
    optimizer = optim.Adam(agent_net.parameters() , lr = CONFIG.RL_learning_rate ,
                           weight_decay = CONFIG.RL_weight_decay ,
                           betas = (CONFIG.RL_beta1 , CONFIG.RL_beta2) )
else:
    # ROBUSTNESS: previously an unknown optimizer left 'optimizer' undefined,
    # producing a confusing NameError much later in the training loop.
    raise ValueError("Unknown optimizer '%s' selected." % CONFIG.RL_optimizer)
# Write info dictionary to log directory
with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
    json.dump(info , info_file ,indent = 4)
# Setup StatCollector
metrics = info['Metrics']
exclude_prints = [
    'ActionsTaken' , 'ValActionsTaken', 'ValPropTime', 'ValStepTime', 'StepTime', 'SeparatedSteps', 'SeparatedIoU',
    'SeparatedCumulativeRewardToGo', 'SeparatedHasConverged', 'ValSeparatedSteps', 'ValSeparatedIoU',
    'ValSeparatedCumulativeRewardToGo', 'ValSeparatedHasConverged','CumulativeRewardToGo', 'ValCumulativeRewardToGo',
    'StepRatio','ValStepRatio', 'HasConverged' , 'ValHasConverged','ValCorrectActions','CorrectActions'
] # Does not print these statistics
tot_nbr_iter = CONFIG.RL_nbr_epochs * len(trainloader)
tot_itr = 0
sc = StatCollector(CONFIG.STATS_metrics_dir, tot_nbr_iter , print_iter = CONFIG.MISC_print_iter, exclude_prints = exclude_prints)
# Add all metrics to StatCollector
for metric in metrics:
    sc.register(metric , {'type':'avg' ,'freq':'step'})
# Open statistics for dataset to find unnormalizing transform
if CONFIG.MISC_dataset.startswith('custom_'):
    stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
    stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
with open(stat_path) as json_file:
    stats = json.load(json_file)
dataset_means = torch.tensor(stats['means'][:3])
dataset_stds = torch.tensor(stats['stds'][:3])
# Inverse of the normalization applied by the data loader, used for visualization
unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
# Storage objects for all actions, weights, rewards etcetera
batch = BatchStorage(sc)
episode = EpisodeStorage()
val_batch = BatchStorage(sc)
val_episode = EpisodeStorage()
if CONFIG.MISC_include_baseline:
    base_batch = BatchStorage(sc)
    base_episode = EpisodeStorage()
    base_agent = RandomAgent()
# If enabled there will be an entropy bonus and a linear annealing of this bonus
entropy_bonus = CONFIG.RL_entropy
if CONFIG.RL_entropy_lower is not None and CONFIG.RL_entropy is not None:
    # Linear annealing
    entropy_anneal_k_exp = math.exp( math.log( CONFIG.RL_entropy_lower / entropy_bonus) / tot_nbr_iter)
# Increase recursion limit for debugging
sys.setrecursionlimit(2000)
# Initialize all clocations to zero to avoid crashing later
loc_goal = None
loc_start = None
start_coord = None
goal_coord = None
# NOTE(review): rep_counter appears unused in this script -- confirm before removing
rep_counter = 91
# Print out info regarding this training run
print("Starting training at:\t%s" % info['StartedTraining'])
print("Agent Network:\t%s" % CONFIG.RL_agent_network)
print("Patch Embedder:\t%s" % CONFIG.RL_patch_embedder)
print("Dataset:\t%s" % CONFIG.MISC_dataset)
print("Trainloader length:\t%d" % len(trainloader) )
print("Valloader length:\t%d" % len(valloader) )
# Attach signal handler for Interrupt signal
signal.signal(signal.SIGINT , signalInterruptHandler)
signal.signal(signal.SIGUSR1 , signalSIGUSR1Handler)
CONFIG.TERMINATE = False
try:
    for epoch in range(CONFIG.RL_nbr_epochs):
        # Passes through the dataset in batches
        for batch_counter,batch_data in enumerate(trainloader):
            batch_images , (start_crops_ , goal_crops_) = batch_data
            batch.initialize(batch_size = len(batch_images))
            # Each image/episode is handled seperatly
            for (episode_counter, episode_data) in enumerate(batch_images):
                full_image = episode_data[None,:].to(device)
                loc_start , loc_goal = None, None
                # Initializes all training tensors and sets start and goal patch
                episode.initialize(image = full_image, loc_goal = loc_goal, loc_start = loc_start)
                done = False
                # Run one episode of training
                while not done:
                    # Get an action from the agent, given current trajectory
                    action, softmax_embedding = get_action(agent_net , episode)
                    # Update environment according to action
                    loc_next, reward, done = take_step(action , episode, softmax_embedding)
                    # Get the visible crop at current position
                    crop_current,loc_current = get_deterministic_crops( full_image, coords = loc_next[0])
                    # Update the episode
                    episode.update(action, reward, loc_current , crop_current)
                # Finish the episode, count rewards to go, iou etcetera
                episode.finish()
                # RNN networks need to reset their hidden states
                if agent_net.AGENT_TYPE == 'RNN':
                    agent_net.reset()
                # Add result from episode to batch
                batch.append_episode(episode)
            batch.prepare_for_loss()
            # BUG FIX: '== torch.nan' is always False (NaN never compares equal);
            # torch.isnan is the correct way to detect NaNs in the weights.
            if torch.isnan(batch.weights[:,0]).any(): print("Weights contains Nan")
            prev_net = deepcopy(agent_net)
            t = time.perf_counter()
            update_net(batch, agent_net, optimizer, entropy_bonus)
            batch.proptime = time.perf_counter() - t
            batch.store()
            # Measure how far the optimizer step moved the parameters
            se = 0
            max_diff = 0.  # renamed from 'max' to avoid shadowing the builtin
            tot_params = 0
            for params1,params2 in zip(agent_net.parameters(),prev_net.parameters()):
                temp_se = (params1-params2).square().sum().item()
                se += temp_se
                max_diff = np.maximum(max_diff ,(params1-params2).abs().max().item())
                if temp_se > 0:
                    tot_params += torch.numel(params1)
            # ROBUSTNESS: guard against division by zero when no parameter changed
            se = np.sqrt(se/tot_params) if tot_params > 0 else 0.0
            batch.sc.s('StepMSE').collect(se)
            batch.sc.s('StepMax').collect(max_diff)
            # Run the current model on one batch from the valloader
            with torch.no_grad():
                try:
                    images , (start_coords_, goal_coords_) = next(valloader_iterator)
                except StopIteration:
                    # BUG FIX: was a bare 'except:' which also swallowed e.g.
                    # KeyboardInterrupt; only iterator exhaustion should restart it.
                    valloader_iterator = iter(valloader)
                    images , (start_coords_, goal_coords_) = next(valloader_iterator)
                val_batch.initialize(batch_size=len(images))
                if tot_itr % CONFIG.MISC_save_vis_iter == 0 and CONFIG.MISC_include_baseline:
                    base_batch.initialize(batch_size=len(images))
                for (counter_val , episode_data) in enumerate(images):
                    episode_image = episode_data[None , :].to(device)
                    start_coord , goal_coord = None, None
                    # RNN networks need to reset their hidden states
                    if agent_net.AGENT_TYPE == 'RNN':
                        agent_net.reset()
                    val_episode = run_eval_trajectory( episode_image, val_episode, agent_net, loc_start = start_coord , loc_goal= goal_coord)
                    # RNN networks need to reset their hidden states
                    if agent_net.AGENT_TYPE == 'RNN':
                        agent_net.reset()
                    if tot_itr % CONFIG.MISC_save_vis_iter == 0 and CONFIG.MISC_include_baseline:
                        # Run the random baseline on the exact same start/goal pair
                        loc_start = val_episode.loc_start
                        loc_goal = val_episode.loc_goal
                        base_episode = run_eval_trajectory( episode_image,
                                base_episode,
                                agent_net, loc_start = loc_start , loc_goal= loc_goal, deterministic=False)
                        # RNN networks need to reset their hidden states
                        if agent_net.AGENT_TYPE == 'RNN':
                            agent_net.reset()
                        base_batch.append_episode(base_episode)
                    # RNN networks need to reset their hidden states
                    if agent_net.AGENT_TYPE == 'RNN':
                        agent_net.reset()
                    val_batch.append_episode(val_episode)
                if tot_itr % CONFIG.MISC_save_vis_iter == 0:
                    visualize_batch(val_batch, PATH = CONFIG.STATS_vis_dir, transform = unNormImage, save_name = 'val_batch_' + str(tot_itr))
                    if CONFIG.MISC_include_baseline:
                        visualize_batch(base_batch, PATH = CONFIG.STATS_vis_dir, transform = unNormImage, save_name = 'val_batch_' + str(tot_itr), prefix='random')
                # Save result
                val_batch.store(mode = 'Val')
            # Exponential annealing of the entropy bonus towards its lower bound
            if CONFIG.RL_entropy_lower is not None and CONFIG.RL_entropy is not None:
                entropy_bonus *= entropy_anneal_k_exp
            if tot_itr % CONFIG.MISC_print_iter == 0 or tot_itr == tot_nbr_iter:
                print("Iter: %d / %d" % (tot_itr , tot_nbr_iter))
                batch.sc.print()
            batch.sc.save()
            # Increment total iteration counter by one
            tot_itr += 1
            if tot_itr % CONFIG.MISC_save_model_iter == 0:
                torch.save(agent_net.state_dict(), os.path.join(CONFIG.STATS_log_dir, "model_%d" % tot_itr))
            # BATCH COMPLETE
except Exception as e:
    # Persist the failure details to info.json before dropping into the debugger
    info['Exception'] = str(e)
    info['BackTrace'] = traceback.format_exc()
    info['FinishedTraining'] = str(datetime.now())
    info['FinishedOnIter'] = tot_itr
    with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
        json.dump(info , info_file ,indent = 4)
    print("\nAn exception occurred in the main loop!\n" + "\n"+ "#"*60 + "\n")
    print(info['BackTrace'])
    print("#"*60)
    # Enter pdb to investigate
    pdb.set_trace()
print("Training finished!")
info['Completed'] = True
info["FinishedTraining"] = str(datetime.now())
info['FinishedOnIter'] = tot_itr
# Write completed status to info.json
with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
    json.dump(info , info_file ,indent = 4) # Save final model
torch.save(agent_net.state_dict() , os.path.join(CONFIG.STATS_log_dir , "final_model"))
| 16,125 | 37.122931 | 163 | py |
airloc | airloc-master/training/run_deterministic.py |
from datetime import datetime
import random
import time
import numpy as np
import json
from shutil import copyfile
import os
import sys
# matplotlib is used for debugging image inputs to networks
import matplotlib
import matplotlib.pyplot as plt
#matplotlib.use('TkAgg')
from copy import deepcopy
import torch
from torch.distributions.multivariate_normal import MultivariateNormal
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import sys
import math
from config import CONFIG
from utils.utils import load_normalize_data, visualize_trajectory, get_random_crops,\
get_deterministic_crops , get_crop_distance , compute_iou,\
check_outside, _setup_fixed_games, visualize_batch
# Import agent utils
from utils.agent_utils import get_reward , compute_loss , \
take_step , check_if_done , \
rewards_to_go , get_policy ,\
get_action , run_eval_trajectory
from doerchnet.utils import sample_doerch_crops
from networks.rl_agent import AJRLAgent, PatchResNetAgent
from networks.agent import Agent
from networks.RandomAgent import RandomAgent
from networks.pretrained_resnet_agent import PretrainedResNetAgent
from networks.rnn_agents import LSTMAgent
from networks.deterministic_agent import DeterministicAgent
from utils.stat_collector import StatCollector
from torch.utils.tensorboard import SummaryWriter
from utils.training_utils import BatchStorage , EpisodeStorage
# Create the log directory tree for this run and snapshot the source files
# that produced it, so results stay reproducible even if the repo changes.
# NOTE(review): the copyfile paths are relative -- assumes the script is run
# from the repository root.
#Create a directory to save the info
if not os.path.exists(CONFIG.STATS_log_dir_base):
    os.makedirs(CONFIG.STATS_log_dir_base)
os.makedirs(CONFIG.STATS_log_dir, exist_ok = False)
# Save visualizations in separate directory
vis_dir = os.path.join(CONFIG.STATS_log_dir, "visualizations")
os.makedirs(vis_dir)
scripts_dir = os.path.join(CONFIG.STATS_log_dir, "saved_scripts")
os.makedirs(scripts_dir)
# Save this training file
copyfile("training/train_agent.py" , os.path.join(scripts_dir, "train_agent.py"))
# Save config file
copyfile("config.py" , os.path.join(scripts_dir , "config.py"))
# Save network
copyfile("networks/early_rl_agents.py" , os.path.join(scripts_dir , "early_rl_agents.py"))
copyfile("networks/resnets.py" , os.path.join(scripts_dir, "resnets.py"))
copyfile("networks/rnn_agents.py", os.path.join(scripts_dir, "rnn_agents.py"))
# Save Utils files
copyfile("utils/utils.py", os.path.join(scripts_dir, "utils.py"))
copyfile("utils/training_utils.py" , os.path.join(scripts_dir , "training_utils.py"))
copyfile("utils/agent_utils.py" , os.path.join(scripts_dir, "agent_utils.py"))
# TODO - Add pretrained log dir
# Create folder for saving intermediate models
os.makedirs(os.path.join(CONFIG.STATS_log_dir, "models"))
metrics_dir = os.path.join(CONFIG.STATS_log_dir, "metrics")
os.makedirs(metrics_dir)
# Run metadata that is serialized to info.json in the log directory.
# 'Metrics' lists every statistic registered with the StatCollector below.
info = dict([
    ("AgentType" ,CONFIG.RL_agent_network),
    ("PatchEmbedder" , CONFIG.RL_patch_embedder),
    ("Completed" , False),
    ('Metrics' , [
        'Steps',
        'FinalDistanceOnlyFailure',
        'IoU',
        'ValSteps',
        'ValFinalDistanceOnlyFailure',
        'ValIoU',
        # 'GradientSize',
        #'GradientSize',
        'CumulativeRewardToGo',
        'HasConverged',
        'StepRatioOnlySuccess',
        #'StepTime',
        #'PropTime',
        #'CorrectActions',
        'ActionsTaken',
        'ValHasConverged',
        'ValStepRatioOnlySuccess',
        'ValCumulativeRewardToGo',
        #'ValStepTime',
        #'ValPropTime',
        'FullValSteps',
        'FullValIoU',
        'FullValDistance',
        'FullValHasConverged',
        'ValActionsTaken',
        #'ValCorrectActions',
        'SeparatedHasConverged',
        'SeparatedCumulativeRewardToGo',
        'SeparatedSteps',
        'SeparatedIoU',
        'ValSeparatedHasConverged',
        'ValSeparatedCumulativeRewardToGo',
        'ValSeparatedSteps',
        'ValSeparatedIoU',
    ]),
    ("StartedTraining" , str(datetime.now())),
    ("FinishedTraining" , 0),
    ("Dataset",CONFIG.MISC_dataset),
    ("MultiplyImages" , CONFIG.RL_multiply_images),
    ("NbrOfTrainableParameters" , 0),
    ("AgentClass" , "RL"),
    ("FullValIters", []) # At which iterations is the model evaluated on full validation
])
# Set random seed for all RNG sources so runs are reproducible
random.seed(CONFIG.MISC_random_seed)
np.random.seed(CONFIG.MISC_random_seed)
torch.manual_seed(CONFIG.MISC_random_seed)
# load the dataset
trainloader,valloader = load_normalize_data(download = False, batch_size = CONFIG.RL_batch_size , multiply_images = CONFIG.RL_multiply_images)
valloader_iterator = iter(valloader)
im_H, im_W = CONFIG.MISC_im_size
p_H,p_W = CONFIG.MISC_patch_size
max_len_batch = CONFIG.RL_max_episode_length*CONFIG.RL_batch_size * CONFIG.RL_multiply_images
ep_len = CONFIG.RL_max_episode_length
device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
# Make device globaly available
CONFIG.device = device
# Setup Agent -- this script only supports the deterministic baselines
if CONFIG.RL_agent_network == 'RandomAgent':
    agent_net = RandomAgent()
elif CONFIG.RL_agent_network == 'SpiralAgent':
    agent_net = DeterministicAgent(mode = 'spiral')
else:
    # BUG FIX: raising a plain string is a TypeError in Python 3 -- raise a real exception
    raise ValueError("Unknown RL agent selected.")
agent_net = agent_net.to(device)
agent_parameters = filter(lambda p: p.requires_grad, agent_net.parameters())
params = sum([np.prod(p.size()) for p in agent_parameters])
# Add number of parameters to the info json
info['NbrOfTrainableParameters'] = int(params)
# Write info dictionary to log directory
with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
    json.dump(info , info_file ,indent = 4)
# Setup StatCollector
metrics = info['Metrics']
exclude_prints = ['ActionsTaken' , 'ValActionsTaken', 'ValPropTime', 'ValStepTime', 'StepTime',
                  'CumulativeRewardToGo', 'ValCumulativeRewardToGo','StepRatio','ValStepRatio',
                  'HasConverged' , 'ValHasConverged','ValCorrectActions','CorrectActions'] # Does not print these statistics
tot_nbr_iter = CONFIG.RL_nbr_epochs * len(trainloader)
tot_itr = 0
sc = StatCollector(metrics_dir, tot_nbr_iter , print_iter = CONFIG.MISC_print_iter, exclude_prints = exclude_prints)
# Add all metrics to StatCollector
for metric in metrics:
    sc.register(metric , {'type':'avg' ,'freq':'step'})
# Open statistics for dataset to find unnormalizing transform
if CONFIG.MISC_dataset.startswith('custom_'):
    stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
    stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
with open(stat_path) as json_file:
    stats = json.load(json_file)
dataset_means = torch.tensor(stats['means'][:3])
dataset_stds = torch.tensor(stats['stds'][:3])
# Inverse of the loader's normalization, used when visualizing crops
unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
# Storage objects for all actions, weights, rewards etcetera
batch = BatchStorage(sc)
episode = EpisodeStorage()
val_batch = BatchStorage(sc)
val_episode = EpisodeStorage()
# If enabled there will be an entropy bonus and a linear annealing of this bonus
entropy_bonus = CONFIG.RL_entropy
if CONFIG.RL_entropy_lower is not None and CONFIG.RL_entropy is not None:
    # Linear annealing
    #entropy_anneal_k = (entropy_bonus - CONFIG.RL_entropy_lower ) / tot_nbr_iter
    entropy_anneal_k_exp = math.exp( math.log( CONFIG.RL_entropy_lower / entropy_bonus) / tot_nbr_iter)
# Increase recursion limit for debugging
sys.setrecursionlimit(2000)
# Initialize all clocations to zero to avoid crashing later
loc_goal = None
loc_start = None
start_coord = None
goal_coord = None
# NOTE(review): rep_counter appears unused in this script -- confirm before removing
rep_counter = 91
# Print out info regarding this training run
print("Starting training at:\t%s" % info['StartedTraining'])
print("Agent Network:\t%s" % CONFIG.RL_agent_network)
print("Patch Embedder:\t%s" % CONFIG.RL_patch_embedder)
print("Dataset:\t%s" % CONFIG.MISC_dataset)
try:
    for epoch in range(CONFIG.RL_nbr_epochs):
        # Passes through the dataset in batches
        for batch_counter,batch_data in enumerate(trainloader):
            batch_images , (start_crops_ , goal_crops_) = batch_data
            batch.initialize(batch_size = len(batch_images))
            # Each image/episode is handled seperatly
            for (episode_counter, episode_data) in enumerate(batch_images):
                full_image = episode_data[None,:].to(device)
                loc_start , loc_goal = None, None
                # Initializes all training tensors and sets start and goal patch
                episode.initialize(image = full_image, loc_goal = loc_goal, loc_start = loc_start)
                done = False
                # Run one episode of training
                while not done:
                    # Get an action from the agent, given current trajectory
                    action, softmax_embedding = get_action(agent_net , episode)
                    # Update environment according to action
                    loc_next, reward, done = take_step(action , episode, softmax_embedding)
                    # Get the visible crop at current position
                    crop_current,loc_current = get_deterministic_crops( full_image, coords = loc_next[0])
                    # Update the episode
                    episode.update(action, reward, loc_current , crop_current)
                # Finish the episode, count rewards to go, iou etcetera
                episode.finish()
                # RNN networks need to reset their hidden states
                if agent_net.AGENT_TYPE == 'RNN':
                    agent_net.reset()
                # Add result from episode to batch
                batch.append_episode(episode)
            batch.prepare_for_loss()
            batch.store()
            # Run the current model on one batch from the valloader
            with torch.no_grad():
                try:
                    images , (start_coords_, goal_coords_) = next(valloader_iterator)
                except StopIteration:
                    # BUG FIX: was a bare 'except:' which also swallowed e.g.
                    # KeyboardInterrupt; only iterator exhaustion should restart it.
                    valloader_iterator = iter(valloader)
                    images , (start_coords_, goal_coords_) = next(valloader_iterator)
                val_batch.initialize(batch_size=len(images))
                for (counter_val , episode_data) in enumerate(images):
                    episode_image = episode_data[None , :].to(device)
                    start_coord , goal_coord = None, None
                    val_episode = run_eval_trajectory( episode_image, val_episode, agent_net, loc_start = start_coord , loc_goal= goal_coord)
                    val_batch.append_episode(val_episode)
                if tot_itr % CONFIG.MISC_save_vis_iter == 0:
                    visualize_batch(val_batch, PATH = vis_dir, transform = unNormImage, save_name = 'val_batch_' + str(tot_itr))
                # Save result
                val_batch.store(mode = 'Val')
            if CONFIG.RL_entropy_lower is not None and CONFIG.RL_entropy is not None:
                entropy_bonus *= entropy_anneal_k_exp
            if tot_itr % CONFIG.MISC_print_iter == 0 or tot_itr == tot_nbr_iter:
                print("Iter: %d / %d" % (tot_itr , tot_nbr_iter))
                batch.sc.print()
            batch.sc.save()
            # Increment total iteration counter by one
            tot_itr += 1
            # BATCH COMPLETE
except KeyboardInterrupt:
    # Interactive shutdown: let the user decide whether to keep the weights
    print("\nInterrupted")
    while True:
        i = input("\n Save model? (y/n)")
        if i == "y":
            print("Saving Model")
            torch.save(agent_net.state_dict() , os.path.join(CONFIG.STATS_log_dir , "final_model"))
            sys.exit(1)
        elif i == "n":
            print("Not Saving Model")
            sys.exit(1)
        print("No valid input")
print("Training finished!")
info['Completed'] = True
info["FinishedTraining"] = str(datetime.now())
# Write completed status to info.json
with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
    json.dump(info , info_file ,indent = 4) # Save final model
torch.save(agent_net.state_dict() , os.path.join(CONFIG.STATS_log_dir , "final_model"))
| 12,201 | 33.179272 | 142 | py |
airloc | airloc-master/eval/eval_agent.py | from datetime import datetime
import random
import time
import numpy as np
import json
import os
import sys
import importlib
# matplotlib is used for debugging image inputs to networks
import matplotlib
import matplotlib.pyplot as plt
#matplotlib.use('TkAgg')
import torch
import torchvision.transforms as transforms
import argparse
from config import CONFIG
from utils.utils import load_normalize_data, visualize_batch, find_latest_log
# Import agent utils
from utils.agent_utils import run_eval_trajectory
from networks.agent import Agent
from networks.RandomAgent import RandomAgent
from networks.rnn_agents import LSTMAgent
from utils.stat_collector import StatCollector
from torch.utils.tensorboard import SummaryWriter
from utils.training_utils import BatchStorage, EpisodeStorage
from config import CONFIG
def replace_config(loaded_config):
    """Copy settings from a loaded run config into the global CONFIG.

    Keys whose name contains any of the protected substrings (evaluation
    settings, device selection, main PID, dataset choice, allowed-outside
    flag) are left untouched so they can be overridden per evaluation run.
    """
    protected = ('EVAL', 'device', 'MISC_main_pid', 'dataset', 'allowed_outside')
    for key in loaded_config.keys():
        if any(token in key for token in protected):
            continue
        CONFIG[key] = loaded_config[key]
def main(args):
    """Evaluate a trained agent on a split of its dataset.

    Loads the run configuration and final weights from the selected log
    directory, runs one deterministic evaluation trajectory per validation
    image, and writes metrics, visualizations and a final_stats.txt summary
    back into that log directory.
    """
    # The file-level import is only 'import importlib'; importlib.util is not
    # guaranteed to be reachable through it, so import it explicitly here.
    import importlib.util
    # Seed everything for a reproducible evaluation
    if args.seed:
        seed = args.seed
    else:
        seed = 0
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    # Get the log in the correct position in the log dir
    log_base = "logs"
    if args.log_dir:
        log_dir = args.log_dir
    else:
        log_dir = find_latest_log(log_base , args.n)
    eval_metrics_path = os.path.join(log_base,log_dir,'metrics_eval')
    os.makedirs(eval_metrics_path,exist_ok=True)
    # Import the CONFIG file from the log
    scripts_dir = os.path.join(log_base,log_dir)
    networks_file_path = os.path.join(scripts_dir, "config.py")
    spec = importlib.util.spec_from_file_location("config",networks_file_path)
    config_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(config_module)
    loaded_config = config_module.CONFIG
    replace_config(loaded_config)
    # Re-anchor the log paths relative to this file and force per-batch visualization
    CONFIG.STATS_dir_base = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..')
    CONFIG.STATS_log_dir_base = os.path.join(CONFIG.STATS_dir_base, log_base)
    CONFIG.STATS_log_dir = os.path.join(CONFIG.STATS_log_dir_base,log_dir)
    CONFIG.MISC_vis_per_batch = 1
    CONFIG.MISC_vis_iter = 1
    CONFIG.TERMINATE = False
    vis_dir = os.path.join(log_base,log_dir, "visualizations")
    # The info dictionary for what to store
    info = dict([
        ("AgentType" ,CONFIG.RL_agent_network),
        ("PatchEmbedder" , CONFIG.RL_patch_embedder),
        ("Completed" , False),
        ('Metrics' , [
            'EpisodeTime',
            'StocSteps',
            'DetSteps',
            'StocIoU',
            'DetIoU',
            'StocStepRatioOnlySuccess',
            'DetStepRatioOnlySuccess',
            'StocFinalDistanceOnlyFailure',
            'DetFinalDistanceOnlyFailure',
            'StocCumulativeRewardToGo',
            'DetCumulativeRewardToGo',
            'StocHasConverged',
            'DetHasConverged',
            'StocDifficulty',
            'DetDifficulty',
            'StocActionsTaken',
            'DetActionsTaken',
            'StocSeparatedSteps',
            'DetSeparatedSteps',
            'StocSeparatedIoU',
            'DetSeparatedIoU',
            'StocSeparatedCumulativeRewardToGo',
            'DetSeparatedCumulativeRewardToGo',
            'StocSeparatedHasConverged',
            'DetSeparatedHasConverged',
            'StocGoalLoc',
            'DetGoalLoc',
            'StocActionProbs',
            'DetActionProbs',
        ]),
        ("StartedEvalAt" , str(datetime.now())),
        ("FinishedTraining" , 0),
        ("Dataset",CONFIG.MISC_dataset),
        ("MultiplyImages" , CONFIG.RL_multiply_images),
        ("NbrOfTrainableParameters" , 0),
        ("AgentClass" , "RL"),
        ("FullValIters", []) # At which iterations is the model evaluated on full validation
    ])
    _,valloader = load_normalize_data(download = False, batch_size = 1 ,
                                      multiply_images = 1,split=args.split,
                                      use_eval_split = args.eval_split
                                      )
    device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
    # Make device globaly available
    CONFIG.device = device
    # Setup Agent
    if CONFIG.RL_agent_network == 'LSTMAgent':
        agent_net = LSTMAgent()
    elif CONFIG.RL_agent_network == 'Agent':
        agent_net = Agent()
    elif CONFIG.RL_agent_network == 'RandomAgent':
        agent_net = RandomAgent()
    else:
        # BUG FIX: raising a plain string is a TypeError in Python 3
        raise ValueError("Unknown RL agent selected.")
    agent_net.load_state_dict(torch.load(os.path.join(log_base,log_dir,'final_model')), strict=False)
    agent_net.eval()
    agent_net = agent_net.to(device)
    metrics = info['Metrics']
    exclude_prints = [
        'StocFinalDistanceOnlyFailure',
        'StocCumulativeRewardToGo',
        'StocStepRatioOnlySuccess',
        'StocActionsTaken',
        'StocSeparatedSteps',
        'StocSeparatedIoU',
        'StocSeparatedCumulativeRewardToGo',
        'StocSeparatedHasConverged',
        'DetFinalDistanceOnlyFailure',
        'DetStepRatioOnlySuccess',
        'DetCumulativeRewardToGo',
        'DetActionsTaken',
        'DetSeparatedSteps',
        'DetSeparatedIoU',
        'DetSeparatedCumulativeRewardToGo',
        'DetSeparatedHasConverged',
        'StocGoalLoc',
        'DetGoalLoc',
        'StocActionProbs',
        'DetActionProbs',
    ] # Does not print these statistics
    num_passes = 1
    tot_nbr_iter = num_passes* len(valloader)
    tot_itr = 0
    sc = StatCollector(eval_metrics_path, tot_nbr_iter , print_iter = CONFIG.MISC_print_iter, exclude_prints = exclude_prints)
    # Add all metrics to StatCollector
    for metric in metrics:
        sc.register(metric , {'type':'avg' ,'freq':'step'})
    # Open statistics for dataset to find unnormalizing transform
    if CONFIG.MISC_dataset.startswith('custom_'):
        stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
    else:
        stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
    with open(stat_path) as json_file:
        stats = json.load(json_file)
    dataset_means = torch.tensor(stats['means'][:3])
    dataset_stds = torch.tensor(stats['stds'][:3])
    unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
    # Storage objects for all actions, weights, rewards etcetera
    stoc_batch = BatchStorage(sc)
    stoc_episode = EpisodeStorage()
    det_batch = BatchStorage(sc)
    det_episode = EpisodeStorage()
    # Print out info regarding this evaluation run
    print("Starting eval at:\t%s" % info['StartedEvalAt'])
    print("Agent Network:\t%s" % CONFIG.RL_agent_network)
    print("Patch Embedder:\t%s" % CONFIG.RL_patch_embedder)
    print("Dataset:\t%s" % CONFIG.MISC_dataset)
    misc_info = []
    with torch.no_grad():
        for epoch in range(num_passes):
            # Passes through the dataset in batches
            for batch_counter,batch_data in enumerate(valloader):
                # Get the images from the batch
                batch_images , (start_crops_ , goal_crops_) = batch_data
                # Initialize the utils
                stoc_batch.initialize(batch_size = len(batch_images))
                det_batch.initialize(batch_size = len(batch_images))
                # Loop over the batch of images
                for (episode_counter, episode_data) in enumerate(batch_images):
                    episode_image = episode_data[None, :].to(device)
                    start_coord, goal_coord = start_crops_[0, :], goal_crops_[0, :]
                    # TODO: ENABLE NOT RUNNING STOC TO SAVE TIME !!
                    if agent_net.AGENT_TYPE == 'RNN':
                        agent_net.reset()
                    det_episode = run_eval_trajectory(episode_image, det_episode, agent_net, loc_start=start_coord, loc_goal=goal_coord, deterministic=True)
                    # CLEANUP: dropped a dead 'if True:' guard around this append
                    misc_info.append(np.concatenate([det_episode.actions.cpu().detach().numpy(),
                                                     np.squeeze(det_episode.misc, axis=1),
                                                     det_episode.weights.cpu().detach().numpy()[:, np.newaxis],
                                                     det_episode.dists.cpu().detach().numpy()[:, np.newaxis]], axis=1))
                    if agent_net.AGENT_TYPE == 'RNN':
                        agent_net.reset()
                    det_batch.append_episode(det_episode)
                det_batch.store(mode = 'Det',eval=True)
                if tot_itr % CONFIG.EVAL_save_vis_iter == 0 :
                    visualize_batch(det_batch, PATH = vis_dir, transform = unNormImage, save_name = 'eval_' + str(tot_itr), prefix='Det')
                if tot_itr % CONFIG.MISC_print_iter == 0 or tot_itr == tot_nbr_iter:
                    print("Iter: %d / %d" % (tot_itr , tot_nbr_iter))
                    det_batch.sc.print()
                det_batch.sc.save()
                # Increment total iteration counter
                tot_itr += 1
    print("Iter: %d / %d" % (tot_itr , tot_nbr_iter))
    det_batch.sc.save()
    # Append a human-readable summary to the log's final_stats.txt
    stat_path = os.path.join(CONFIG.STATS_log_dir,'final_stats.txt')
    with open(stat_path, 'a') as file:
        # BUG FIX: corrected the misspelled "Restults" in the output message
        print(f"Results from {CONFIG.MISC_dataset} using {args.split}-set", file=file)
    print(f"Results from {CONFIG.MISC_dataset} using {args.split}-set")
    det_batch.sc.print()
    det_batch.sc.exclude_prints = None
    det_batch.sc.print(path=stat_path)
    # NOTE(review): misc_info holds per-episode arrays of varying length;
    # np.save of a ragged list relies on pickled object arrays -- confirm intended
    np.save('misc_info', misc_info)
    print("Evaluation completed!")
if __name__ == '__main__':
    # This part is used to be able to simply generate split files for datasets
    parser = argparse.ArgumentParser()
    # -n and --log_dir are mutually exclusive ways to select which run to evaluate
    log_args = parser.add_mutually_exclusive_group()
    log_args.add_argument( "-n", type = int, help = "Select log number",default = 0)
    log_args.add_argument("--log_dir", type = str, help = "Select log name", default=None)
    # NOTE(review): --no_cuda is parsed but never read inside main -- confirm
    parser.add_argument("--no_cuda", action='store_true', help = "Disable cuda", default=False)
    parser.add_argument("--seed", type = int, help = "Set seed", default=None)
    parser.add_argument("--split", type = str, help = "Set split", default='val')
    parser.add_argument("--eval_split", type = str, help = "Set split", default='basic')
    args = parser.parse_args()
    main(args)
| 11,306 | 40.417582 | 160 | py |
airloc | airloc-master/doerchnet/share_net.py |
import torch
import torch.nn as nn
from config import CONFIG
class ShareNet(nn.Module):
    """Two-stream conv network that embeds a (start, goal) patch pair.

    Stage one processes each patch independently; stage two processes the
    cross-concatenation of both stage-one feature maps, so each branch sees
    both views. The concatenated stage-two features are projected to a
    256-dim latent embedding and then to ``num_out_classes`` logits.
    forward() returns ``(embedding, logits)``.
    """

    def __init__(self, num_out_classes=8):
        super(ShareNet, self).__init__()
        # One extra input channel when a segmentation mask is stacked onto the patch
        if CONFIG.RL_priv_use_seg or CONFIG.RL_predict_seg_mask:
            self.seg_chan = 1
        else:
            self.seg_chan = 0
        self.n_chan = 3 + self.seg_chan
        self.latent_space_dim = 256
        # Stage one: separate towers for the start and goal patches
        # (non-siamese for now: weights are not shared between the towers)
        self.start_double_conv1 = DoubleConvBlock(self.n_chan)
        self.goal_double_conv1 = DoubleConvBlock(self.n_chan)
        # Stage two consumes the concatenation of both stage-one outputs
        stage1_chans = self.start_double_conv1.out_chan
        self.start_double_conv2 = DoubleConvBlock(stage1_chans * 2)
        self.goal_double_conv2 = DoubleConvBlock(stage1_chans * 2)
        self.flat = nn.Flatten(start_dim=1)
        # Each DoubleConvBlock halves H and W twice -> four halvings in total
        self.common_layer_dim = 2 * self.start_double_conv2.out_chan * ((CONFIG.MISC_patch_size[0] // (2 ** 4)) ** 2)
        # Common linear head
        self.fc_int = nn.Linear(self.common_layer_dim, self.latent_space_dim)
        self.fc_out = nn.Linear(self.latent_space_dim, num_out_classes)
        self.act = nn.ReLU()

    def forward(self, start, goal):
        # Stage one: independent feature towers
        feat_start = self.start_double_conv1(start)
        feat_goal = self.goal_double_conv1(goal)
        # Cross-wire the towers: each stage-two branch sees both feature maps
        fused_start = self.start_double_conv2(torch.cat((feat_start, feat_goal), dim=1))
        fused_goal = self.goal_double_conv2(torch.cat((feat_goal, feat_start), dim=1))
        # Project the joint feature map to the latent embedding
        emb = self.flat(torch.cat((fused_start, fused_goal), dim=1))
        emb = self.act(self.fc_int(emb))
        out = self.fc_out(emb)
        return emb, out
class DoubleConvBlock(nn.Module):
    """Two chained ConvBlocks: channels grow 1x -> 2x -> 3x, spatial size /4."""

    def __init__(self, in_channels, kernel_size = 3):
        super(DoubleConvBlock, self).__init__()
        mid_chan = 2 * in_channels
        self.out_chan = 3 * in_channels
        self.block1 = ConvBlock(in_channels, mid_chan, kernel_size)
        self.block2 = ConvBlock(mid_chan, self.out_chan, kernel_size)

    def forward(self, x):
        # Feed the input through both conv/pool stages in sequence.
        return self.block2(self.block1(x))
class ConvBlock(nn.Module):
    """One stage of conv ('same' padding) -> ReLU -> 2x2 max-pool."""

    def __init__(self , in_channels, out_channels , kernel_size = 3):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels , out_channels , kernel_size, padding = 'same')
        self.act = nn.ReLU()
        self.pool = nn.MaxPool2d(2,2)

    def forward(self, x):
        # Halves the spatial resolution while mapping to out_channels.
        return self.pool(self.act(self.conv(x)))
| 2,785 | 24.796296 | 115 | py |
airloc | airloc-master/doerchnet/utils.py | import torch
import numpy as np
import torchvision.transforms as transforms
import os
import gc
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import urllib
import matplotlib
import random
from torch.nn.functional import one_hot
from config import CONFIG
from utils.utils import get_deterministic_crops
def sample_doerch_crops(images):
    """Sample aligned (start, goal) crop pairs from a batch of images.

    Each image is split into a uniform grid of patch-sized cells; a start
    cell is drawn and a goal cell is drawn from its 8-neighbourhood (see
    sample_goal). Two experimental sampling modes exist behind the flags
    below and are currently disabled.

    Returns:
        (crops_start, crops_goal, actions_one_hot, loc_crops_start,
        loc_crops_goal): crops are (N, C, h, w) tensors, locations are
        (N, 4) [top, left, height, width] tensors, actions_one_hot is (N, 8).
    """
    # NEW IDEA! Only sample n/2 pairs of crops and for the next n/2 pairs pick the opposite
    # correct action as in the previous.
    # When below variable is enabled this new approach is used. False gives old
    NEW_SAMPLING_APPROACH = False
    # When enabled exactly same number of locs in the different positions
    EQUAL_GOAL_LOC_DISTRIBUTION = False
    if EQUAL_GOAL_LOC_DISTRIBUTION and NEW_SAMPLING_APPROACH:
        raise(Exception("EQUAL_GOAL_LOC_DISTRIBUTION does not work with NEW_SAMPLING_APPROACH"))
    # define some useful constants
    h, w = CONFIG.MISC_patch_size
    im_h, im_w = CONFIG.MISC_im_size
    actions_one_hot = torch.zeros((images.shape[0],8))
    n_chan = images.shape[1]
    n_imgs = images.shape[0]
    N = images.shape[0]
    # initialize memory for the crops size = (batch, n_chan, h_p, w_p)
    # keep the number of channels at a constant
    crops_goal = torch.zeros(size=(n_imgs, n_chan, h, w))
    crops_start = torch.zeros(size=(n_imgs, n_chan, h, w))
    loc_crops_goal = torch.zeros(size = ( n_imgs , 4))
    loc_crops_start = torch.zeros(size = (n_imgs , 4))
    if EQUAL_GOAL_LOC_DISTRIBUTION:
        # Pre-assign actions so each of the 8 directions appears equally often.
        for i in range(0, 8):
            actions_one_hot[(N//8 * (i)):(N//8) *(i+1), i] = 1
        # Randomize if any left
        for i in range(N//8 * 8, N):
            actions_one_hot[i , random.randint(0,7)] = 1
    if NEW_SAMPLING_APPROACH:
        N = images.shape[0]
        for i in range(N // 2 + N % 2): # If odd sample one more
            # image is divided into a static uniform grid. patches are sampled from this grid
            upper_h , upper_w = int(im_h / h) - 1 , int(im_w / w) - 1
            lower_h , lower_w = ( 0 , 0)
            grid_loc = np.floor(np.random.uniform( low = [lower_h ,lower_w] , high = [upper_h , upper_w]))
            goal_loc, action = sample_goal(grid_loc,upper_h,upper_w)
            actions_one_hot[i,action] = 1
            # Pixel locations are [top, left, height, width]; grid cells are
            # offset by half a patch from the image origin.
            locs_start = np.concatenate(((grid_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
            locs_goal = np.concatenate(((goal_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
            loc_crops_start[i, :] = torch.tensor(np.array(locs_start, dtype = int))
            loc_crops_goal[i, :] = torch.tensor(np.array(locs_goal, dtype = int))
            crops_start[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_start[0], left=locs_start[1], height=locs_start[2], width=locs_start[3])
            crops_goal[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_goal[0], left=locs_goal[1], height=locs_goal[2], width=locs_goal[3])
        # Now we have sampled half the pairs we need. The next half should be sampled start crops
        # But inverse of previous correct action
        for i in range(N//2 + N % 2 , N):
            upper_h , upper_w = int(im_h / h) - 1 , int(im_w / w) - 1
            lower_h , lower_w = ( 0 , 0)
            grid_loc = np.floor(np.random.uniform( low = [lower_h ,lower_w] , high = [upper_h , upper_w]))
            # The following line is only difference
            goal_loc, action = opposite_goal(grid_loc,torch.argmax(actions_one_hot[i - N//2, :]).item())
            actions_one_hot[i,action] = 1
            locs_start = np.concatenate(((grid_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
            locs_goal = np.concatenate(((goal_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
            loc_crops_start[i, :] = torch.tensor(np.array(locs_start, dtype = int))
            loc_crops_goal[i, :] = torch.tensor(np.array(locs_goal, dtype = int))
            crops_start[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_start[0], left=locs_start[1], height=locs_start[2], width=locs_start[3])
            crops_goal[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_goal[0], left=locs_goal[1], height=locs_goal[2], width=locs_goal[3])
    else:
        # The old approach, all samples are uniform
        for i in range(images.shape[0]):
            # image is divided into a static uniform grid. patches are sampled from this grid
            # NOTE: bounds here keep a one-cell margin (1 .. upper-2), unlike
            # the NEW_SAMPLING_APPROACH branch above.
            upper_h , upper_w = int(im_h / h) - 2 , int(im_w / w) - 2
            lower_h , lower_w = ( 1 , 1)
            grid_loc = np.floor(np.random.uniform( low = [lower_h ,lower_w] , high = [upper_h , upper_w]))
            if EQUAL_GOAL_LOC_DISTRIBUTION:
                goal_loc = map_grid_action_to_goal(grid_loc, torch.argmax(actions_one_hot[i, :]))
            else:
                goal_loc, action = sample_goal(grid_loc,upper_h,upper_w)
                actions_one_hot[i,action] = 1
            locs_start = np.concatenate(((grid_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
            locs_goal = np.concatenate(((goal_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
            loc_crops_start[i, :] = torch.tensor(np.array(locs_start, dtype = int))
            loc_crops_goal[i, :] = torch.tensor(np.array(locs_goal, dtype = int))
            crops_start[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_start[0], left=locs_start[1], height=locs_start[2], width=locs_start[3])
            crops_goal[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_goal[0], left=locs_goal[1], height=locs_goal[2], width=locs_goal[3])
    return crops_start,crops_goal,actions_one_hot ,loc_crops_start , loc_crops_goal
def get_label(loc_start,loc_goal, dim = 8):
    """Build the one-hot training label for a (start, goal) location pair.

    Args:
        loc_start: (B, >=2) tensor of start locations in pixels (top, left, ...).
        loc_goal:  (B, >=2) tensor of goal locations in pixels.
        dim: size of the label space.
            8  -> which of the 8 neighbouring grid cells holds the goal.
            25 -> absolute goal cell on a 5x5 grid.
            80 -> goal cell relative to start on a 9x9 grid, centre removed.

    Returns:
        (B, dim) float one-hot tensor.

    Raises:
        Exception: if start and goal coincide, or `dim` is unsupported.
    """
    H, W = CONFIG.MISC_patch_size
    step_sz = int(CONFIG.RL_softmax_step_size*H)
    if dim == 8:
        # Offset of goal relative to start in grid steps, shifted so the
        # row-major 3x3 index runs 0..8 with 4 meaning "same cell" (invalid).
        diff = (loc_goal[:,:2]- loc_start[:,:2] )/step_sz
        diff += 1
        inds = diff[:,0]*3 + diff[:,1]
        actions = torch.zeros_like(inds)
        # FIX: the loop previously rebound its own iterable name
        # (`for i,inds in enumerate(inds)`), shadowing `inds`; use a
        # distinct loop variable. The 3x3 index is mapped onto the
        # clockwise action numbering used by the agent.
        for i, ind in enumerate(inds):
            if ind == 0: actions[i] = 7
            elif ind == 1: actions[i] = 0
            elif ind == 2: actions[i] = 1
            elif ind == 3: actions[i] = 6
            elif ind == 4: raise(Exception("Same start and goal loc"))
            elif ind == 5: actions[i] = 2
            elif ind == 6: actions[i] = 5
            elif ind == 7: actions[i] = 4
            elif ind == 8: actions[i] = 3
        actions = one_hot(actions.long(), dim).float()
    elif dim == 25:
        # Absolute goal cell on the 5x5 grid, row-major.
        diff = loc_goal[:,:2]/step_sz
        actions = diff[:,0]*5 + diff[:,1]
        actions = one_hot(actions.long(), dim).float()
    elif dim == 80:
        # 0 - 80 from top left to lower right
        # TODO Why not signed?
        move = ((loc_goal[:,:2] - loc_start[:,:2]) / step_sz )
        actions = torch.zeros((move.shape[0] , 9,9 ), dtype = torch.long) # NOTE - Current position still here
        actions[torch.arange(move.shape[0]) , (4 + move[:,0]).long() , (4 + move[:,1]).long()] = 1
        # Reshape to one hot encoding
        actions = torch.flatten(actions , start_dim = 1)
        # Check if any start and goal is at same position
        if (actions[:, 4 * 9 + 4] == 1).any():
            raise(Exception("Same start and goal location in get_label"))
        else:
            # Remove current position from label space
            actions = torch.cat((actions[:,0:40] , actions[:,41:]), dim = 1).float()
    else:
        raise(Exception("UNexpected dimension in 'get_label':\t%d" % dim))
    return actions
def opposite_goal(grid_loc, prev_action):
    """Return (goal_loc, action) obtained by reversing `prev_action`."""
    # Actions number the 8 neighbours clockwise, so the opposing direction
    # sits four steps away modulo 8.
    flipped = (prev_action + 4) % 8
    return map_grid_action_to_goal(grid_loc, flipped), flipped
def map_grid_action_to_goal(grid_loc , action):
    """Return the goal grid cell reached by taking `action` from `grid_loc`.

    Actions 0..7 walk the 8 neighbours clockwise starting straight up;
    any other value leaves the location unchanged.
    """
    step = CONFIG.RL_softmax_step_size
    offsets = ([-step, 0], [-step, step], [0, step], [step, step],
               [step, 0], [step, -step], [0, -step], [-step, -step])
    goal_loc = grid_loc.copy()
    # Compare with == (not indexing) so non-integer / tensor actions behave
    # exactly as the original elif chain did.
    for idx, offset in enumerate(offsets):
        if action == idx:
            goal_loc += offset
            break
    return goal_loc
def sample_goal(grid_loc,upper_h,upper_w):
    """Sample a neighbouring goal cell for `grid_loc` uniformly over the
    8 directions, and return (goal_loc, action_idx).

    NOTE(review): each boundary branch below zeroes out the out-of-image
    directions and then immediately raises, so the masking is unreachable;
    callers are expected to pass interior cells only. Confirm this is the
    intended hard guard rather than leftover masking logic.
    """
    probs = np.ones((8))
    # ensure no stepping outside the image
    # TODO:Note Limit is dependent on upperh and upperw
    if grid_loc[0] <= 0:
        probs[-1] = 0
        probs[0:2] = 0
        raise(Exception("Setting action probabilites to 0 in sample_goal"))
    if grid_loc[1] <= 0:
        probs[ 5:] = 0
        raise(Exception("Setting action probabilites to 0 in sample_goal"))
    if grid_loc[0] >= upper_h:
        probs[ 3:6] = 0
        raise(Exception("Setting action probabilites to 0 in sample_goal"))
    if grid_loc[1] >= upper_w:
        probs[ 1:4] = 0
        raise(Exception("Setting action probabilites to 0 in sample_goal"))
    # Normalize and draw one of the (remaining) directions.
    probs = probs/np.sum(probs)
    action_idx = np.random.choice(range(8), p=probs)
    goal_loc = map_grid_action_to_goal(grid_loc , action_idx)
    return goal_loc, action_idx
def visualize_batch_doerch( imgs , locs_start , locs_goal , actions, transform = None , PATH = "vis" , save_name = 'vis', max_imgs = 8):
    """Visualize up to `max_imgs` samples from a batch, one image file each,
    written to a freshly created PATH/save_name directory."""
    out_dir = os.path.join(PATH, save_name)
    os.makedirs(out_dir)
    imgs = imgs.cpu()
    locs_start = locs_start.cpu()
    locs_goal = locs_goal.cpu()
    actions = actions.cpu()
    # One visualization file per sample.
    for idx in range(min(imgs.shape[0], max_imgs)):
        visualize_doerch(imgs[idx, :], locs_start[idx, :], locs_goal[idx, :],
                         actions[idx, :], transform = transform,
                         PATH = out_dir, save_name = "vis_%d" % idx)
def visualize_doerch(img , loc_start , loc_goal , action , transform = None, PATH = '.' , save_name = 'vis'):
    """Render one sample: the full image with start (blue), goal (green) and
    predicted/agent (yellow) patch rectangles, plus the start and goal crops
    in a side panel. Saves PATH/save_name.png.

    `action` may be 8-, 25- or 80-dimensional; its argmax is decoded into
    the agent's patch location accordingly.
    """
    if transform is not None:
        img = transform(img)
    patch_size_tensor = torch.tensor(CONFIG.MISC_patch_size).cpu()
    action_idx = torch.argmax(action).item()
    loc_action = loc_start.detach().clone().cpu()
    # given the action find the choosen location
    # (the 1.1 factor leaves a small visual gap between adjacent patches)
    if action.shape[0] == 8:
        if action_idx == 0: loc_action[0:2] += torch.tensor([-1.1,0] ) * patch_size_tensor
        elif action_idx == 1: loc_action[0:2] += torch.tensor([-1.1,1.1] ) * patch_size_tensor
        elif action_idx == 2: loc_action[0:2] += torch.tensor([0,1.1]) * patch_size_tensor
        elif action_idx == 3: loc_action[0:2] += torch.tensor([1.1,1.1] ) * patch_size_tensor
        elif action_idx == 4: loc_action[0:2] += torch.tensor([1.1,0]) * patch_size_tensor
        elif action_idx == 5: loc_action[0:2] += torch.tensor([1.1,-1.1] ) * patch_size_tensor
        elif action_idx == 6: loc_action[0:2] += torch.tensor([0,-1.1] )* patch_size_tensor
        elif action_idx == 7: loc_action[0:2] += torch.tensor([-1.1,-1.1] ) * patch_size_tensor
    elif action.shape[0] == 25:
        # Absolute cell on a 5x5 grid.
        x,y = divmod(action_idx,5)
        loc_action[0:2] = torch.tensor([x,y] ) * int(CONFIG.RL_softmax_step_size*CONFIG.MISC_patch_size[0])
    elif action.shape[0] == 80:
        # Relative cell on a 9x9 grid with the centre (index 40) removed.
        if action_idx >= 40: action_idx += 1
        x,y = divmod(action_idx,9)
        x -= 4
        y -= 4
        loc_action[0:2] += torch.tensor([x,y] ) * int(CONFIG.RL_softmax_step_size*CONFIG.MISC_patch_size[0])
    else:
        raise(Exception("Unknown action space"))
    # make sure integer value
    loc_action = loc_action.long()
    fig = plt.figure(figsize = (10, 6))
    subfigs = fig.subfigures(1, 2)
    ax = subfigs[0].subplots( )
    ax.imshow(np.transpose(img, (1, 2, 0)))
    # Locations are [top, left, height, width]; Rectangle wants (x, y).
    rect_start = patches.Rectangle(xy=(loc_start[1], loc_start[0]), width=loc_start[3],
                height=loc_start[2], linewidth=2, edgecolor='b', facecolor='none')
    rect_goal = patches.Rectangle(xy=(loc_goal[1], loc_goal[0]), width=loc_goal[3],
                height=loc_goal[2], linewidth=2, edgecolor='g', facecolor='none')
    rec_action = patches.Rectangle(xy=(loc_action[1], loc_action[0]), width=loc_action[3],
                height=loc_action[2], linewidth=1, edgecolor='y', facecolor='none')
    # Add rectangles
    ax.add_patch(rect_start)
    ax.add_patch(rect_goal)
    ax.add_patch(rec_action)
    offset = CONFIG.MISC_patch_size[0] // 4
    # Label each rectangle inside the image.
    ax.text(loc_start[1] + offset, loc_start[0] + offset + 5, f"Start", fontsize=18, color='w',rotation = 315,rotation_mode = 'anchor')
    ax.text(loc_goal[1] + offset, loc_goal[0] + offset + 0 , f"Target", fontsize=18, color='w',rotation = 315,rotation_mode = 'anchor')
    ax.text(loc_action[1] + offset, loc_action[0] + offset + 5, f"Agent", fontsize=18, color='w',rotation = 315,rotation_mode = 'anchor')
    # Plot start and goal patch to the right
    right_axes = subfigs[1].subplots(nrows = 2 , ncols = 1)
    # get start and goal crops
    start_crop , _ = get_deterministic_crops(img.unsqueeze(0) , loc_start)
    goal_crop , _ = get_deterministic_crops(img.unsqueeze(0) , loc_goal)
    right_axes[0].imshow(start_crop.squeeze(0).permute(1,2,0))
    right_axes[1].imshow(goal_crop.squeeze(0).permute(1,2,0))
    right_axes[0].set_title("Start Crop")
    right_axes[1].set_title("Goal Crop")
    # Save figure
    fig.savefig(os.path.join(PATH, save_name + '.png'))
    # Close and clear up
    plt.cla()
    #plt.clf()
    plt.close('all')
    gc.collect()
def calculate_precision( outputs , labels):
    """Accuracy per label region (corners, boundaries, middle) on the 5x5
    absolute-position label grid.

    Returns a (corner, boundary, middle) tuple of floats; a region with no
    samples in the batch yields NaN (mean of an empty selection).
    """
    region_columns = (
        [0, 4, 20, 24],                                    # corners
        [1, 2, 3, 5, 10, 15, 9, 14, 19, 21, 22, 23],       # boundaries
        [6, 7, 8, 11, 12, 13, 16, 17, 18],                 # middle
    )
    preds = torch.argmax(outputs, dim = 1)
    truths = torch.argmax(labels, dim = 1)
    precisions = []
    for cols in region_columns:
        # Rows whose true label falls inside this region.
        in_region = labels[:, cols].any(dim = 1)
        precisions.append((preds[in_region] == truths[in_region]).float().mean().item())
    return tuple(precisions)
if __name__ == '__main__':
    # DEBUGGING get_label
    H = 48
    step_sz = int(CONFIG.RL_softmax_step_size*H)
    # Grid-aligned pixel locations (row, col) expressed in softmax steps.
    loc1 = torch.tensor([[0,0]])
    loc2 = torch.tensor([[0,1]]) * step_sz
    loc3 = torch.tensor([[1,1]]) * step_sz
    loc4 = torch.tensor([[0,3]]) * step_sz
    comb1 = torch.cat((loc1,loc2))
    comb2 = torch.cat((loc1,loc1))
    comb3 = torch.cat((loc2 , loc3))
    label1 = get_label(loc1, loc2, dim = 80)
    label2 = get_label(loc3, loc1, dim = 80)
    label3 = get_label(loc1, loc3, dim = 80)
    label4 = get_label(comb1 , comb3, dim = 80)
    label5 = get_label(comb2 , comb3 , dim = 80)
    label6 = get_label(comb1, comb3 , dim = 80)
    # Indices account for the centre cell (old index 40) being removed.
    assert(label1[0,4*9 + 4] == 1)
    assert(label2[0,3*9 + 3] == 1)
    assert(label3[0,5*9 + 4] == 1)
    pass
    # NOTE(review): row 0 of comb1/comb2 has identical start and goal, so
    # this default-dim call raises "Same start and goal loc" — presumably
    # exercising the error path deliberately; confirm.
    label7 = get_label(comb1, comb2)
airloc | airloc-master/doerchnet/networks.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import CONFIG
class AJNet(nn.Module):
    """Embeds a (start, goal) patch pair and classifies the move between them.

    `net_type` selects the patch embedder ('Doerch' or 'Doerch2'); both
    branches share one encoder instance. When `both_branches` is False only
    the goal patch is embedded.
    """

    def __init__(self,net_type,mode = 'train', unit_size = 128,num_classes =8 , both_branches = True):
        super(AJNet,self).__init__()
        # Allow for pretraining network with ground truth segmentation mask
        if CONFIG.RL_priv_use_seg:
            if CONFIG.MISC_dataset == 'masa_filt':
                self.n_chan = 4
            else:
                raise(Exception("Unkown dataset with segmentation info"))
        else:
            # Regular RGB inputs
            self.n_chan = 3
        self.mode = mode
        self.both_branches = both_branches
        # Choose the embedder for the Doerch net.
        # Both branches share one encoder instance (weight tying).
        if net_type == 'Doerch':
            self.start_enc = Encoder(unit_size , input_channels = self.n_chan)
            self.goal_enc = self.start_enc
        elif net_type == 'Doerch2':
            self.start_enc = Encoder2(unit_size , input_channels = self.n_chan)
            self.goal_enc = self.start_enc
        else:
            # Unsupported embedder is fatal (process exits).
            print(f"Unknown embedder {net_type} for this task")
            exit(1)
        # The first common layer sees either both embeddings or just the goal one.
        if both_branches:
            first_common_size = 2 * unit_size
        else:
            first_common_size = unit_size
        self.relu = nn.ReLU()
        self.common_fc_1 = nn.Linear(first_common_size, 2*unit_size)
        self.common_fc_2 = nn.Linear(2*unit_size,num_classes)

    def forward(self,start,goal, only_classify = False):
        """Return softmax class probabilities, optionally with the common
        embedding: `(common_emb, probs)` unless `only_classify` is True."""
        # BUGFIX: the goal embedding is needed regardless of branch mode; it
        # was previously computed only when both_branches was True, which
        # raised NameError in single-branch mode.
        goal_emb = self.goal_enc(goal)
        if self.both_branches:
            start_emb = self.start_enc(start)
            common_emb = torch.cat([start_emb, goal_emb], dim = 1)
        else:
            common_emb = goal_emb
        common_emb = self.relu(common_emb)
        common_emb = self.common_fc_1(common_emb)
        softmax_emb = self.common_fc_2(self.relu(common_emb))
        softmax_emb = F.softmax(softmax_emb, dim = 1 )
        # Sometimes we only want the classification
        if only_classify:
            return softmax_emb
        else:
            return common_emb , softmax_emb
class Encoder(nn.Module):
    """Five conv/ReLU/pool stages followed by a linear projection.

    Each stage halves the spatial resolution; the final Linear(64, unit_size)
    expects the feature map to have been reduced to a single 64-channel pixel
    before flattening.
    """

    def __init__(self, unit_size = 64, input_channels = 3):
        super(Encoder,self).__init__()
        # BUGFIX: the layer list is kept in a local variable. It was
        # previously assigned to `self.modules`, which shadowed the
        # nn.Module.modules() method and broke module traversal.
        layers = []
        # Conv block 1
        layers += [nn.Conv2d(input_channels,16,3,1,1)]
        layers += [nn.ReLU()]
        layers += [nn.MaxPool2d(2,2)]
        # Conv block 2
        layers += [nn.Conv2d(16,32,3,1,1)]
        layers += [nn.ReLU()]
        layers += [nn.MaxPool2d(2,2)]
        # Conv block 3
        layers += [nn.Conv2d(32,64,3,1,1)]
        layers += [nn.ReLU()]
        layers += [nn.MaxPool2d(2,2)]
        # Conv block 4
        layers += [nn.Conv2d(64,64,3,1,1)]
        layers += [nn.ReLU()]
        layers += [nn.MaxPool2d(2,2)]
        # Conv block 5
        layers += [nn.Conv2d(64,64,3,1,1)]
        layers += [nn.ReLU()]
        layers += [nn.MaxPool2d(2,2)]
        # Fc layer to map to correct unit size
        layers += [nn.Flatten()]
        layers += [nn.Linear(64,unit_size)]
        self.net = nn.Sequential(*layers)

    def forward(self,patch):
        """Return the (B, unit_size) embedding of `patch`."""
        return self.net(patch)
class Encoder2(nn.Module):
    """Four conv/ReLU/pool stages followed by a linear projection.

    Each stage halves the spatial resolution; the final Linear(9*64, unit_size)
    expects a 3x3x64 feature map before flattening (e.g. 48x48 input).
    """

    def __init__(self, unit_size = 64, input_channels = 3):
        super(Encoder2,self).__init__()
        # BUGFIX: the layer list is kept in a local variable. It was
        # previously assigned to `self.modules`, which shadowed the
        # nn.Module.modules() method and broke module traversal.
        layers = []
        # Conv block 1
        layers += [nn.Conv2d(input_channels,8,3,1,padding = 'same')]
        layers += [nn.ReLU()]
        layers += [nn.MaxPool2d(2,2)]
        # Conv block 2
        layers += [nn.Conv2d(8,16,5,1,padding = 'same')]
        layers += [nn.ReLU()]
        layers += [nn.MaxPool2d(2,2)]
        # Conv block 3
        layers += [nn.Conv2d(16,32,5,1,padding = 'same')]
        layers += [nn.ReLU()]
        layers += [nn.MaxPool2d(2,2)]
        # Conv block 4
        layers += [nn.Conv2d(32,64,5,1,padding = 'same')]
        layers += [nn.ReLU()]
        layers += [nn.MaxPool2d(2,2)]
        # Out comes shape 3x3x64
        # Fc layer to map to correct unit size
        layers += [nn.Flatten()]
        layers += [nn.Linear(9*64,unit_size)]
        self.net = nn.Sequential(*layers)

    def forward(self,patch):
        """Return the (B, unit_size) embedding of `patch`."""
        return self.net(patch)
| 4,630 | 27.763975 | 103 | py |
airloc | airloc-master/doerchnet/train.py | import os
import torch
import random
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import json
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from shutil import copyfile
from utils.utils import get_random_crops, get_deterministic_crops, load_normalize_data
from doerchnet.utils import sample_doerch_crops , visualize_doerch, get_label, visualize_batch_doerch, calculate_precision
from doerchnet.networks import AJNet
from doerchnet.share_net import ShareNet
import torch.optim as optim
from utils.stat_collector import StatCollector
from config import CONFIG
try:
# Set all hyperparameters here to not clutter the config file
net_type = 'ShareNet'
seed = 0
batch_size = 64
epochs = 100000
multiply_images = 4
# 8 dim -> local estimation of neibouring patches
# 25 dim -> global guess on where target is
# 80 dim -> Global relative to start
dim = 8
max_dist = 1
optimizer_type = 'adam'
learning_rate = 1e-4
lower_learning_rate_factor = 0.33
momentum = 0.95
beta1 = 0.9
beta2 = 0.999
print_iter = 100
vis_iter = 1000
# Set seeds
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
info = dict([
("NetType" , net_type),
("Completed" , False),
("Metrics", [
"Loss",
"ValLoss",
"Accuracy",
"ValAccuracy",
"ActionsTaken",
"ValActionsTaken",
"CorrectActions",
"ValCorrectActions",
"AccuracyCorners",
"AccuracyBoundaries",
"AccuracyMiddle",
"ValAccuracyCorners",
"ValAccuracyBoundaries",
"ValAccuracyMiddle",
]),
("LogDir" , None),
("Blocks" , [2 ,2 , 2]),
("NbrParameters" , 0),
("LatentSpaceSize" , 0)
])
if dim not in [25 , 80]:
# Remove accuracy region metrics
for m in ["AccuracyCorners" , "AccuracyBoundaries" , "AccuracyMiddle"]:
info['Metrics'].remove(m)
info['Metrics'].remove("Val" + m)
metrics = info['Metrics']
# Function used to update leanring rate
def update_learning_rate(optimizer , learning_rate):
for params in opimizier.param_groups:
params['lr'] = learning_rate
# Find device
device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
CONFIG.device = device
# Load data
trainloader,valloader = load_normalize_data(download = False, batch_size = batch_size , multiply_images = multiply_images)
print(f"Trainloader: {len(trainloader)}")
print(f"Valloader: {len(valloader)}")
# Save information about dataloaders
info['Dataset'] = CONFIG.MISC_dataset
info['ValLoaderLength'] = len(valloader)
info['TrainLoaderLength'] = len(trainloader)
valiter = iter(valloader)
tot_itr = 0
tot_nbr_itr = epochs * len(trainloader)
if net_type == 'ShareNet':
net = ShareNet(num_out_classes = dim)
else:
net = AJNet(net_type, num_classes=dim, both_branches = True)
# Count number of paramters
net_parameters = filter(lambda p: p.requires_grad, net.parameters())
info['NbrParameters'] = int(sum([np.prod(p.size()) for p in net_parameters]))
# Record latentspace size in info file
noise = torch.randn(1, net.n_chan , CONFIG.MISC_patch_size[0] , CONFIG.MISC_patch_size[1]).cpu()
latent , _ = net(noise,noise)
latentSpaceSize = int(np.prod(list(latent.shape)))
info['LatentSpaceSize'] = latentSpaceSize
info['InputSpaceSize'] = int(np.prod(list(noise.shape)))
net = net.to(device)
# Set optimizer
if optimizer_type == 'sgd':
optimizer = optim.SGD(net.parameters() , lr = learning_rate, momentum = momentum)
elif optimizer_type == 'adam':
optimizer = optim.Adam(net.parameters() , lr = learning_rate, betas =(beta1 , beta2))
else:
raise(Exception("Unknown optimizer type:\t%s" % optimizer_type))
# Select loss function
criterion = nn.CrossEntropyLoss()
# Setup stat collector
log_dir_name = os.path.basename(CONFIG.STATS_log_dir)
CONFIG.STATS_DORCH_log_dir = os.path.join(CONFIG.MISC_project_root_path, "doerchnet", "logs", log_dir_name)
os.makedirs(CONFIG.STATS_DORCH_log_dir)
exclude_prints = [
"ActionsTaken",
"ValActionsTaken",
"CorrectActions",
"ValCorrectActions",
] # Does not print these statistics
CONFIG.STATS_DOERCH_vis_dir = os.path.join(CONFIG.STATS_DORCH_log_dir, "visualizations")
os.makedirs(CONFIG.STATS_DOERCH_vis_dir)
sc = StatCollector(CONFIG.STATS_DORCH_log_dir, tot_nbr_itr, print_iter = print_iter, exclude_prints = exclude_prints)
for metric in metrics:
sc.register(metric , {'type':'avg','freq':'step'})
# Enter log dir and write to file
info['LogDir'] = CONFIG.STATS_DORCH_log_dir
with open(os.path.join(CONFIG.STATS_DORCH_log_dir, "info.json") , 'w') as io:
json.dump(info, io , indent = 4)
# Save all files
copyfile("doerchnet/train.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "train.py"))
copyfile("config.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "config.py"))
copyfile("doerchnet/networks.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "networks.py"))
copyfile("doerchnet/share_net.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "share_net.py"))
if CONFIG.MISC_dataset.startswith('custom_'):
stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
with open(stat_path) as json_file:
stats = json.load(json_file)
patch_dims = (1,3,CONFIG.MISC_patch_size[0],CONFIG.MISC_patch_size[1])
dataset_means = torch.tensor(stats['means'][:3])
dataset_stds = torch.tensor(stats['stds'][:3])
unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
# Decrease learning rate every 20 epoch
current_learning_rate = learning_rate
action_dist = torch.zeros(dim)
action_taken = torch.zeros(dim)
    def update_learning_rate(optimizer , learning_rate):
        # Set `learning_rate` on every parameter group of `optimizer`.
        # (Re-defines the earlier helper of the same name in this script;
        # this version references `optimizer` correctly.)
        for params in optimizer.param_groups:
            params['lr'] = learning_rate
print("NetType:\t%s" % net_type)
print("Dataset:\t%s" % CONFIG.MISC_dataset)
for epoch in range(epochs):
for (i,data) in enumerate(trainloader):
batch_images , (start_crops_ , goal_crops_) = data
batch_images = batch_images
# Get random crops
crops_start, locs_start = get_random_crops(batch_images)
crops_goal, locs_goal = get_random_crops(batch_images, locs_start, max_dist = max_dist)
actions = get_label(locs_start, locs_goal, dim = dim)
crops_start = crops_start.to(device)
crops_goal = crops_goal.to(device)
action_dist += actions.sum(dim=0)
temp_action_dist = action_dist / action_dist.sum()
actions = actions.to(device)
_ , outputs = net(crops_start,crops_goal)
loss = criterion(outputs, actions ).to(device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# here do visualization
if tot_itr % vis_iter == 0:
pass
#visualize_doerch(batch_images, locs_start, locs_goal ,outputs, unNormImage, save_name = 'train_vis_%d' % tot_itr, PATH = CONFIG.STATS_DORCH_log_dir)
sc.s('Loss').collect(loss.item())
# Calculate precision of model at corners, boundaries and middle part
if dim in [25 , 80]:
prec_corner, prec_boundary , prec_middle = calculate_precision(outputs , actions)
sc.s('AccuracyCorners').collect(prec_corner)
sc.s('AccuracyBoundaries').collect(prec_boundary)
sc.s('AccuracyMiddle').collect(prec_middle)
# Calculate the accuracy
acc = (torch.argmax(actions , dim = 1, keepdim = True) == torch.argmax(outputs , dim = 1, keepdim = True)).float().mean()
# print(actions.argmax(dim=1,keepdim=True))
sc.s('Accuracy').collect(acc.item())
sc.s('ActionsTaken').collect(F.one_hot(outputs.argmax(dim = 1, keepdim = False ), num_classes=dim).float().mean(dim = 0).cpu().numpy())
sc.s('CorrectActions').collect(actions.mean(dim = 0).cpu().numpy())
with torch.no_grad():
# Run one batch on the valloader aswell
try:
val_images, (start_crops_ , goal_crops_) = next(valiter)
except:
valiter = iter(valloader)
val_images, ( start_crops_ , goal_crops_) = next(valiter)
val_images = val_images.to(device)
# Get random crops
val_crops_start, val_locs_start = get_random_crops(val_images)
val_crops_goal, val_locs_goal = get_random_crops(val_images, val_locs_start, max_dist = max_dist)
val_actions = get_label(val_locs_start, val_locs_goal, dim = dim)
val_crops_start = val_crops_start.to(device)
val_crops_goal = val_crops_goal.to(device)
val_actions = val_actions.to(device)
_ , val_outputs = net(val_crops_start,val_crops_goal)
val_loss = criterion(val_outputs, val_actions ).to(device)
# Logging
sc.s('ValLoss').collect(val_loss.item())
# Calculate precision of model at corners, boundaries and middle part
if dim in [25 , 80]:
prec_corner, prec_boundary , prec_middle = calculate_precision(val_outputs , val_actions)
sc.s('ValAccuracyCorners').collect(prec_corner)
sc.s('ValAccuracyBoundaries').collect(prec_boundary)
sc.s('ValAccuracyMiddle').collect(prec_middle)
# Calculate the accuracy
val_acc = (torch.argmax(val_actions , dim = 1, keepdim = True) == torch.argmax(val_outputs , dim = 1, keepdim = True)).float().mean()
sc.s('ValAccuracy').collect(val_acc.item())
sc.s('ValActionsTaken').collect(F.one_hot(val_outputs.argmax(dim = 1, keepdim = False), num_classes=dim).float().mean(dim = 0).cpu().numpy())
sc.s('ValCorrectActions').collect(val_actions.mean(dim = 0).cpu().numpy())
# here do visualization
if tot_itr % vis_iter == 0:
# If segmentation information is enabled remove it and only visualize the RGB imaegs
if CONFIG.RL_priv_use_seg:
val_images = val_images[:,0:3,:]
visualize_batch_doerch(val_images, val_locs_start , val_locs_goal , val_outputs, unNormImage,PATH = CONFIG.STATS_DOERCH_vis_dir, save_name = "val_%d" % tot_itr)
if tot_itr % print_iter == 0 or tot_itr == tot_nbr_itr - 1:
print("Iteration:\t%d / %d" % ( tot_itr, tot_nbr_itr))
sc.print()
#print(action_dist)
sc.save()
tot_itr += 1
if tot_itr % 5000 == 0:
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
# Lower the learning rate NOTE not active
if (epoch + 1) % 10 == 0 and False:
current_learning_rate *= lower_learning_rate_factor
update_learning_rate(optimizer, current_learning_rate)
info['Completed'] = True
with open(os.path.join(CONFIG.STATS_DORCH_log_dir, "info.json") , 'w') as io:
json.dump(info , io , indent = 4)
except:
# TODO - Use signal handlers instead so that we can propagate the exceptions
#raise
while True:
i = input("save the model")
if i=='y':
# Save the encoder and the decoder
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
print("model saved")
exit(1)
elif i == 'n':
print("Not saving")
exit(1)
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
| 12,721 | 36.528024 | 176 | py |
airloc | airloc-master/doerchnet/logs/with-sem-seg/share_net.py |
import torch
import torch.nn as nn
from config import CONFIG
class ShareNet(nn.Module):
    """Two-branch start/goal patch embedder.

    NOTE: this file lives under doerchnet/logs/ and appears to be a snapshot
    saved alongside a training run; keep it in sync with
    doerchnet/share_net.py only deliberately.
    """

    def __init__(self , num_out_classes = 8):
        super(ShareNet, self).__init__()
        # Check wether to include segmentation channel
        if CONFIG.RL_priv_use_seg or CONFIG.RL_predict_seg_mask:
            self.seg_chan = 1
        else:
            self.seg_chan = 0
        self.n_chan = 3 + self.seg_chan
        self.latent_space_dim = 256
        # Start assembling the network.
        # For now non-siamese setup
        self.start_double_conv1 = DoubleConvBlock(self.n_chan)
        self.goal_double_conv1 = DoubleConvBlock(self.n_chan)
        intermediate_n_chans = self.start_double_conv1.out_chan
        self.start_double_conv2 = DoubleConvBlock( intermediate_n_chans * 2)
        self.goal_double_conv2 = DoubleConvBlock( intermediate_n_chans * 2)
        self.flat = nn.Flatten(start_dim = 1)
        self.common_layer_dim = 2 * self.start_double_conv2.out_chan * ((CONFIG.MISC_patch_size[0] // ( 2**4) )**2)
        # Common linear layer
        self.fc_int = nn.Linear( self.common_layer_dim , self.latent_space_dim)
        self.fc_out = nn.Linear( self.latent_space_dim , num_out_classes)
        self.act = nn.ReLU()

    def forward(self, start , goal):
        """Return (latent embedding, raw class scores) for a patch pair."""
        start_1 = self.start_double_conv1(start)
        goal_1 = self.goal_double_conv1(goal)
        # Cross-concatenate so both second-stage branches see both patches.
        start_goal = torch.cat((start_1, goal_1 ) , dim = 1)
        goal_start = torch.cat((goal_1, start_1 ) , dim = 1)
        start_2 = self.start_double_conv2(start_goal)
        goal_2 = self.goal_double_conv2(goal_start)
        emb = torch.cat((start_2, goal_2), dim = 1)
        emb = self.flat(emb)
        # First linear layer to latent space
        emb = self.fc_int(emb)
        emb = self.act(emb)
        out = self.fc_out(emb)
        return emb, out
class DoubleConvBlock(nn.Module):
    """Two chained ConvBlocks: channels grow 1x -> 2x -> 3x, spatial size /4."""

    def __init__(self, in_channels, kernel_size = 3):
        super(DoubleConvBlock, self).__init__()
        mid_chan = 2 * in_channels
        self.out_chan = 3 * in_channels
        self.block1 = ConvBlock(in_channels, mid_chan, kernel_size)
        self.block2 = ConvBlock(mid_chan, self.out_chan, kernel_size)

    def forward(self, x):
        # Feed the input through both conv/pool stages in sequence.
        return self.block2(self.block1(x))
class ConvBlock(nn.Module):
    """One stage of conv ('same' padding) -> ReLU -> 2x2 max-pool."""

    def __init__(self , in_channels, out_channels , kernel_size = 3):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels , out_channels , kernel_size, padding = 'same')
        self.act = nn.ReLU()
        self.pool = nn.MaxPool2d(2,2)

    def forward(self, x):
        # Halves the spatial resolution while mapping to out_channels.
        return self.pool(self.act(self.conv(x)))
| 2,784 | 25.028037 | 115 | py |
airloc | airloc-master/doerchnet/logs/without-sem-seg/share_net.py |
import torch
import torch.nn as nn
from config import CONFIG
class ShareNet(nn.Module):
    """Siamese-style network embedding a start and a goal patch and
    predicting one of ``num_out_classes`` relative moves.

    Each branch runs two double-conv stages; between the stages the two
    branches exchange feature maps, and a shared fully connected head
    produces the latent embedding and the class logits.
    """

    def __init__(self, num_out_classes=8):
        super(ShareNet, self).__init__()

        # An extra input channel carries the (privileged) segmentation mask.
        self.seg_chan = 1 if CONFIG.RL_priv_use_seg else 0
        self.n_chan = 3 + self.seg_chan

        self.latent_space_dim = 256

        # Stage 1: independent double-conv blocks per branch.
        self.start_double_conv1 = DoubleConvBlock(self.n_chan)
        self.goal_double_conv1 = DoubleConvBlock(self.n_chan)

        # Stage 2 consumes the concatenation of both stage-1 outputs.
        stage1_chans = self.start_double_conv1.out_chan
        self.start_double_conv2 = DoubleConvBlock(stage1_chans * 2)
        self.goal_double_conv2 = DoubleConvBlock(stage1_chans * 2)

        self.flat = nn.Flatten(start_dim=1)

        # Four 2x2 pooling layers in total shrink the patch by 2**4.
        spatial = CONFIG.MISC_patch_size[0] // (2 ** 4)
        self.common_layer_dim = 2 * self.start_double_conv2.out_chan * spatial ** 2

        # Shared head: flatten -> latent space -> logits.
        self.fc_int = nn.Linear(self.common_layer_dim, self.latent_space_dim)
        self.fc_out = nn.Linear(self.latent_space_dim, num_out_classes)
        self.act = nn.ReLU()

    def forward(self, start, goal):
        """Return ``(latent_embedding, class_logits)`` for a patch pair."""
        start_feat = self.start_double_conv1(start)
        goal_feat = self.goal_double_conv1(goal)

        # Cross-wire the branches: each stage-2 block sees both feature
        # maps, concatenated in opposite order.
        start_feat2 = self.start_double_conv2(torch.cat((start_feat, goal_feat), dim=1))
        goal_feat2 = self.goal_double_conv2(torch.cat((goal_feat, start_feat), dim=1))

        emb = self.flat(torch.cat((start_feat2, goal_feat2), dim=1))
        emb = self.act(self.fc_int(emb))
        return emb, self.fc_out(emb)
class DoubleConvBlock(nn.Module):
    """Two chained ConvBlocks: channels grow C -> 2C -> 3C while the two
    pooling steps quarter the spatial size."""

    def __init__(self, in_channels, kernel_size=3):
        super(DoubleConvBlock, self).__init__()
        self.block1 = ConvBlock(in_channels, 2 * in_channels, kernel_size)
        self.block2 = ConvBlock(2 * in_channels, 3 * in_channels, kernel_size)
        # Output channel count, exposed so callers can size later layers.
        self.out_chan = 3 * in_channels

    def forward(self, x):
        return self.block2(self.block1(x))
class ConvBlock(nn.Module):
    """Conv -> ReLU -> 2x2 max-pool; halves the spatial resolution."""

    def __init__(self, in_channels, out_channels, kernel_size=3):
        super(ConvBlock, self).__init__()
        # 'same' padding keeps H and W unchanged through the convolution.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding='same')
        self.act = nn.ReLU()
        self.pool = nn.MaxPool2d(2, 2)

    def forward(self, x):
        return self.pool(self.act(self.conv(x)))
| 2,755 | 24.518519 | 115 | py |
airloc | airloc-master/doerchnet/logs/without-sem-seg-pre-michael/share_net.py |
import torch
import torch.nn as nn
from config import CONFIG
class ShareNet(nn.Module):
    """Siamese-style network embedding a start and a goal patch and
    predicting one of ``num_out_classes`` relative moves.

    Each branch runs two double-conv stages; between the stages the two
    branches exchange feature maps, and a shared fully connected head
    produces the latent embedding and the class logits.
    """

    def __init__(self, num_out_classes=8):
        super(ShareNet, self).__init__()

        # An extra input channel carries the segmentation mask when one
        # is provided (privileged or predicted).
        has_seg = CONFIG.RL_priv_use_seg or CONFIG.RL_predict_seg_mask
        self.seg_chan = 1 if has_seg else 0
        self.n_chan = 3 + self.seg_chan

        self.latent_space_dim = 256

        # Stage 1: independent double-conv blocks per branch.
        self.start_double_conv1 = DoubleConvBlock(self.n_chan)
        self.goal_double_conv1 = DoubleConvBlock(self.n_chan)

        # Stage 2 consumes the concatenation of both stage-1 outputs.
        stage1_chans = self.start_double_conv1.out_chan
        self.start_double_conv2 = DoubleConvBlock(stage1_chans * 2)
        self.goal_double_conv2 = DoubleConvBlock(stage1_chans * 2)

        self.flat = nn.Flatten(start_dim=1)

        # Four 2x2 pooling layers in total shrink the patch by 2**4.
        spatial = CONFIG.MISC_patch_size[0] // (2 ** 4)
        self.common_layer_dim = 2 * self.start_double_conv2.out_chan * spatial ** 2

        # Shared head: flatten -> latent space -> logits.
        self.fc_int = nn.Linear(self.common_layer_dim, self.latent_space_dim)
        self.fc_out = nn.Linear(self.latent_space_dim, num_out_classes)
        self.act = nn.ReLU()

    def forward(self, start, goal):
        """Return ``(latent_embedding, class_logits)`` for a patch pair."""
        start_feat = self.start_double_conv1(start)
        goal_feat = self.goal_double_conv1(goal)

        # Cross-wire the branches: each stage-2 block sees both feature
        # maps, concatenated in opposite order.
        start_feat2 = self.start_double_conv2(torch.cat((start_feat, goal_feat), dim=1))
        goal_feat2 = self.goal_double_conv2(torch.cat((goal_feat, start_feat), dim=1))

        emb = self.flat(torch.cat((start_feat2, goal_feat2), dim=1))
        emb = self.act(self.fc_int(emb))
        return emb, self.fc_out(emb)
class DoubleConvBlock(nn.Module):
    """Two chained ConvBlocks: channels grow C -> 2C -> 3C while the two
    pooling steps quarter the spatial size."""

    def __init__(self, in_channels, kernel_size=3):
        super(DoubleConvBlock, self).__init__()
        self.block1 = ConvBlock(in_channels, 2 * in_channels, kernel_size)
        self.block2 = ConvBlock(2 * in_channels, 3 * in_channels, kernel_size)
        # Output channel count, exposed so callers can size later layers.
        self.out_chan = 3 * in_channels

    def forward(self, x):
        return self.block2(self.block1(x))
class ConvBlock(nn.Module):
    """Conv -> ReLU -> 2x2 max-pool; halves the spatial resolution."""

    def __init__(self, in_channels, out_channels, kernel_size=3):
        super(ConvBlock, self).__init__()
        # 'same' padding keeps H and W unchanged through the convolution.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding='same')
        self.act = nn.ReLU()
        self.pool = nn.MaxPool2d(2, 2)

    def forward(self, x):
        return self.pool(self.act(self.conv(x)))
| 2,785 | 24.796296 | 115 | py |
airloc | airloc-master/doerchnet/logs/without-sem-seg-pre-michael/networks.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import CONFIG
class AJNet(nn.Module):
    """Classifier predicting the relative move between a start and a goal
    patch.

    ``net_type`` selects the patch embedder ('Doerch' or 'Doerch2'); the
    start and goal branches share the same encoder weights. When
    ``both_branches`` is False only the goal embedding feeds the
    classification head.
    """

    def __init__(self, net_type, mode='train', unit_size=128, num_classes=8, both_branches=True):
        super(AJNet, self).__init__()

        # Allow for pretraining network with ground truth segmentation mask
        if CONFIG.RL_priv_use_seg:
            if CONFIG.MISC_dataset == 'masa_filt':
                self.n_chan = 4
            else:
                raise(Exception("Unkown dataset with segmentation info"))
        else:
            # Regular RGB inputs
            self.n_chan = 3

        self.mode = mode
        self.both_branches = both_branches

        # Choose the embedder; the two branches share one encoder instance.
        if net_type == 'Doerch':
            self.start_enc = Encoder(unit_size, input_channels=self.n_chan)
            self.goal_enc = self.start_enc
        elif net_type == 'Doerch2':
            self.start_enc = Encoder2(unit_size, input_channels=self.n_chan)
            self.goal_enc = self.start_enc
        else:
            print(f"Unknown embedder {net_type} for this task")
            exit(1)

        # The head input doubles when both embeddings are concatenated.
        if both_branches:
            first_common_size = 2 * unit_size
        else:
            first_common_size = unit_size

        self.relu = nn.ReLU()
        self.common_fc_1 = nn.Linear(first_common_size, 2 * unit_size)
        self.common_fc_2 = nn.Linear(2 * unit_size, num_classes)

    def forward(self, start, goal, only_classify=False):
        """Return ``(common_emb, softmax)`` or just the softmax when
        ``only_classify`` is True."""
        # BUG FIX: the goal embedding is needed in both configurations,
        # but it was previously computed only when both_branches was
        # True, so the single-branch path raised a NameError.
        goal_emb = self.goal_enc(goal)
        if self.both_branches:
            start_emb = self.start_enc(start)
            common_emb = torch.cat([start_emb, goal_emb], dim=1)
        else:
            common_emb = goal_emb

        common_emb = self.relu(common_emb)
        common_emb = self.common_fc_1(common_emb)
        softmax_emb = self.common_fc_2(self.relu(common_emb))
        softmax_emb = F.softmax(softmax_emb, dim=1)

        # Sometimes we only want the classification
        if only_classify:
            return softmax_emb
        else:
            return common_emb, softmax_emb
class Encoder(nn.Module):
    """Five conv/ReLU/maxpool stages followed by a linear projection.

    Expects 32x32 inputs: five 2x2 max-pools reduce the spatial size to
    1x1, so the flattened feature vector has 64 elements feeding the
    final ``nn.Linear(64, unit_size)``.
    """

    def __init__(self, unit_size=64, input_channels=3):
        super(Encoder, self).__init__()
        # BUG FIX: the layer list used to be stored in `self.modules`,
        # which shadowed nn.Module.modules(); a local list avoids that.
        layers = []
        # Conv block 1
        layers += [nn.Conv2d(input_channels, 16, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(2, 2)]
        # Conv block 2
        layers += [nn.Conv2d(16, 32, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(2, 2)]
        # Conv block 3
        layers += [nn.Conv2d(32, 64, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(2, 2)]
        # Conv block 4
        layers += [nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(2, 2)]
        # Conv block 5
        layers += [nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(2, 2)]
        # Fc layer to map to correct unit size
        layers += [nn.Flatten(), nn.Linear(64, unit_size)]
        self.net = nn.Sequential(*layers)

    def forward(self, patch):
        """Return a (batch, unit_size) embedding of ``patch``."""
        return self.net(patch)
class Encoder2(nn.Module):
    """Four conv/ReLU/maxpool stages followed by a linear projection.

    Expects 48x48 inputs: four 2x2 max-pools reduce the spatial size to
    3x3 with 64 channels, so the flattened feature vector has 9*64
    elements feeding the final ``nn.Linear(9*64, unit_size)``.
    """

    def __init__(self, unit_size=64, input_channels=3):
        super(Encoder2, self).__init__()
        # BUG FIX: the layer list used to be stored in `self.modules`,
        # which shadowed nn.Module.modules(); a local list avoids that.
        layers = []
        # Conv block 1
        layers += [nn.Conv2d(input_channels, 8, 3, 1, padding='same'), nn.ReLU(), nn.MaxPool2d(2, 2)]
        # Conv block 2
        layers += [nn.Conv2d(8, 16, 5, 1, padding='same'), nn.ReLU(), nn.MaxPool2d(2, 2)]
        # Conv block 3
        layers += [nn.Conv2d(16, 32, 5, 1, padding='same'), nn.ReLU(), nn.MaxPool2d(2, 2)]
        # Conv block 4
        layers += [nn.Conv2d(32, 64, 5, 1, padding='same'), nn.ReLU(), nn.MaxPool2d(2, 2)]
        # Out comes shape 3x3x64
        # Fc layer to map to correct unit size
        layers += [nn.Flatten(), nn.Linear(9 * 64, unit_size)]
        self.net = nn.Sequential(*layers)

    def forward(self, patch):
        """Return a (batch, unit_size) embedding of ``patch``."""
        return self.net(patch)
| 4,630 | 27.763975 | 103 | py |
airloc | airloc-master/doerchnet/logs/without-sem-seg-pre-michael/train.py | import os
import torch
import random
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import json
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from shutil import copyfile
from utils.utils import get_random_crops, get_deterministic_crops, load_normalize_data
from doerchnet.utils import sample_doerch_crops , visualize_doerch, get_label, visualize_batch_doerch, calculate_precision
from doerchnet.networks import AJNet
from doerchnet.share_net import ShareNet
import torch.optim as optim
from utils.stat_collector import StatCollector
from config import CONFIG
try:
# Set all hyperparameters here to not clutter the config file
net_type = 'ShareNet'
seed = 0
batch_size = 64
epochs = 100000
multiply_images = 4
# 8 dim -> local estimation of neibouring patches
# 25 dim -> global guess on where target is
# 80 dim -> Global relative to start
dim = 8
max_dist = 1
optimizer_type = 'adam'
learning_rate = 1e-4
lower_learning_rate_factor = 0.33
momentum = 0.95
beta1 = 0.9
beta2 = 0.999
print_iter = 100
vis_iter = 1000
# Set seeds
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
info = dict([
("NetType" , net_type),
("Completed" , False),
("Metrics", [
"Loss",
"ValLoss",
"Accuracy",
"ValAccuracy",
"ActionsTaken",
"ValActionsTaken",
"CorrectActions",
"ValCorrectActions",
"AccuracyCorners",
"AccuracyBoundaries",
"AccuracyMiddle",
"ValAccuracyCorners",
"ValAccuracyBoundaries",
"ValAccuracyMiddle",
]),
("LogDir" , None),
("Blocks" , [2 ,2 , 2]),
("NbrParameters" , 0),
("LatentSpaceSize" , 0)
])
if dim not in [25 , 80]:
# Remove accuracy region metrics
for m in ["AccuracyCorners" , "AccuracyBoundaries" , "AccuracyMiddle"]:
info['Metrics'].remove(m)
info['Metrics'].remove("Val" + m)
metrics = info['Metrics']
# Function used to update leanring rate
def update_learning_rate(optimizer , learning_rate):
for params in opimizier.param_groups:
params['lr'] = learning_rate
# Find device
device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
CONFIG.device = device
# Load data
trainloader,valloader = load_normalize_data(download = False, batch_size = batch_size , multiply_images = multiply_images)
print(f"Trainloader: {len(trainloader)}")
print(f"Valloader: {len(valloader)}")
# Save information about dataloaders
info['Dataset'] = CONFIG.MISC_dataset
info['ValLoaderLength'] = len(valloader)
info['TrainLoaderLength'] = len(trainloader)
valiter = iter(valloader)
tot_itr = 0
tot_nbr_itr = epochs * len(trainloader)
if net_type == 'ShareNet':
net = ShareNet(num_out_classes = dim)
else:
net = AJNet(net_type, num_classes=dim, both_branches = True)
# Count number of paramters
net_parameters = filter(lambda p: p.requires_grad, net.parameters())
info['NbrParameters'] = int(sum([np.prod(p.size()) for p in net_parameters]))
# Record latentspace size in info file
noise = torch.randn(1, net.n_chan , CONFIG.MISC_patch_size[0] , CONFIG.MISC_patch_size[1]).cpu()
latent , _ = net(noise,noise)
latentSpaceSize = int(np.prod(list(latent.shape)))
info['LatentSpaceSize'] = latentSpaceSize
info['InputSpaceSize'] = int(np.prod(list(noise.shape)))
net = net.to(device)
# Set optimizer
if optimizer_type == 'sgd':
optimizer = optim.SGD(net.parameters() , lr = learning_rate, momentum = momentum)
elif optimizer_type == 'adam':
optimizer = optim.Adam(net.parameters() , lr = learning_rate, betas =(beta1 , beta2))
else:
raise(Exception("Unknown optimizer type:\t%s" % optimizer_type))
# Select loss function
criterion = nn.CrossEntropyLoss()
# Setup stat collector
log_dir_name = os.path.basename(CONFIG.STATS_log_dir)
CONFIG.STATS_DORCH_log_dir = os.path.join(CONFIG.MISC_project_root_path, "doerchnet", "logs", log_dir_name)
os.makedirs(CONFIG.STATS_DORCH_log_dir)
exclude_prints = [
"ActionsTaken",
"ValActionsTaken",
"CorrectActions",
"ValCorrectActions",
] # Does not print these statistics
CONFIG.STATS_DOERCH_vis_dir = os.path.join(CONFIG.STATS_DORCH_log_dir, "visualizations")
os.makedirs(CONFIG.STATS_DOERCH_vis_dir)
sc = StatCollector(CONFIG.STATS_DORCH_log_dir, tot_nbr_itr, print_iter = print_iter, exclude_prints = exclude_prints)
for metric in metrics:
sc.register(metric , {'type':'avg','freq':'step'})
# Enter log dir and write to file
info['LogDir'] = CONFIG.STATS_DORCH_log_dir
with open(os.path.join(CONFIG.STATS_DORCH_log_dir, "info.json") , 'w') as io:
json.dump(info, io , indent = 4)
# Save all files
copyfile("doerchnet/train.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "train.py"))
copyfile("config.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "config.py"))
copyfile("doerchnet/networks.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "networks.py"))
copyfile("doerchnet/share_net.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "share_net.py"))
if CONFIG.MISC_dataset.startswith('custom_'):
stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
with open(stat_path) as json_file:
stats = json.load(json_file)
patch_dims = (1,3,CONFIG.MISC_patch_size[0],CONFIG.MISC_patch_size[1])
dataset_means = torch.tensor(stats['means'][:3])
dataset_stds = torch.tensor(stats['stds'][:3])
unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
# Decrease learning rate every 20 epoch
current_learning_rate = learning_rate
action_dist = torch.zeros(dim)
action_taken = torch.zeros(dim)
def update_learning_rate(optimizer , learning_rate):
for params in optimizer.param_groups:
params['lr'] = learning_rate
print("NetType:\t%s" % net_type)
print("Dataset:\t%s" % CONFIG.MISC_dataset)
for epoch in range(epochs):
for (i,data) in enumerate(trainloader):
batch_images , (start_crops_ , goal_crops_) = data
batch_images = batch_images
# Get random crops
crops_start, locs_start = get_random_crops(batch_images)
crops_goal, locs_goal = get_random_crops(batch_images, locs_start, max_dist = max_dist)
actions = get_label(locs_start, locs_goal, dim = dim)
crops_start = crops_start.to(device)
crops_goal = crops_goal.to(device)
action_dist += actions.sum(dim=0)
temp_action_dist = action_dist / action_dist.sum()
actions = actions.to(device)
_ , outputs = net(crops_start,crops_goal)
loss = criterion(outputs, actions ).to(device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# here do visualization
if tot_itr % vis_iter == 0:
pass
#visualize_doerch(batch_images, locs_start, locs_goal ,outputs, unNormImage, save_name = 'train_vis_%d' % tot_itr, PATH = CONFIG.STATS_DORCH_log_dir)
sc.s('Loss').collect(loss.item())
# Calculate precision of model at corners, boundaries and middle part
if dim in [25 , 80]:
prec_corner, prec_boundary , prec_middle = calculate_precision(outputs , actions)
sc.s('AccuracyCorners').collect(prec_corner)
sc.s('AccuracyBoundaries').collect(prec_boundary)
sc.s('AccuracyMiddle').collect(prec_middle)
# Calculate the accuracy
acc = (torch.argmax(actions , dim = 1, keepdim = True) == torch.argmax(outputs , dim = 1, keepdim = True)).float().mean()
# print(actions.argmax(dim=1,keepdim=True))
sc.s('Accuracy').collect(acc.item())
sc.s('ActionsTaken').collect(F.one_hot(outputs.argmax(dim = 1, keepdim = False ), num_classes=dim).float().mean(dim = 0).cpu().numpy())
sc.s('CorrectActions').collect(actions.mean(dim = 0).cpu().numpy())
with torch.no_grad():
# Run one batch on the valloader aswell
try:
val_images, (start_crops_ , goal_crops_) = next(valiter)
except:
valiter = iter(valloader)
val_images, ( start_crops_ , goal_crops_) = next(valiter)
val_images = val_images.to(device)
# Get random crops
val_crops_start, val_locs_start = get_random_crops(val_images)
val_crops_goal, val_locs_goal = get_random_crops(val_images, val_locs_start, max_dist = max_dist)
val_actions = get_label(val_locs_start, val_locs_goal, dim = dim)
val_crops_start = val_crops_start.to(device)
val_crops_goal = val_crops_goal.to(device)
val_actions = val_actions.to(device)
_ , val_outputs = net(val_crops_start,val_crops_goal)
val_loss = criterion(val_outputs, val_actions ).to(device)
# Logging
sc.s('ValLoss').collect(val_loss.item())
# Calculate precision of model at corners, boundaries and middle part
if dim in [25 , 80]:
prec_corner, prec_boundary , prec_middle = calculate_precision(val_outputs , val_actions)
sc.s('ValAccuracyCorners').collect(prec_corner)
sc.s('ValAccuracyBoundaries').collect(prec_boundary)
sc.s('ValAccuracyMiddle').collect(prec_middle)
# Calculate the accuracy
val_acc = (torch.argmax(val_actions , dim = 1, keepdim = True) == torch.argmax(val_outputs , dim = 1, keepdim = True)).float().mean()
sc.s('ValAccuracy').collect(val_acc.item())
sc.s('ValActionsTaken').collect(F.one_hot(val_outputs.argmax(dim = 1, keepdim = False), num_classes=dim).float().mean(dim = 0).cpu().numpy())
sc.s('ValCorrectActions').collect(val_actions.mean(dim = 0).cpu().numpy())
# here do visualization
if tot_itr % vis_iter == 0:
# If segmentation information is enabled remove it and only visualize the RGB imaegs
if CONFIG.RL_priv_use_seg:
val_images = val_images[:,0:3,:]
visualize_batch_doerch(val_images, val_locs_start , val_locs_goal , val_outputs, unNormImage,PATH = CONFIG.STATS_DOERCH_vis_dir, save_name = "val_%d" % tot_itr)
if tot_itr % print_iter == 0 or tot_itr == tot_nbr_itr - 1:
print("Iteration:\t%d / %d" % ( tot_itr, tot_nbr_itr))
sc.print()
#print(action_dist)
sc.save()
tot_itr += 1
if tot_itr % 5000 == 0:
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
# Lower the learning rate NOTE not active
if (epoch + 1) % 10 == 0 and False:
current_learning_rate *= lower_learning_rate_factor
update_learning_rate(optimizer, current_learning_rate)
info['Completed'] = True
with open(os.path.join(CONFIG.STATS_DORCH_log_dir, "info.json") , 'w') as io:
json.dump(info , io , indent = 4)
except:
# TODO - Use signal handlers instead so that we can propagate the exceptions
#raise
while True:
i = input("save the model")
if i=='y':
# Save the encoder and the decoder
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
print("model saved")
exit(1)
elif i == 'n':
print("Not saving")
exit(1)
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
| 12,708 | 36.600592 | 176 | py |
airloc | airloc-master/segmentations/u_net.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import CONFIG
class UNet(nn.Module):
    """Compact U-Net: four down-sampling and four up-sampling stages
    around a double-conv stem, closed by a 1x1 output convolution."""

    def __init__(self, n_channels, n_classes, bilinear=False):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear

        self.inc = DoubleConv(n_channels, 4)
        self.down1 = Down(4, 8)
        self.down2 = Down(8, 16)
        self.down3 = Down(16, 32)
        # Bilinear upsampling halves the channel budget of the bottleneck
        # and of every decoder stage.
        factor = 2 if bilinear else 1
        self.down4 = Down(32, 64 // factor)
        self.up1 = Up(64, 32 // factor, bilinear)
        self.up2 = Up(32, 16 // factor, bilinear)
        self.up3 = Up(16, 8 // factor, bilinear)
        self.up4 = Up(8, 4, bilinear)
        self.outc = OutConv(4, n_classes)

    def forward(self, x):
        # Encoder path; keep every scale for the skip connections.
        skip1 = self.inc(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        skip4 = self.down3(skip3)
        bottleneck = self.down4(skip4)
        # Decoder path, consuming the skip connections in reverse order.
        y = self.up1(bottleneck, skip4)
        y = self.up2(y, skip3)
        y = self.up3(y, skip2)
        y = self.up4(y, skip1)
        return self.outc(y)

    def get_latent_space_size(self):
        """Return the bottleneck feature-map shape for one patch-sized input."""
        probe = torch.randn((1, 3, CONFIG.MISC_patch_size[0], CONFIG.MISC_patch_size[1]))
        # Run only the encoder half and report the resulting shape.
        probe = self.inc(probe)
        probe = self.down1(probe)
        probe = self.down2(probe)
        probe = self.down3(probe)
        probe = self.down4(probe)
        return list(probe.shape)
class DoubleConv(nn.Module):
    """Two (conv => batch norm => ReLU) stages back to back."""

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        # The intermediate width defaults to the output width.
        mid = mid_channels or out_channels
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, mid, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.double_conv(x)
class Down(nn.Module):
    """Encoder stage: 2x2 max-pool then a DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Pool first so the convolutions run on the smaller map.
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels),
        )

    def forward(self, x):
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Decoder stage: upscale, pad to match the skip connection, then
    run a DoubleConv on the concatenated features."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        # Bilinear mode upsamples without parameters and lets the conv
        # reduce the channel count; otherwise a transposed conv halves it.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        upsampled = self.up(x1)
        # Sizes can differ by a pixel after odd-sized pooling; pad the
        # upsampled map (CHW layout) to match the skip connection x2.
        dy = x2.size()[2] - upsampled.size()[2]
        dx = x2.size()[3] - upsampled.size()[3]
        upsampled = F.pad(upsampled, [dx // 2, dx - dx // 2,
                                      dy // 2, dy - dy // 2])
        # Skip features first, then the upsampled features.
        return self.conv(torch.cat([x2, upsampled], dim=1))
class OutConv(nn.Module):
    """Final 1x1 convolution mapping features to per-class logits."""

    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)
| 4,019 | 29.687023 | 122 | py |
airloc | airloc-master/segmentations/utils.py | from config import CONFIG
import math
import json
import os
import time
import zipfile
import gc
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import random_split,Subset
import matplotlib
matplotlib.use('Agg')
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgb
import urllib
from dateutil.parser import parse
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import cross_entropy
# Run on init, setup colors for segmentation classes
CLASS_COLORS_LIST = ['#3C1098', '#8429F6', '#6EC1E4', '#FEDD3A', '#E2A929', '#9B9B9B','#000000' ]
CLASS_COLORS_TENSOR = torch.tensor([ to_rgb(c) for c in CLASS_COLORS_LIST])
"""
labels = [int(to_rgb(l)[0]*255) for l in labels]
cls = range(1,len(labels)+1)
labels = dict(zip(labels,cls))
"""
def map_ohe_to_color(seg_mask):
    """ Maps a ohe feature map with C channels to a RGB image with specific colors for all classes"""
    if len(seg_mask.shape) > 3:
        # Channel-wise one-hot input (B, C, H, W): pick the class with
        # the highest score per pixel.
        class_idx = torch.argmax(seg_mask, dim=1)
    else:
        # Already a (B, H, W) map of integer class labels.
        class_idx = seg_mask
    # Look up each class colour and move the RGB channel dim in front of
    # height and width.
    return CLASS_COLORS_TENSOR[class_idx].permute(0, 3, 1, 2)
def visualize_segmentation_est(val_seg , est_seg , vis_name = "visualization" , transform = None, max_images = 5):
    """Plot ground-truth and estimated segmentation masks side by side.

    Saves the figure as <vis_name>.png in CONFIG.STATS_SEG_log_dir.
    NOTE(review): `transform` is accepted but never used here.
    """
    # Keep at most `max_images` examples and move them off the GPU.
    val_seg = val_seg[0:max_images].cpu()
    est_seg = est_seg[0:max_images].cpu()
    max_images = min(max_images, val_seg.shape[0])
    # Due to ndindex being very weird and bad we need a little hack:
    # np.ndindex over a (1, 2) axes grid would break the loop below, so
    # force a second (blank) row and hide it later.
    if max_images == 1:
        max_images = 2
        turn_off_last_row = True
    else:
        turn_off_last_row = False
    fig, axes = plt.subplots(max_images, 2)
    # Transform images from channel wise ohe to RGB images
    rgb_val_seg = map_ohe_to_color(val_seg)
    # Check if still ohe; collapse to per-pixel class labels if so.
    if len(est_seg.shape) > 3:
        est_seg = torch.argmax(est_seg, dim = 1)
    rgb_est_seg = map_ohe_to_color(est_seg)
    # Left column: ground truth; right column: estimate.
    for (i, axis_inds) in enumerate(np.ndindex(axes.shape)):
        ix, iy = axis_inds
        if turn_off_last_row and ix == 1:
            axes[ix,iy].axis('off')
            continue
        curr_img = rgb_val_seg[ix,:] if iy == 0 else rgb_est_seg[ix,:]
        # imshow expects HWC, tensors are CHW.
        axes[ix,iy].imshow(curr_img.permute(1,2,0))
        if (ix,iy) == (0,0):
            axes[ix,iy].set_title('Ground Truth')
        if (ix,iy) == (0,1):
            axes[ix,iy].set_title('Estimate Segmentation')
    plt.savefig(os.path.join(CONFIG.STATS_SEG_log_dir , vis_name) + '.png')
    # Close everything and force a GC pass so repeated calls do not
    # accumulate live matplotlib figures.
    plt.cla()
    plt.clf()
    plt.close('all')
    gc.collect()
def format_labels_for_loss(labels):
    """Ensure segmentation labels are one-hot encoded, one channel per
    class, and moved to the configured device.

    Single-channel integer label maps (B, 1, H, W) are expanded; inputs
    that already have more than one channel pass through unchanged.
    """
    if labels.shape[1] != 1:
        return labels.to(CONFIG.device)
    # Expand integer labels into (B, C, H, W) one-hot planes.
    n_classes = int(torch.max(labels).item()) + 1
    label_out = torch.zeros((labels.shape[0], n_classes, labels.shape[2], labels.shape[3]))
    for cls in range(n_classes):
        label_out[:, cls, :, :] = ((labels == cls) * 1.0).squeeze(1)
    return label_out.long().to(CONFIG.device)
def compute_loss(est_mask, label_mask):
    """Segmentation loss: plain cross-entropy between the estimated and
    the ground-truth masks.

    TODO - Add boundary separation loss
    """
    return cross_entropy(est_mask, label_mask)
def mask_image(image_batch, grid_size = 8):
    """Randomly black out cells of a grid_size x grid_size grid over each
    image in the batch.

    Each cell is kept with probability 0.5. The input batch is not
    modified; a masked copy is returned.
    """
    p = 0.5
    masked_batch = image_batch.clone().detach()
    # Upsampling factor from the mask grid to full image resolution.
    n_up = int(CONFIG.MISC_im_size[0]/grid_size)
    for i_img in range(masked_batch.shape[0]):
        # BUG FIX: the grid shape was hard-coded to (8, 8), so the
        # grid_size parameter was silently ignored.
        masked = (np.random.uniform(size = (grid_size, grid_size)) > p )
        masked = masked.repeat(n_up, axis=0).repeat(n_up, axis=1)
        masked_batch[i_img] = masked_batch[i_img] * torch.tensor(masked)
    return masked_batch
# DICE Loss
class DiceLoss(nn.Module):
    """Negative mean (optionally class-weighted) Dice coefficient."""

    def __init__(self, class_weights=None):
        super(DiceLoss, self).__init__()
        self.class_weights = class_weights
        # List weights are converted to a tensor on the configured device.
        if isinstance(self.class_weights, list):
            self.class_weights = torch.tensor(self.class_weights)
            self.class_weights = self.class_weights.to(CONFIG.device)

    def forward(self, inputs, targets):
        """
        Calculate Dice loss between inputs and targets assumed to be BxCxHxW
        """
        # Inputs that look like raw logits are squashed to (0, 1) first.
        if inputs.max() > 1 or inputs.min() < -1:
            inputs = torch.sigmoid(inputs)
        overlap = (inputs * targets).flatten(start_dim=2).sum(dim=2)
        totals = inputs.sum(dim=(2, 3)) + targets.sum(dim=(2, 3))
        # +1 smoothing keeps the ratio finite for empty masks.
        dice = (2 * overlap + 1.0) / (totals + 1.0)
        if self.class_weights is not None:
            dice = self.class_weights * dice
        return -dice.mean()
def debug_plot(im):
    """Show a single (C, H, W) tensor image in an interactive window.

    Switches matplotlib to the TkAgg backend; intended for local
    debugging only.
    """
    matplotlib.use('TkAgg')
    # imshow expects HWC, the tensor is CHW.
    plt.imshow(im.permute(1,2,0))
    plt.show()
| 5,241 | 27.802198 | 114 | py |
airloc | airloc-master/segmentations/unet_parts.py | """ Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
    """Two (conv => batch norm => ReLU) stages back to back."""

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        # The intermediate width defaults to the output width.
        mid = mid_channels or out_channels
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, mid, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.double_conv(x)
class Down(nn.Module):
    """Encoder stage: 2x2 max-pool then a DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Pool first so the convolutions run on the smaller map.
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels),
        )

    def forward(self, x):
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Decoder stage: upscale, pad to match the skip connection, then
    run a DoubleConv on the concatenated features."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        # Bilinear mode upsamples without parameters and lets the conv
        # reduce the channel count; otherwise a transposed conv halves it.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        upsampled = self.up(x1)
        # Sizes can differ by a pixel after odd-sized pooling; pad the
        # upsampled map (CHW layout) to match the skip connection x2.
        dy = x2.size()[2] - upsampled.size()[2]
        dx = x2.size()[3] - upsampled.size()[3]
        upsampled = F.pad(upsampled, [dx // 2, dx - dx // 2,
                                      dy // 2, dy - dy // 2])
        # Skip features first, then the upsampled features.
        return self.conv(torch.cat([x2, upsampled], dim=1))
class OutConv(nn.Module):
    """Final 1x1 convolution mapping features to per-class logits."""

    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)
| 2,602 | 32.371795 | 122 | py |
airloc | airloc-master/segmentations/networks.py |
import random
from config import CONFIG
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from utils.utils import calculate_cnn_output_size
class BakeNet(nn.Module):
    """Small fully convolutional segmentation net.

    The output is a per-pixel softmax over ``n_out_chan`` classes
    (one-hot encoding format). Very simple demonstration network.
    """

    def __init__(self, n_out_chan=2):
        super(BakeNet, self).__init__()
        # Four 3x3 conv stages with batch norm, closed by a channel-wise
        # softmax; spatial resolution is preserved throughout.
        self.seq = nn.Sequential(
            nn.Conv2d(3, 16, 3, 1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(16),
            nn.Conv2d(16, 32, 3, 1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 16, 3, 1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(16),
            nn.Conv2d(16, n_out_chan, 3, 1, padding=1),
            nn.Softmax(dim=1),
        )

    def forward(self, x):
        return self.seq(x)

    def get_latent_space_size(self):
        # Not clear latent space
        return 0
| 1,229 | 25.170213 | 73 | py |
airloc | airloc-master/segmentations/train.py |
import os
import torch
import random
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.nn as nn
import json
import numpy as np
from torchinfo import summary
from shutil import copyfile
from utils.utils import get_random_crops, get_deterministic_crops, load_normalize_data
from segmentations.utils import compute_loss, format_labels_for_loss, visualize_segmentation_est
from segmentations.utils import DiceLoss
from segmentations.networks import BakeNet
from segmentations.u_net import UNet
import torch.optim as optim
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from utils.stat_collector import StatCollector
from config import CONFIG
# Set all hyperparameters here to not clutter the config file
try:
netType = 'UNet'
seed = 0
batch_size = 64
epochs = 10000
multiply_images = 2
optimizer_type = 'adam'
learning_rate = 1e-4
lower_learning_rate_factor = 0.33
momentum = 0.95
beta1 = 0.9
beta2 = 0.999
print_iter = 100
vis_iter = 125
# Set seeds
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
if not CONFIG.RL_priv_use_seg:
raise(Exception("ERROR: segmentation information not enabled!"))
info = dict([
("NetType" , netType),
("Completed" , False),
("Metrics", [
"Loss",
"ValLoss",
"Accuracy",
"ValAccuracy"
]),
("LogDir" , None),
("Blocks" , [1 ,1 , 1]),
("NbrParameters" , 0),
("LatentSpaceSize" , 0),
("NbrImagesInTrain", 0),
("NbrImagesInVal" , 0)
])
metrics = info['Metrics']
# Function used to update leanring rate
def update_learning_rate(optimizer , learning_rate):
for params in opimizier.param_groups:
params['lr'] = learning_rate
    # Transform all input images: resize to patch size and roughly normalize
    loadTransform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize(CONFIG.MISC_patch_size),
        transforms.Normalize((0.5,0.5,0.5) , (0.5,0.5,0.5))
    ])
    # Find device
    device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
    CONFIG.device = device
    # Load data
    trainloader,valloader = load_normalize_data(download = False, batch_size = batch_size , multiply_images = multiply_images)
    valiter = iter(valloader)
    info['NbrImagesInTrain'] = len(trainloader) * batch_size
    info['NbrImagesInVal'] = len(valloader) * batch_size
    tot_itr = 0
    tot_nbr_itr = epochs * len(trainloader)
    # Instantiate the selected network: 3 input channels (RGB),
    # 2 output classes (background / foreground)
    if netType == 'BakeNet':
        net = BakeNet()
    elif netType == 'UNet':
        net = UNet( n_channels = 3, n_classes = 2)
    else:
        raise(Exception("Unkown network type"))
    # Count number of trainable parameters
    net_parameters = filter(lambda p: p.requires_grad, net.parameters())
    info['NbrParameters'] = int(sum([np.prod(p.size()) for p in net_parameters]))
    info['LatentSpaceSize'] = net.get_latent_space_size()
    # Set optimizer
    if optimizer_type == 'sgd':
        optimizer = optim.SGD(net.parameters() , lr = learning_rate, momentum = momentum)
    elif optimizer_type == 'adam':
        optimizer = optim.Adam(net.parameters() , lr = learning_rate, betas =(beta1 , beta2))
    else:
        raise(Exception("Unknown optimizer type:\t%s" % optimizer_type))
    # Select loss function; class weights up-weight the (rarer) foreground class
    class_weights = [1,3]
    #criterion = nn.MSELoss()
    criterion = DiceLoss(class_weights)
    # Setup stat collector; logs go to a per-run dir mirroring the RL log dir name
    log_dir_name = os.path.basename(CONFIG.STATS_log_dir)
    CONFIG.STATS_SEG_log_dir = os.path.join(CONFIG.MISC_project_root_path, "segmentations", "logs", log_dir_name)
    os.makedirs(CONFIG.STATS_SEG_log_dir)
    sc = StatCollector(CONFIG.STATS_SEG_log_dir, tot_nbr_itr, print_iter = print_iter)
    for metric in metrics:
        sc.register(metric , {'type':'avg','freq':'step'})
    # Enter log dir and write to file
    info['LogDir'] = CONFIG.STATS_SEG_log_dir
    with open(os.path.join(CONFIG.STATS_SEG_log_dir, "info.json") , 'w') as io:
        json.dump(info, io , indent = 4)
    # Construct noise for network summary
    noise = torch.randn(1,3, CONFIG.MISC_patch_size[0] , CONFIG.MISC_patch_size[1])
    # Print model summary to separate file
    with open(os.path.join(CONFIG.STATS_SEG_log_dir , "network_summary") , 'w') as io:
        print(summary(net, input_data = noise, verbose=0), file = io)
    # Save all source files for reproducibility
    copyfile("segmentations/train.py" , os.path.join(CONFIG.STATS_SEG_log_dir, "train.py"))
    copyfile("config.py" , os.path.join(CONFIG.STATS_SEG_log_dir, "config.py"))
    copyfile("segmentations/networks.py" , os.path.join(CONFIG.STATS_SEG_log_dir, "networks.py"))
    copyfile("segmentations/u_net.py" , os.path.join(CONFIG.STATS_SEG_log_dir, "u_net.py"))
    copyfile("segmentations/utils.py" , os.path.join(CONFIG.STATS_SEG_log_dir, "utils.py"))
    # Load per-dataset channel statistics used to un-normalize images for display
    if CONFIG.MISC_dataset.startswith('custom_'):
        stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
    else:
        stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
    with open(stat_path) as json_file:
        stats = json.load(json_file)
    dataset_means = torch.tensor(stats['means'][:3])
    dataset_stds = torch.tensor(stats['stds'][:3])
    unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
    # Decrease learning rate every 20 epoch
    current_learning_rate = learning_rate
def update_learning_rate(optimizer , learning_rate):
for params in optimizer.param_groups:
params['lr'] = learning_rate
    net = net.to(device)
    # --- Main training loop: one optimizer step + one validation batch per iteration ---
    for epoch in range(epochs):
        for (i,data) in enumerate(trainloader):
            batch_images , (start_crops_ , goal_crops_) = data
            batch_images = batch_images  # NOTE(review): no-op assignment, likely leftover from a .to(device) call
            # Get random crops
            crops , _ = get_random_crops(batch_images)
            # The input is the RGB image
            inpt = crops[:,0:3,:,:].to(device)
            # The ground truth are the segmentation mask
            labels = crops[:,3:,:,:].to(device)
            # Labels need to be transformed to correct format
            labels = format_labels_for_loss(labels)
            outputs = net(inpt)
            loss = criterion(outputs, labels)
            #loss = compute_loss(outputs , labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            sc.s('Loss').collect(loss.item())
            # Calculate and log pixelwise accuracy
            acc = (torch.argmax(labels, dim = 1) == torch.argmax(outputs, dim = 1)).float().mean()
            sc.s('Accuracy').collect(acc.item())
            with torch.no_grad():
                # Run one batch on the valloader aswell; restart the iterator
                # when it is exhausted
                try:
                    val_images, (start_crops_ , goal_crops_) = next(valiter)
                except:
                    valiter = iter(valloader)
                    val_images, ( start_crops_ , goal_crops_) = next(valiter)
                val_images = val_images.to(device)
                # Due to how the dataloading is devised the valloader has the same multiply images
                # as the trainloader. During validation fixed crops are used therefore
                # there is no need to run same image with same patch multiple times
                val_images = val_images[0:-1:multiply_images ,:]
                val_crops , _ = get_deterministic_crops(val_images, coords = start_crops_)
                val_inpt = val_crops[:,0:3,:,:].to(device)
                val_labels = val_crops[:,3:,:,:].to(device) # new added extra channel for image boundaries
                val_labels = format_labels_for_loss(val_labels)
                val_outs = net(val_inpt).to(device)
                val_loss = criterion(val_outs, val_labels)
                # Logging
                sc.s('ValLoss').collect(val_loss.item())
                val_acc = (torch.argmax(val_labels, dim = 1) == torch.argmax(val_outs, dim = 1)).float().mean()
                sc.s('ValAccuracy').collect(val_acc.item())
            # Periodically print and persist the collected statistics
            if tot_itr % print_iter == 0 or tot_itr == tot_nbr_itr - 1:
                print("Iteration:\t%d / %d" % ( tot_itr, tot_nbr_itr))
                sc.print()
                sc.save()
            # Do visualize
            if tot_itr % vis_iter == 0 or tot_itr == tot_nbr_itr - 1:
                visualize_segmentation_est(val_labels , val_outs , vis_name = "vis_%d" % tot_itr)
            tot_itr += 1
        # Lower the learning rate
        # NOTE(review): the 'and False' below deliberately disables LR decay
        if (epoch + 1) % 10 == 0 and False:
            current_learning_rate *= lower_learning_rate_factor
            update_learning_rate(optimizer, current_learning_rate)
    # Mark the run as completed in the metadata file
    info['Completed'] = True
    with open(os.path.join(CONFIG.STATS_SEG_log_dir, "info.json") , 'w') as io:
        json.dump(info , io , indent = 4)
finally:
while(True):
user = input("\nSave model [yes/no]\n")
if user == 'y':
# Save the encoder and the decoder
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_SEG_log_dir, "final_unet"))
print("Model Saved")
break
elif user == 'n':
print("Model Not Saved")
break
print("\nTraining Completed!\n")
| 9,545 | 32.261324 | 126 | py |
airloc | airloc-master/segmentations/logs/sem-seg-model/u_net.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import CONFIG
class UNet(nn.Module):
    """Compact U-Net: four down-sampling stages, four up-sampling stages
    with skip connections, and a 1x1 output head producing per-pixel logits.
    Attribute names are fixed (inc/down*/up*/outc) so saved state_dicts load."""

    def __init__(self, n_channels, n_classes, bilinear=False):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        # Bilinear upsampling keeps channel counts, so the decoder halves them
        factor = 2 if bilinear else 1
        self.inc = DoubleConv(n_channels, 4)
        self.down1 = Down(4, 8)
        self.down2 = Down(8, 16)
        self.down3 = Down(16, 32)
        self.down4 = Down(32, 64 // factor)
        self.up1 = Up(64, 32 // factor, bilinear)
        self.up2 = Up(32, 16 // factor, bilinear)
        self.up3 = Up(16, 8 // factor, bilinear)
        self.up4 = Up(8, 4, bilinear)
        self.outc = OutConv(4, n_classes)

    def forward(self, x):
        """Encode, then decode against the stored skip tensors; return logits."""
        skips = [self.inc(x)]
        for down in (self.down1, self.down2, self.down3, self.down4):
            skips.append(down(skips[-1]))
        out = skips[4]
        for up, skip in zip((self.up1, self.up2, self.up3, self.up4),
                            (skips[3], skips[2], skips[1], skips[0])):
            out = up(out, skip)
        return self.outc(out)

    def get_latent_space_size(self):
        """Shape of the bottleneck tensor for one patch-sized 3-channel input."""
        probe = torch.randn((1, 3, CONFIG.MISC_patch_size[0], CONFIG.MISC_patch_size[1]))
        # Run only the encoder half and report the resulting shape
        for stage in (self.inc, self.down1, self.down2, self.down3, self.down4):
            probe = stage(probe)
        return list(probe.shape)
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2.

    If *mid_channels* is not given, the intermediate width equals
    *out_channels*. The submodule is named ``double_conv`` so existing
    checkpoints keep loading.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        mid_channels = mid_channels or out_channels
        layers = [
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply both conv-BN-ReLU stages to *x*."""
        return self.double_conv(x)
class Down(nn.Module):
    """Downscaling with maxpool then double conv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Halve the spatial resolution, then widen/deepen the features
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels),
        )

    def forward(self, x):
        """Return the pooled-and-convolved feature map."""
        pooled = self.maxpool_conv(x)
        return pooled
class Up(nn.Module):
    """Upscaling then double conv."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        if bilinear:
            # Bilinear path: plain upsampling, the convs reduce channels
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            # Learned path: transposed conv halves the channels itself
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        """Upsample *x1*, pad it to the spatial size of skip tensor *x2*,
        concatenate along channels, and convolve."""
        upsampled = self.up(x1)
        # input is CHW: make the upsampled map match the skip connection size
        dy = x2.size()[2] - upsampled.size()[2]
        dx = x2.size()[3] - upsampled.size()[3]
        upsampled = F.pad(upsampled, [dx // 2, dx - dx // 2,
                                      dy // 2, dy - dy // 2])
        # if you have padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        merged = torch.cat([x2, upsampled], dim=1)
        return self.conv(merged)
class OutConv(nn.Module):
    """Final 1x1 convolution mapping feature channels to class logits."""

    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        """Project *x* to *out_channels* maps, spatial size unchanged."""
        logits = self.conv(x)
        return logits
| 4,019 | 29.687023 | 122 | py |
airloc | airloc-master/utils/agent_utils.py |
import math
import os
import time
import zipfile
import warnings
import gc
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from torch.distributions import MultivariateNormal, OneHotCategorical
import matplotlib
import matplotlib.pyplot as plt
from torch.nn import CrossEntropyLoss
from config import CONFIG
from utils.utils import move_crop , get_deterministic_crops , compute_iou ,\
get_random_crops , get_frac_outside , visualize_trajectory , \
get_crop_distance , check_outside , project_into_image
def normalize_batch_weights(batch_weights , batch_dists):
    """Normalize trajectory weights across the batch.

    Skipped when the batch holds a single episode (batch size 1 and no image
    multiplication), since per-group statistics would be meaningless there.
    """
    if CONFIG.RL_batch_size == 1 and CONFIG.RL_multiply_images == 1:
        return batch_weights
    with warnings.catch_warnings():
        # nan-mean / nan-std warnings on empty groups are expected; silence them
        warnings.simplefilter('ignore')
        batch_weights = normalize_grid(batch_weights, batch_dists)
    return batch_weights
def normalize_grid(batch_weights,batch_dists):
    """Standardize *batch_weights* within distance buckets of *batch_dists*.

    Weights whose start-goal distance falls in (lwr, upr] are normalized
    together (zero mean, unit std, NaNs ignored); everything beyond the last
    bucket boundary forms one final group. Returns a tensor on CONFIG.device.
    """
    # Due to pytorch lacking nanstd we have to convert to numpy to do the work
    batch_dists_np = batch_dists.cpu().detach().numpy()
    batch_weights_np = batch_weights.cpu().detach().numpy()
    n_groups = 5
    step = 1
    lwr,upr = 0,step
    for i in range(n_groups):
        idx = np.all([(batch_dists_np <= upr) , (batch_dists_np >lwr)],axis =
            0)
        # Calculate nanstd separately to make sure that it isn't zero anywhere
        nanstd = np.nanstd(batch_weights_np[idx] )
        if nanstd == 0.0:
            nanstd = 1
        # Normalize weights for each step of the agent separately
        batch_weights_np[idx] = (batch_weights_np[idx] - np.nanmean(batch_weights_np[idx])) / nanstd
        # Move to the next set of distances
        lwr += step
        upr += step
    # Handle the largest as one group
    idx = batch_dists_np >lwr
    # Calculate nanstd separately to make sure that it isn't zero anywhere
    nanstd = np.nanstd(batch_weights_np[idx] , axis = 0)
    if nanstd == 0.0:nanstd = 1
    # Normalize weights for each step of the agent separately
    batch_weights_np[idx] = (batch_weights_np[idx] - np.nanmean(batch_weights_np[idx])) / nanstd
    # Convert back to tensor and send to device
    batch_weights = torch.from_numpy(batch_weights_np).to(CONFIG.device)
    return batch_weights
def get_policy(agent_net, episode):
    """Build the move distribution for *episode* from the agent's raw output.

    Returns (policy, softmax_embedding): *policy* is a OneHotCategorical over
    the 8 movement directions, built from the network's probability output.
    """
    probs, softmax_embedding = agent_net(episode)
    policy = OneHotCategorical(probs = probs)
    return policy, softmax_embedding
def get_action(agent_net , episode, deterministic = False):
    """Pick the agent's next move.

    Deterministic mode returns the network output directly; otherwise a move
    is sampled from the policy, with one resample if the first sample would
    leave the grid and leaving is not allowed.
    """
    if deterministic:
        return agent_net(episode)
    policy , softmax_embedding = get_policy(agent_net, episode)
    samp = policy.sample()
    if not CONFIG.RL_agent_allowed_outside:
        outside = get_outside(episode).to(CONFIG.device)
        # Resample once if the sampled direction points off the grid
        if (samp * outside).sum() == 1 :
            samp = policy.sample()
    return samp, softmax_embedding
def get_outside(episode):
    """Return a (1, 8) mask of move directions that would exit the grid.

    Direction layout: 0=up, 1=up-right, 2=right, 3=down-right, 4=down,
    5=down-left, 6=left, 7=up-left. Assumes a 5x5 grid of positions
    (coordinates 0..4 after dividing by the step size) — TODO confirm.
    """
    mask = torch.zeros([1, 8])
    x, y = episode.locs[episode.step, :2] / CONFIG.MISC_step_sz
    if x == 0:
        # At one border: block up-left, up and up-right
        mask[0, 7] = 1
        mask[0, :2] = 1
    if y == 0:
        mask[0, 5:] = 1
    if x == 4:
        mask[0, 3:6] = 1
    if y == 4:
        mask[0, 1:4] = 1
    return mask
def map_action_to_move(action):
    """ Maps the action which is a one hot encoded vector to a move in pixels."""
    c = torch.argmax(action).item()
    # Step length in pixels, scaled relative to the patch size
    step_sz = int(CONFIG.RL_softmax_step_size * CONFIG.MISC_patch_size[0])
    # (dy, dx) unit offsets; increasing y means moving down in the image
    offsets = {
        0: (-1, 0),   # up
        1: (-1, 1),   # up right
        2: (0, 1),    # right
        3: (1, 1),    # down right
        4: (1, 0),    # down
        5: (1, -1),   # down left
        6: (0, -1),   # left
        7: (-1, -1),  # up left
    }
    if c not in offsets:
        raise(Exception("Invalid action:\t%d" % c))
    dy, dx = offsets[c]
    # Scale the unit direction by the step size to get a pixel move
    move = step_sz * torch.tensor([dy , dx])
    return move
def take_step(action , episode, softmax_embedding=None):
    """Apply *action* to *episode*; return (loc_next, reward, done)."""
    # Translate the one-hot action into a pixel offset and apply it to the
    # current location (first two coordinates are the position)
    delta = map_action_to_move(action)[None, :]
    loc_next = episode.loc_current.clone().detach()
    loc_next[0, 0:2] += delta[0, :]
    # Score the move, then check for termination
    reward = get_reward(loc_next, episode, action)
    done = check_if_done(loc_next, episode)
    return loc_next, reward, done
def check_if_done(loc_next , episode):
    """True when the agent overlaps the goal enough, or the step budget runs out."""
    reached_goal = compute_iou(loc_next, episode.loc_goal).item() >= CONFIG.RL_done_iou
    out_of_steps = episode.step + 1 >= CONFIG.RL_max_episode_length
    return reached_goal or out_of_steps
def get_reward(loc_next, episode, action):
    """Shaped reward for moving to *loc_next*: per-step cost, IoU bonus,
    goal/failure terminal bonuses, a got-closer bonus, and an optional
    distance-based term."""
    # Rewards are partially based on distances
    prev_dist = get_crop_distance(episode.loc_current[0], episode.loc_goal[0])
    next_dist = get_crop_distance(loc_next[0], episode.loc_goal[0])
    # TODO: Add max dist which is in regard to the goal and start patches
    max_dist = np.sqrt(np.prod(np.array(CONFIG.MISC_im_size) - np.array(CONFIG.MISC_patch_size)))
    iou = compute_iou(loc_next , episode.loc_goal).item()
    # Base step reward, plus a scaled bonus once there is meaningful overlap
    if iou > 0.2:
        reward = CONFIG.RL_reward_step + CONFIG.RL_reward_iou_scale * iou
    else:
        reward = CONFIG.RL_reward_step
    # Terminal bonuses: reaching the goal, or failing by running out of steps.
    # NOTE(review): uses a strict '>' while check_if_done uses '>=' for the
    # same threshold — an episode ending exactly at RL_done_iou gets no goal
    # bonus here; confirm whether that is intended.
    if iou > CONFIG.RL_done_iou:
        reward += CONFIG.RL_reward_goal
    elif episode.step + 1 >= CONFIG.RL_max_episode_length:
        reward += CONFIG.RL_reward_failed
    if ( prev_dist > next_dist):
        reward += CONFIG.RL_reward_closer
    # Optional dense shaping proportional to remaining distance
    if CONFIG.RL_reward_distance:
        reward += CONFIG.RL_reward_goal*(max_dist - next_dist)/ max_dist
    return reward
def update_net(batch , agent_net, optimizer, entropy_bonus = None):
    """Policy-gradient update over the episodes stored in *batch*.

    Accumulates -logp * weight over every step of every episode and takes an
    optimizer step each time CONFIG.RL_nbr_eps_update episodes have been
    consumed; a final flush handles the remainder (if at least half a group,
    or the whole batch, is pending). An optional entropy bonus is subtracted
    from the loss.
    """
    loss = 0
    # Log the entropy of taken action
    entropy_taken_actions = torch.zeros(CONFIG.RL_batch_size * CONFIG.RL_multiply_images * CONFIG.RL_max_episode_length)
    action_counter = 0
    eps_counter = 0
    # Get one trajectory, calculate loss for each time step and add to global loss
    for ep_id in range(batch.idx):
        eps_counter += 1
        for step_id in range(1 , batch.steps[ep_id].int() + 1):
            # Get the episode, the action and the weight
            ep , action , weight = batch.get_episode(ep_id , step_id)
            # Get the corresponding policy
            policy , softmax_embedding = get_policy(agent_net , ep)
            # Get log probability of taken action
            logp = policy.log_prob(action)
            # Add to loss with weight
            loss -= logp * weight
            # Calculate entropy for logging (and possibly for entropy bonus)
            # entropy = - policy.probs * policy.logits
            entropy = policy.entropy()
            entropy_taken_actions[action_counter] = entropy
            if entropy_bonus is not None and entropy_bonus != 0:
                loss -= entropy_bonus * entropy
            action_counter += 1
        # If the agent is of type RNN reset the hidden states
        if agent_net.AGENT_TYPE == 'RNN':
            agent_net.reset()
        # Update the network with the correct frequency
        if CONFIG.RL_nbr_eps_update == eps_counter:
            loss = loss / action_counter
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            batch.sc.s('Loss').collect( loss.item() )
            batch.sc.s('Entropy').collect(entropy_taken_actions[0:action_counter].mean().item())
            loss = 0
            action_counter = 0
            eps_counter = 0
    # Flush the remaining episodes, but only when enough of them are pending
    if (CONFIG.RL_nbr_eps_update //2) <= eps_counter or (batch.idx == eps_counter ):
        loss = loss / action_counter
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch.sc.s('Loss').collect( loss.item() )
        batch.sc.s('Entropy').collect(entropy_taken_actions[0:action_counter].mean().item())
        loss = 0
        action_counter = 0
    else:
        pass
        #print("Skipping batch with %d episodes" % eps_counter)
def compute_loss(batch , agent_net, entropy_bonus = None):
    """Return the mean policy-gradient loss over all steps in *batch*
    (no optimizer step — unlike update_net). Entropy is logged, and an
    optional entropy bonus is subtracted from the loss."""
    loss = 0
    # Log the entropy of taken action
    entropy_taken_actions = torch.zeros(CONFIG.RL_batch_size * CONFIG.RL_multiply_images * CONFIG.RL_max_episode_length)
    action_counter = 0
    # Get one trajectory, calculate loss for each time step and add to global loss
    for ep_id in range(batch.idx):
        for step_id in range(1 , batch.steps[ep_id].int() + 1):
            # Get the episode, the action and the weight
            ep , action , weight = batch.get_episode(ep_id , step_id)
            # Get the corresponding policy
            policy , softmax_embedding = get_policy(agent_net , ep)
            # Get log probability of taken action
            logp = policy.log_prob(action)
            # Add to loss with weight
            loss -= logp * weight
            # Calculate entropy for logging (and possibly for entropy bonus)
            # entropy = - policy.probs * policy.logits
            entropy = policy.entropy()
            entropy_taken_actions[action_counter] = entropy
            if entropy_bonus is not None and entropy_bonus != 0:
                loss -= entropy_bonus * entropy
            action_counter += 1
        # If the agent is of type RNN reset the hidden states
        if agent_net.AGENT_TYPE == 'RNN':
            agent_net.reset()
    # Log the entropy
    batch.sc.s('Entropy').collect(entropy_taken_actions[0:action_counter].mean().item())
    loss = loss / action_counter
    batch.sc.s('Loss').collect( loss.item() )
    return loss
def map_grid_dist_to_ohe( grid_dist):
    """Map a (dy, dx) grid offset to the index of the move pointing toward it.

    Direction layout: 0=up, 1=up-right, 2=right, 3=down-right, 4=down,
    5=down-left, 6=left, 7=up-left (increasing y is down in the image).
    Diagonal directions are chosen whenever both components are non-zero.

    Returns:
        A shape-(1,) long tensor holding the direction index.

    Raises:
        Exception: if grid_dist is (0, 0), i.e. there is no move to encode.

    Cleanup vs. original: removed an unused ``ohe`` tensor allocation and
    dead commented-out code; behavior is unchanged.
    """
    c = torch.zeros((1))
    # grid_dist = (dy, dx)
    if grid_dist[0] < 0 and grid_dist[1] == 0:
        c[0] = 0 # up
    elif grid_dist[0] < 0 and grid_dist[1] > 0:
        c[0] = 1 # right up
    elif grid_dist[0] == 0 and grid_dist[1] > 0:
        c[0] = 2 # right
    elif grid_dist[0] > 0 and grid_dist[1] > 0:
        c[0] = 3 # right down
    elif grid_dist[0] > 0 and grid_dist[1] == 0:
        c[0] = 4 # down
    elif grid_dist[0] > 0 and grid_dist[1] < 0:
        c[0] = 5 # left down
    elif grid_dist[0] == 0 and grid_dist[1] < 0:
        c[0] = 6 # left
    elif grid_dist[0] < 0 and grid_dist[1] < 0:
        c[0] = 7 # left up
    else:
        raise(Exception("Invalid action:\t%s" % grid_dist))
    return c.long()
"""
def compute_loss(batch , agent_net, entropy_bonus = None):
loss = 0
# Log the entropy of taken action
entropy_taken_actions = torch.zeros(CONFIG.RL_batch_size * CONFIG.RL_multiply_images * CONFIG.RL_max_episode_length)
action_counter = 0
# Get one trajectory, calculate loss for each time step and add to global loss
for ep_id in range(batch.idx):
for step_id in range(1 , batch.steps[ep_id].int() + 1):
# Get the episode, the action and the weight
ep , action , weight = batch.get_episode(ep_id , step_id)
# Get the corresponding policy
policy = get_policy(agent_net , ep)
# Get log probability of taken action
logp = policy.log_prob(action)
# Add to loss with weight
loss -= logp * weight
# Calculate entropy for logging (and possibly for entropy bonus)
# entropy = - policy.probs * policy.logits
entropy = policy.entropy()
entropy_taken_actions[action_counter] = entropy
if entropy_bonus is not None and entropy_bonus != 0:
loss -= entropy_bonus * entropy
action_counter += 1
# If the agent is of type RNN reset the hidden states
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
# Log the entropy
batch.sc.s('Entropy').collect(entropy_taken_actions[0:action_counter].mean().item())
return loss/batch.idx
"""
"""
Calculates the rewards from step until finish given the reward of the trajectory.
"""
def rewards_to_go(rewards):
    """Discounted rewards-to-go: rtg[i] = sum_k gamma^k * rewards[i + k].

    Computed with a single backward pass (Horner's scheme), replacing the
    original O(n^2) per-index discount-vector products with an O(n) loop
    that is mathematically identical.
    """
    gamma = CONFIG.RL_discount_factor
    rtg = torch.zeros_like(rewards).to(CONFIG.device)
    running = 0.0
    for i in range(len(rewards) - 1, -1, -1):
        running = rewards[i] + gamma * running
        rtg[i] = running
    return rtg
""" Run a trajectory in a search area """
def run_eval_trajectory(image,episode, agent_net, deterministic = None, loc_start = None, loc_goal = None, probs_diff = None):
    """Run one evaluation episode in *image* and return the finished episode.

    *deterministic* defaults to CONFIG.RL_eval_deterministic, now resolved at
    call time. FIX: the original evaluated the config value in the def line,
    freezing it at import time so later config changes were silently ignored.
    """
    if deterministic is None:
        deterministic = CONFIG.RL_eval_deterministic
    episode.initialize(image = image , loc_start=loc_start, loc_goal = loc_goal, probs_diff = probs_diff)
    # Execute episode
    done = False
    while not done:
        # Get an action from the agent
        action, softmax_embedding = get_action(agent_net, episode, deterministic)
        # Update the environment according to the correct action
        loc_next, reward, done = take_step(action, episode, softmax_embedding)
        # Get the crop at the current location
        crop_current, loc_current = get_deterministic_crops(image, coords=loc_next[0])
        # Best-effort: store the softmaxed embedding, or zeros when the
        # embedding is missing/not a tensor (narrowed from a bare except,
        # which also swallowed KeyboardInterrupt)
        try:
            tmp = torch.nn.Softmax( dim = 1 )(softmax_embedding).cpu().detach().numpy()
        except Exception:
            tmp = np.zeros((1, 8))
        episode.update(action, reward, loc_current, crop_current, misc=tmp)
    # Episode done return results
    episode.finish()
    return episode
""" Used to freeze or unfreeze parts of the network """
def set_freezed_parts_of_net(net , mode = 'none'):
    """Freeze or unfreeze parts of *net* by toggling requires_grad.

    mode = 'patch'  - freeze the patch embedder, unfreeze everything else
    mode = 'policy' - freeze everything that is not the patch embedder
    mode = 'none'   - unfreeze all parts of the network

    BUG FIXES vs. original: it referenced ``self.patch_emb`` inside a free
    function (NameError on every call), and its unfreeze branches assigned
    ``parameter.requires_grad`` outside the parameter loop, touching only a
    single stale parameter. Assumes *net* exposes its patch embedder as
    ``net.patch_emb`` — confirm against the agent network definition.
    """
    for child in net.children():
        if child is net.patch_emb:
            freeze = (mode == 'patch')
        else:
            freeze = (mode == 'policy')
        for parameter in child.parameters():
            parameter.requires_grad = not freeze
def visualize_cnn_filter(conv_layer, filter = 0, save_name = 'filter_vis.png', show = True):
    """ Plots the weights of a filter in a convolutional layer.

    Shows (up to) the first 16 input-channel slices of filter number *filter*
    in a roughly square subplot grid. NOTE(review): the parameter name
    ``filter`` shadows the builtin; it is part of the public keyword
    interface, so it is left unchanged. Saving is currently disabled by the
    ``if False`` below, so *save_name* has no effect.
    """
    input_channels_ploted = min( 16 , conv_layer.weight.shape[1])
    # Select the requested filter, then keep only the plotted input channels
    # as an HxWxC numpy array
    filter = conv_layer.weight[filter,:]
    filter_ = filter[0:input_channels_ploted,:,:].detach().clone().cpu().permute(1,2,0).numpy()
    # Arrange the channels in an (almost) square grid
    n_rows = int(math.sqrt(input_channels_ploted))
    n_cols = int( input_channels_ploted / n_rows) + int( input_channels_ploted % n_rows != 0)
    # Interactive backend only needed when actually showing the figure
    matplotlib.use('TkAgg') if show else None
    fig, axes = plt.subplots(n_rows , n_cols)
    for (i , ax_inds) in enumerate(np.ndindex(axes.shape)):
        axes[ax_inds].imshow(filter_[:,:,i])
        axes[ax_inds].set_title("Input Channel %d" % i)
    if show:
        plt.show()
    if False: # for now....
        plt.savefig(os.path.join(CONFIG.STATS_log_dir, "filter_visualizations", save_name))
    # Clean up all matplotlib state to avoid leaking figures between calls
    plt.cla()
    plt.clf()
    plt.close('all')
    gc.collect()
| 16,432 | 31.158513 | 150 | py |
airloc | airloc-master/utils/dataset_utils.py | import os
import random
import torch
from PIL import Image
import pandas as pd
from torch.utils.data.dataset import Dataset
from torchvision import transforms
import torchvision.transforms.functional as F
import numpy as np
import glob
from config import CONFIG
import argparse
import sys
import time
class DubaiSeven(Dataset):
    """Dataset of aerial images (plus optional segmentation labels) rooted at
    <dataset_root_path>/dubai_seven.

    Each item is (image, (start_crop, goal_crop)) where the crops are pixel
    coordinates read from a partition CSV. When CONFIG.RL_priv_use_seg is
    set, the segmentation mask is concatenated to the image as an extra
    channel.
    """
    def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False, use_eval_split = False):
        # NOTE(review): generate_new_split_file is accepted but never used here.
        dataset_root_path = os.path.join(dataset_root_path , "dubai_seven")
        # Check if we are to use the special eval_split_file
        if use_eval_split == 'basic':
            partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_distance.csv" % split)
            split_frame = pd.read_csv(partition_file_path)
        elif use_eval_split == 'exhaustive':
            partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_start.csv" % split)
            split_frame = pd.read_csv(partition_file_path)
            # Check so that saved patches matches with current settings
            #assert split_frame['patch_x'][0] == CONFIG.MISC_patch_size[0] , "
        else:
            # If grid game enabled load that file
            if CONFIG.MISC_grid_game:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
            else:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
            # Check if it exists
            if not os.path.exists(partition_file_path):
                raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
                exit(1)  # NOTE(review): unreachable — the raise above always fires first
            split_frame = pd.read_csv(partition_file_path)
        # Make correct split (partition column: 0=train, 1=val, 2=test)
        if split == 'train':
            split_frame = split_frame.loc[split_frame['partition'] == 0]
        elif split == 'val':
            split_frame = split_frame.loc[split_frame['partition'] == 1]
        elif split == 'test':
            split_frame = split_frame.loc[split_frame['partition'] == 2]
        else:
            print("Unknown split selected for Massachusetts:\t%s" % split)
            exit(1)
        self.image_list = split_frame['image_id'].tolist()
        self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
        self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
        # Allow for image preprocessing by transformer
        self.transform = transform
        if transform is None:
            # Fallback transform used in __getitem__ when no transform is given
            self.tt = transforms.ToTensor()
        self.base_dir = os.path.join(dataset_root_path , "image")
        self.base_seg_dir = os.path.join(dataset_root_path , "label")
        # If we are to load image segmentations as well prepare a transform
        if CONFIG.RL_priv_use_seg:
            self.seg_transform = transforms.Compose([
                transforms.ToTensor(),
            ])
    def getitem__by_image_id(self, image_id):
        """
        Only used for debugging. Returns the PIL image and the stacked
        (start, goal) crop coordinates for every index matching *image_id*.
        """
        ids , start = [] , 0
        try:
            while (True):
                id = self.image_list.index(image_id, start)
                ids.append(id)
                start = id + 1
        except:
            # list.index raises ValueError when no more matches are available
            pass
        if len(ids) == 0:
            raise(Exception("No such image"))
        image_path = os.path.join(self.base_dir, self.image_list[ids[0]])
        image = Image.open(image_path)
        locs = [self.__getitem__(id)[1] for id in ids]
        return image , np.stack(locs)
    def __getitem__(self, idx):
        """Return (image, (start_crop, goal_crop)) for sample *idx*."""
        image_path = os.path.join(self.base_dir, self.image_list[idx])
        image = Image.open(image_path)
        if CONFIG.RL_priv_use_seg:
            # Load image segmentation
            seg_path = os.path.join(self.base_seg_dir , self.image_list[idx])
            seg = Image.open(seg_path)
            # Transform from tif image to a pytorch tensor (keep one channel)
            seg = self.seg_transform(seg)[0,:,:][None,:]
            image = self.seg_transform(image)
            # Concatenate segmentation mask to end of crops
            image = torch.cat((image , seg ) , dim = 0)
        # If transformer available use it
        if self.transform is not None:
            image = self.transform(image)
        else:
            # NOTE(review): when RL_priv_use_seg is set, *image* is already a
            # tensor here and ToTensor would reject it — confirm this branch
            # is only used without segmentation.
            image = self.tt(image)
            pass
        # Get start and goal crop
        start_crop = self.start_crop[idx, :]
        goal_crop = self.goal_crop[idx ,:]
        return (image , ( start_crop , goal_crop))
    def __len__(self):
        return len(self.image_list)
    def _get_image_and_file(self, idx):
        """Debug helper: load the raw image at *idx* and return (img, filepath)."""
        filepath = self.image_list[idx]
        img = Image.open(filepath)
        if self.transform is not None:
            img = self.transform(img)
        return (img , filepath )
class MasaFilt(Dataset):
    """Dataset of aerial images (plus optional segmentation labels) rooted at
    <dataset_root_path>/masa_filt. Mirrors DubaiSeven, but additionally
    rounds the segmentation labels since the stored masks contain smoothed
    (interpolated) pixel values.
    """
    def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False, use_eval_split = False):
        # NOTE(review): generate_new_split_file is accepted but never used here.
        dataset_root_path = os.path.join(dataset_root_path , "masa_filt")
        # Check if we are to use the special eval_split_file
        if use_eval_split == 'basic':
            partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_distance.csv" % split)
            split_frame = pd.read_csv(partition_file_path)
        elif use_eval_split == 'exhaustive':
            partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_start.csv" % split)
            split_frame = pd.read_csv(partition_file_path)
            # Check so that saved patches matches with current settings
            #assert split_frame['patch_x'][0] == CONFIG.MISC_patch_size[0] , "
        else:
            # If grid game enabled load that file
            if CONFIG.MISC_grid_game:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
            else:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
            # Check if it exists
            if not os.path.exists(partition_file_path):
                raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
                exit(1)  # NOTE(review): unreachable — the raise above always fires first
            split_frame = pd.read_csv(partition_file_path)
        # Make correct split (partition column: 0=train, 1=val, 2=test)
        if split == 'train':
            split_frame = split_frame.loc[split_frame['partition'] == 0]
        elif split == 'val':
            split_frame = split_frame.loc[split_frame['partition'] == 1]
        elif split == 'test':
            split_frame = split_frame.loc[split_frame['partition'] == 2]
        else:
            print("Unknown split selected for Massachusetts:\t%s" % split)
            exit(1)
        self.image_list = split_frame['image_id'].tolist()
        self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
        self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
        # Allow for image preprocessing by transformer
        self.transform = transform
        self.base_dir = os.path.join(dataset_root_path , "image")
        self.base_seg_dir = os.path.join(dataset_root_path , "label")
        # If we are to load image segmentations as well prepare a transform
        if CONFIG.RL_priv_use_seg:
            self.seg_transform = transforms.Compose([
                transforms.ToTensor(),
            ])
    def getitem__by_image_id(self, image_id):
        """
        Only used for debugging. Returns the PIL image and the stacked
        (start, goal) crop coordinates for every index matching *image_id*.
        """
        ids , start = [] , 0
        try:
            while (True):
                id = self.image_list.index(image_id, start)
                ids.append(id)
                start = id + 1
        except:
            # list.index raises ValueError when no more matches are available
            pass
        if len(ids) == 0:
            raise(Exception("No such image"))
        image_path = os.path.join(self.base_dir, self.image_list[ids[0]])
        image = Image.open(image_path)
        locs = [self.__getitem__(id)[1] for id in ids]
        return image , np.stack(locs)
    def __getitem__(self, idx):
        """Return (image, (start_crop, goal_crop)) for sample *idx*.

        NOTE(review): unlike DubaiSeven there is no ToTensor fallback, so
        with transform=None and RL_priv_use_seg unset the image is returned
        as a PIL image — confirm callers handle that.
        """
        image_path = os.path.join(self.base_dir, self.image_list[idx])
        image = Image.open(image_path)
        if CONFIG.RL_priv_use_seg:
            # Load image segmentation
            seg_path = os.path.join(self.base_seg_dir , self.image_list[idx])
            seg = Image.open(seg_path)
            # Transform from tif image to a pytorch tensor (keep one channel)
            seg = self.seg_transform(seg)[0,:,:][None,:]
            image = self.seg_transform(image)
            # TODO Fix split_images.py
            # Due to the labels containing some smoothed pixels we do a round to nearest integer here
            seg = torch.round(seg).float()
            # Concatenate segmentation mask to end of crops
            image = torch.cat((image , seg ) , dim = 0)
        # If transformer available use it
        if self.transform is not None:
            image = self.transform(image)
        # Round labels in the image mask. They are smoothed by the interpolation
        if CONFIG.RL_priv_use_seg:
            seg = image[-1,:,:]
            seg = torch.round(seg).float()
            image[-1,:,:] = seg
        # Get start and goal crop
        start_crop = self.start_crop[idx, :]
        goal_crop = self.goal_crop[idx ,:]
        return (image , ( start_crop , goal_crop))
    def __len__(self):
        return len(self.image_list)
    def _get_image_and_file(self, idx):
        """Debug helper: load the raw image at *idx* and return (img, filepath)."""
        filepath = self.image_list[idx]
        img = Image.open(filepath)
        if self.transform is not None:
            img = self.transform(img)
        return (img , filepath )
class MasaSeven(Dataset):
    """Massachusetts "seven" dataset; yields (image, (start_crop, goal_crop)).

    Crop locations are read from a pre-generated split csv file. If
    CONFIG.RL_priv_use_seg is set the segmentation mask is concatenated as
    an extra channel of the returned image tensor.
    """
    def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False, use_eval_split = False):
        dataset_root_path = os.path.join(dataset_root_path , "masa_seven")
        # Check if we are to use a special evaluation split file
        if use_eval_split == 'basic':
            partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_distance.csv" % split)
            split_frame = pd.read_csv(partition_file_path)
        elif use_eval_split == 'exhaustive':
            partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_start.csv" % split)
            split_frame = pd.read_csv(partition_file_path)
        else:
            # Regular split file; grid variant if the grid game is enabled
            if CONFIG.MISC_grid_game:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
            else:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
            if not os.path.exists(partition_file_path):
                raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
            split_frame = pd.read_csv(partition_file_path)
        # Select the requested partition: 0 = train, 1 = val, 2 = test
        if split == 'train':
            split_frame = split_frame.loc[split_frame['partition'] == 0]
        elif split == 'val':
            split_frame = split_frame.loc[split_frame['partition'] == 1]
        elif split == 'test':
            split_frame = split_frame.loc[split_frame['partition'] == 2]
        else:
            print("Unknown split selected for Massachusetts:\t%s" % split)
            exit(1)
        self.image_list = split_frame['image_id'].tolist()
        self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
        self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
        # Allow for image preprocessing by transformer
        self.transform = transform
        # Fallback to a plain to-tensor conversion when no transform is given
        if transform is None:
            self.tt = transforms.ToTensor()
        self.base_dir = os.path.join(dataset_root_path , "image")
        self.base_seg_dir = os.path.join(dataset_root_path , "label")
        # If we are to load image segmentations as well prepare a transform
        if CONFIG.RL_priv_use_seg:
            self.seg_transform = transforms.Compose([
                transforms.ToTensor(),
                ])
    def getitem__by_image_id(self, image_id):
        """
        Return the full image and all crop locations for a given image id.
        Only used for debugging.
        """
        ids , start = [] , 0
        # list.index raises ValueError once no further occurrence exists;
        # the previous bare `except:` also swallowed unrelated errors.
        try:
            while (True):
                id = self.image_list.index(image_id, start)
                ids.append(id)
                start = id + 1
        except ValueError:
            pass
        if len(ids) == 0:
            raise(Exception("No such image"))
        image_path = os.path.join(self.base_dir, self.image_list[ids[0]])
        image = Image.open(image_path)
        locs = [self.__getitem__(id)[1] for id in ids]
        return image , np.stack(locs)
    def __getitem__(self, idx):
        image_path = os.path.join(self.base_dir, self.image_list[idx])
        image = Image.open(image_path)
        if CONFIG.RL_priv_use_seg:
            # Load segmentation, keep one channel and append it to the image
            seg_path = os.path.join(self.base_seg_dir , self.image_list[idx])
            seg = Image.open(seg_path)
            seg = self.seg_transform(seg)[0,:,:][None,:]
            image = self.seg_transform(image)
            image = torch.cat((image , seg ) , dim = 0)
        # If transformer available use it, otherwise just convert to tensor
        if self.transform is not None:
            image = self.transform(image)
        else:
            image = self.tt(image)
        # Get start and goal crop locations
        start_crop = self.start_crop[idx, :]
        goal_crop = self.goal_crop[idx ,:]
        return (image , ( start_crop , goal_crop))
    def __len__(self):
        return len(self.image_list)
    def _get_image_and_file(self, idx):
        # Join with base_dir so the file can actually be opened; the split
        # file only stores relative names (cf. __getitem__, CustomDataset).
        filepath = os.path.join(self.base_dir , self.image_list[idx])
        img = Image.open(filepath)
        if self.transform is not None:
            img = self.transform(img)
        return (img , filepath )
class Masa(Dataset):
    """Massachusetts dataset; yields (image, (start_crop, goal_crop)).

    Crop locations come from a pre-generated split csv. If
    CONFIG.RL_priv_use_seg is set the segmentation mask is appended as an
    extra channel of the returned image tensor.
    """
    def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False):
        dataset_root_path = os.path.join(dataset_root_path , "masa")
        # If grid game enabled load that file
        if CONFIG.MISC_grid_game:
            partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
        else:
            partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
        if not os.path.exists(partition_file_path):
            raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
        split_frame = pd.read_csv(partition_file_path)
        # Select the requested partition: 0 = train, 1 = val, 2 = test
        if split == 'train':
            split_frame = split_frame.loc[split_frame['partition'] == 0]
        elif split == 'val':
            split_frame = split_frame.loc[split_frame['partition'] == 1]
        elif split == 'test':
            split_frame = split_frame.loc[split_frame['partition'] == 2]
        else:
            print("Unknown split selected for Massachusetts:\t%s" % split)
            exit(1)
        self.image_list = split_frame['image_id'].tolist()
        self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
        self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
        # Allow for image preprocessing by transformer
        self.transform = transform
        self.base_dir = os.path.join(dataset_root_path , "image")
        self.base_seg_dir = os.path.join(dataset_root_path , "label")
        # If segmentation masks are used, prepare a tensor transform for them
        if CONFIG.RL_priv_use_seg:
            self.seg_transform = transforms.Compose([
                transforms.ToTensor(),
                ])
    def __getitem__(self, idx):
        image_path = os.path.join(self.base_dir, self.image_list[idx])
        image = Image.open(image_path)
        if CONFIG.RL_priv_use_seg:
            # Load segmentation, keep one channel and append it to the image
            seg_path = os.path.join(self.base_seg_dir , self.image_list[idx])
            seg = Image.open(seg_path)
            seg = self.seg_transform(seg)[0,:,:][None,:]
            image = self.seg_transform(image)
            image = torch.cat((image , seg ) , dim = 0)
        # If transformer available use it
        if self.transform is not None:
            image = self.transform(image)
        # Get start and goal crop locations
        start_crop = self.start_crop[idx, :]
        goal_crop = self.goal_crop[idx ,:]
        return (image , ( start_crop , goal_crop))
    def __len__(self):
        return len(self.image_list)
    def _get_image_and_file(self, idx):
        # Join with base_dir: the split file stores relative names (cf. __getitem__)
        filepath = os.path.join(self.base_dir , self.image_list[idx])
        img = Image.open(filepath)
        if self.transform is not None:
            img = self.transform(img)
        return (img , filepath )
class Dubai(Dataset):
    """Dubai dataset; yields (image, (start_crop, goal_crop)).

    Crop locations come from a pre-generated split csv. If
    CONFIG.RL_priv_use_seg is set the segmentation mask is appended to the
    returned image tensor.
    """
    def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False, use_eval_split = False):
        dataset_root_path = os.path.join(dataset_root_path , "dubai")
        # Check if we are to use a special evaluation split file
        if use_eval_split == 'basic':
            partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_distance.csv" % split)
            split_frame = pd.read_csv(partition_file_path)
        elif use_eval_split == 'exhaustive':
            partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_start.csv" % split)
            split_frame = pd.read_csv(partition_file_path)
        else:
            # Regular split file; grid variant if the grid game is enabled
            if CONFIG.MISC_grid_game:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
            else:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
            if not os.path.exists(partition_file_path):
                raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
            split_frame = pd.read_csv(partition_file_path)
        # Select the requested partition: 0 = train, 1 = val, 2 = test
        if split == 'train':
            split_frame = split_frame.loc[split_frame['partition'] == 0]
        elif split == 'val':
            split_frame = split_frame.loc[split_frame['partition'] == 1]
        elif split == 'test':
            split_frame = split_frame.loc[split_frame['partition'] == 2]
        else:
            # Fixed error message: previously claimed "Massachusetts"
            print("Unknown split selected for Dubai:\t%s" % split)
            exit(1)
        self.image_list = split_frame['image_id'].tolist()
        self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
        self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
        # Allow for image preprocessing by transformer
        self.transform = transform
        self.base_dir = os.path.join(dataset_root_path , "image")
        self.base_seg_dir = os.path.join(dataset_root_path , "label")
        # If segmentation masks are used, prepare a tensor transform for them
        if CONFIG.RL_priv_use_seg:
            self.seg_transform = transforms.Compose([
                transforms.ToTensor(),
                ])
    def __getitem__(self, idx):
        image_path = os.path.join(self.base_dir, self.image_list[idx])
        image = Image.open(image_path)
        if CONFIG.RL_priv_use_seg:
            # Load segmentation (all channels kept) and append it to the image
            seg_path = os.path.join(self.base_seg_dir , self.image_list[idx])
            seg = Image.open(seg_path)
            seg = self.seg_transform(seg)
            image = self.seg_transform(image)
            image = torch.cat((image , seg ) , dim = 0)
        # If transformer available use it
        if self.transform is not None:
            image = self.transform(image)
        # Get start and goal crop locations
        start_crop = self.start_crop[idx, :]
        goal_crop = self.goal_crop[idx ,:]
        return (image , ( start_crop , goal_crop))
    def __len__(self):
        return len(self.image_list)
    def _get_image_and_file(self, idx):
        # Join with base_dir: the split file stores relative names (cf. __getitem__)
        filepath = os.path.join(self.base_dir , self.image_list[idx])
        img = Image.open(filepath)
        if self.transform is not None:
            img = self.transform(img)
        return (img , filepath )
# Hmm maybe should inherit ImageFolder... but decided not to
class CustomDataset():
    """User-provided dataset living under <datasets_root>/Custom/<name>.

    Yields (image, (start_crop, goal_crop)); crop locations come from a
    split csv (optionally a custom one via custom_split_file).
    """
    def __init__(self, datasets_root_path , custom_dataset_name,split = 'train' , transform = None, custom_split_file = None):
        dataset_root_path = os.path.join(datasets_root_path , "Custom", custom_dataset_name)
        if not os.path.exists(datasets_root_path):
            os.makedirs(datasets_root_path)
        # Try to locate this specific custom dataset
        if not os.path.exists(dataset_root_path):
            print("Unable to find this dataset:\t%s" % custom_dataset_name)
            exit(1)
        if custom_split_file is None:
            # No custom split file selected, use the standard split file
            # (grid variant if the grid game is enabled)
            if CONFIG.MISC_grid_game:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
            else:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
        else:
            # Use custom split file
            partition_file_path = os.path.join(dataset_root_path , custom_split_file)
        if not os.path.exists(partition_file_path):
            raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
        split_frame = pd.read_csv(partition_file_path)
        # Select the requested partition: 0 = train, 1 = val, 2 = test
        if split == 'train':
            split_frame = split_frame.loc[split_frame['partition'] == 0]
        elif split == 'val':
            split_frame = split_frame.loc[split_frame['partition'] == 1]
        elif split == 'test':
            split_frame = split_frame.loc[split_frame['partition'] == 2]
        else:
            print("Unknown split selected:\t%s" % split)
            exit(1)
        self.image_list = split_frame['image_id'].tolist()
        self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
        self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
        # Allow for image preprocessing by transformer
        self.transform = transform
        self.base_dir = os.path.join(dataset_root_path , "image" )
    def __getitem__(self, idx):
        image_path = os.path.join(self.base_dir , self.image_list[idx] )
        image = Image.open(image_path)
        # If transformer available use it
        if self.transform is not None:
            image = self.transform(image)
        # Get start and goal crop locations
        start_crop = self.start_crop[idx, :]
        goal_crop = self.goal_crop[idx ,:]
        return (image , (start_crop , goal_crop))
    def __len__(self):
        return len(self.image_list)
    def _get_image_and_file(self, idx):
        filepath = os.path.join(self.base_dir ,self.image_list[idx])
        img = Image.open(filepath)
        if self.transform is not None:
            img = self.transform(img)
        return (img ,filepath )
class MasaFull(Dataset):
    """Full Massachusetts images with random 500x500 crops.

    Unlike the other datasets there are no start/goal crop locations;
    __getitem__ returns empty lists in their place to keep the common
    (image, (start, goal)) interface.
    """
    def __init__(self, datasets_root_path , split = 'train' , transform = None , randomRotation = False ):
        dataset_root_path = os.path.join(datasets_root_path , "masa_full")
        # Whether to apply a random rotation before cropping
        self.randomRotation = randomRotation
        if not os.path.exists(datasets_root_path):
            os.makedirs(datasets_root_path)
        # Also pngs are available
        image_folder_path = os.path.join(dataset_root_path, "tiff")
        # Dataset is already split on disk, no need for split file
        if split == 'train':
            image_folder_path = os.path.join(image_folder_path , "train")
        elif split == 'val':
            image_folder_path = os.path.join(image_folder_path , "val")
        elif split == 'test':
            image_folder_path = os.path.join(image_folder_path , "test")
        else:
            raise(Exception("Unknown split:\t%s" % split))
        # File names (full paths, unlike the csv-backed datasets)
        self.image_list = [os.path.join(image_folder_path , x) for x in os.listdir(image_folder_path)]
        # Random Crop
        self.rc = transforms.RandomCrop(size = [500 , 500])
        # from PIL to tensor transform
        self.tt = transforms.ToTensor()
        self.transform = transform
    def __getitem__(self, idx):
        """Return (cropped image tensor, ([], []))."""
        # Load the image
        image = Image.open(self.image_list[idx])
        # Make it tensor
        #image = self.tt(image)
        # If random rotation is enabled. do it
        if self.randomRotation:
            #angle = torch.rand(size = (1,)).item() * 90.0
            angle = torch.randint( low = -180 , high = 180 , size = (1,)).float().item()
            image = F.rotate(image , angle , fill = None)
            # Get center crop dimensions
            # Normalize the angle into [0, 90] for the crop-size formula below
            while ( angle < 0 or angle > 90):
                angle = angle + 90 if angle < 0 else angle
                angle = angle - 90 if angle > 90 else angle
            # Side of the axis-aligned center square kept after rotation
            # (assumes a square source image — TODO confirm)
            size = int( image.size[-1] / (np.cos(np.radians(angle)) + np.sin(np.radians(angle))) )
            image = F.center_crop(image , [size , size])
        # Now do random 500x500 crop
        image = self.rc(image)
        # Do regular data augmentation if available
        if self.transform:
            image = self.transform(image)
        else:
            image = self.tt(image)
        # No start and goal crops. Only for show
        return (image, ([],[]))
    def __len__(self):
        return len(self.image_list)
class ImagesPre(Dataset):
    """Pre-disaster image dataset (or post-disaster with post_instead=True).

    Yields (image, (start_crop, goal_crop)); crop locations come from a
    pre-generated split csv. Segmentation masks are not available here.
    """
    def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False, use_eval_split = False, post_instead = False):
        if post_instead:
            dataset_root_path = os.path.join(dataset_root_path , "images_post")
        else:
            dataset_root_path = os.path.join(dataset_root_path , "images_pre")
        # Check if we are to use a special evaluation split file
        if use_eval_split == 'basic':
            partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_distance.csv" % split)
            split_frame = pd.read_csv(partition_file_path)
        elif use_eval_split == 'exhaustive':
            partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_start.csv" % split)
            split_frame = pd.read_csv(partition_file_path)
        else:
            # Regular split file; grid variant if the grid game is enabled
            if CONFIG.MISC_grid_game:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
            else:
                partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
            if not os.path.exists(partition_file_path):
                raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
            split_frame = pd.read_csv(partition_file_path)
        # Select the requested partition: 0 = train, 1 = val, 2 = test
        if split == 'train':
            split_frame = split_frame.loc[split_frame['partition'] == 0]
        elif split == 'val':
            split_frame = split_frame.loc[split_frame['partition'] == 1]
        elif split == 'test':
            split_frame = split_frame.loc[split_frame['partition'] == 2]
        else:
            print("Unknown split selected for ImagesPre:\t%s" % split)
            exit(1)
        self.image_list = split_frame['image_id'].tolist()
        self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
        self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
        # Allow for image preprocessing by transformer
        self.transform = transform
        self.base_dir = os.path.join(dataset_root_path , "image")
        # Kept for interface parity although this dataset has no masks
        if CONFIG.RL_priv_use_seg:
            self.seg_transform = transforms.Compose([
                transforms.ToTensor(),
                ])
    def getitem__by_image_id(self, image_id):
        """
        Return the full image and all crop locations for a given image id.
        Only used for debugging.
        """
        ids , start = [] , 0
        # list.index raises ValueError once no further occurrence exists;
        # the previous bare `except:` also swallowed unrelated errors.
        try:
            while (True):
                id = self.image_list.index(image_id, start)
                ids.append(id)
                start = id + 1
        except ValueError:
            pass
        if len(ids) == 0:
            raise(Exception("No such image"))
        image_path = os.path.join(self.base_dir, self.image_list[ids[0]])
        image = Image.open(image_path)
        locs = [self.__getitem__(id)[1] for id in ids]
        return image , np.stack(locs)
    def __getitem__(self, idx):
        image_path = os.path.join(self.base_dir, self.image_list[idx])
        image = Image.open(image_path)
        if CONFIG.RL_priv_use_seg:
            # Segmentation masks are not available for this dataset
            print("No semantic masks for this dataset")
            sys.exit(0)
        # If transformer available use it
        if self.transform is not None:
            image = self.transform(image)
        # Get start and goal crop locations
        start_crop = self.start_crop[idx, :]
        goal_crop = self.goal_crop[idx ,:]
        return (image , ( start_crop , goal_crop))
    def __len__(self):
        return len(self.image_list)
    def _get_image_and_file(self, idx):
        # Join with base_dir: the split file stores relative names (cf. __getitem__)
        filepath = os.path.join(self.base_dir , self.image_list[idx])
        img = Image.open(filepath)
        if self.transform is not None:
            img = self.transform(img)
        return (img , filepath )
def _generate_split_file( split_file_path , grid_game = False):
    """Add random start/goal crop locations to an existing split csv file.

    Reads the partition csv at split_file_path (partition column:
    0 = train, 1 = val, 2 = test), samples one (start, goal) crop location
    per row and writes the frame back. With grid_game the image is divided
    into a uniform grid of patch-sized cells and locations are sampled on
    that grid; the output file name then gets a '_grid' suffix.

    Raises:
        FileNotFoundError: if split_file_path does not exist.
    """
    try:
        dataframe = pd.read_csv(split_file_path)
    except FileNotFoundError:
        raise(FileNotFoundError("Eval partion file not found:\t%s" % split_file_path ))
    # Generate start and goal crops, insert into dataframe and write back
    N = dataframe.shape[0]
    H_img , W_img = CONFIG.MISC_im_size
    H_patch , W_patch = CONFIG.MISC_patch_size
    # Dedicated generator with a hard-coded seed so crop locations are
    # reproducible across runs
    num_gen = np.random.default_rng(0)
    if grid_game:
        grid_H , grid_W = int(H_img / H_patch) , int(W_img / W_patch)
        # Bug fix: sample from the seeded generator (this branch previously
        # used np.random, defeating the fixed-seed reproducibility above)
        grid_locs = np.floor( num_gen.uniform( low = [0,0, 0,0] , high = [grid_H , grid_W, grid_H ,grid_W] , size = (N, 4)))
        # TODO - Add so that start and goal does not get on same patch
        # Convert from grid location to pixel location
        crop_locs = (grid_locs * np.array(2*CONFIG.MISC_patch_size)).astype('int64')
        # Also alter split file name and append 'grid'
        dirname = os.path.dirname(split_file_path)
        filename = os.path.basename(split_file_path)
        temp = filename.split('.') # Assumes only single dot in filename
        new_filename = temp[0] + '_grid.' + temp[1]
        split_file_path = os.path.join(dirname ,new_filename)
    else:
        # Else just sample any pixel location such that the patch fits
        # ( start_x , start_y , goal_x , goal_y)
        crop_locs = num_gen.integers((0,0,0,0) , (H_img-H_patch , W_img-W_patch , H_img-H_patch , W_img - W_patch) , size = (N , 4))
    # Insert into dataframe
    dataframe['start_x'] = crop_locs[:,0]
    dataframe['start_y'] = crop_locs[:,1]
    dataframe['goal_x'] = crop_locs[:,2]
    dataframe['goal_y'] = crop_locs[:,3]
    # Write back!
    dataframe.to_csv(split_file_path, index = False)
def _create_base_split_file( datasets_root_path = None, dataset = None, relative_image_path = None):
    """Write a fresh list_eval_partition.csv with a shuffled 70/15/15 split."""
    # Fall back to the configured defaults when arguments are omitted
    if datasets_root_path is None:
        datasets_root_path = CONFIG.MISC_dataset_path
    if dataset is None:
        dataset = CONFIG.MISC_dataset
    if relative_image_path is None:
        relative_image_path = "image/*"
    dataset_root_path = os.path.join(datasets_root_path , dataset)
    # Collect the image files and keep only their base names
    found_files = glob.glob(os.path.join(dataset_root_path , relative_image_path))
    image_list = [os.path.basename(f) for f in found_files]
    nbr_img = len(image_list)
    # Partition labels: 0 = train (70%), 1 = val (15%), 2 = test (15%),
    # shuffled so the assignment is random over the images
    partition = np.zeros(nbr_img)
    partition[int(nbr_img * 0.7):int(nbr_img * 0.85)] += 1
    partition[int(nbr_img * 0.85):] += 2
    random.shuffle(partition)
    dataframe = pd.DataFrame({"image_id": image_list, "partition": partition})
    split_file_path = os.path.join(dataset_root_path , 'list_eval_partition.csv')
    dataframe.to_csv(split_file_path , index = False)
if __name__ == '__main__':
    # This part is used to be able to simply generate split files for datasets
    # Set random seed
    random.seed(CONFIG.MISC_random_seed)
    np.random.seed(CONFIG.MISC_random_seed)
    torch.manual_seed(CONFIG.MISC_random_seed)
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset" , "-d", type = str, help = "Select dataset")
    parser.add_argument("--grid-game" ,"-g" , action = 'store_true', default = False)
    # DEBUGGING
    parser.add_argument("--debug-masa-full" , action = 'store_true' , default = False)
    parser.add_argument("--random-rotation" , action = 'store_true' , default = False)
    args = parser.parse_args()
    if args.debug_masa_full:
        import matplotlib
        import matplotlib.pyplot as plt
        matplotlib.use('TkAgg')
        torch.manual_seed(0)
        d_rot = MasaFull(".." , randomRotation = True)
        d_no_rot = MasaFull(".." , randomRotation = False)
        for i in range(10):
            # __getitem__ returns (image, (start, goal)); keep only the image
            # (previously the whole tuple was passed to .permute, crashing)
            image_rot = d_rot.__getitem__(i)[0]
            image_no_rot = d_no_rot.__getitem__(i)[0]
            fig , axes = plt.subplots(1,2)
            axes[0].imshow(image_rot.permute(1,2,0))
            axes[1].imshow(image_no_rot.permute(1,2,0))
            plt.show()
            plt.close('all')
    known_datasets = ['dubai','masa','masa_filt','masa_seven','dubai_seven', 'images_pre', 'images_post']
    if args.dataset is not None:
        # Check if custom or regular dataset
        if (args.dataset.startswith('custom_')):
            # Check that custom dataset actually exists
            if not os.path.exists(os.path.join(CONFIG.MISC_dataset_path , "Custom", args.dataset[7:])):
                print("Dataset does not exists:\t%s" % os.path.join("Custom", args.dataset[7:]))
                exit(1)
            _create_base_split_file(dataset = os.path.join("Custom", args.dataset[7:]) )
            split_file_path = os.path.join(CONFIG.MISC_dataset_path, "Custom", args.dataset[7:], "list_eval_partition.csv")
        elif args.dataset in known_datasets:
            # Regular dataset
            _create_base_split_file(dataset = args.dataset)
            split_file_path = os.path.join(CONFIG.MISC_dataset_path , args.dataset , "list_eval_partition.csv")
        else:
            print("No dataset is found")
            exit(1)
    else:
        print("Not a valid dataset!")
        exit(1)
    # then generate split file with random start and goal crops
    _generate_split_file(split_file_path , args.grid_game)
| 37,541 | 36.617234 | 186 | py |
airloc | airloc-master/utils/utils.py | import math
import json
import os
import time
import zipfile
import signal
import pdb
import gc
from shutil import copyfile
import numpy as np
import torch
from glob import glob
# Might not be available on RISE
import seaborn as sns
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import random_split,Subset
import matplotlib
matplotlib.use('Agg')
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import urllib
from utils.dataset_utils import CustomDataset,Dubai,Masa, MasaSeven, MasaFilt, MasaFull, DubaiSeven, ImagesPre
from dateutil.parser import parse
# from torchviz import make_dot
from config import CONFIG
def move_crop(crop, offset_xy):
    """
    Translate crops by (x, y) offsets.

    crop has 4 columns in the (h, w)-coordinate system:
        1. Top-coordinate of the crop
        2. Left-coordinate of the crop
        3. Height of the crop
        4. Width of the crop
    offset_xy has 2 columns: movement in x- and y-direction.

    A movement of +y decreases the top coordinate (y points up) and a
    movement of +x increases the left coordinate. Works for both numpy
    arrays and torch tensors; the input is not modified.
    """
    # Allocate the result with the same backend as the offsets
    if isinstance(offset_xy, np.ndarray):
        shifted = np.zeros_like(crop)
    else:
        shifted = torch.zeros_like(crop)
    # Height/width columns are carried over unchanged
    shifted[:, 2:] = crop[:, 2:]
    # Translate: (x, y) -> (h, w) means h -= y and w += x
    shifted[:, 0] = crop[:, 0] - offset_xy[:, 1]
    shifted[:, 1] = crop[:, 1] + offset_xy[:, 0]
    return shifted
def compute_iou(crop1, crop2):
    """
    Compute the intersection-over-union (IoU) between rows of crop1 and crop2.

    Any given row of either crop has the following format:
        1. Top-coordinate of the crop
        2. Left-coordinate of the crop
        3. Height of the crop
        4. Width of the crop
    Inputs may be lists, tuples, or 1D/2D arrays; they are promoted to 2D.

    Returns:
        numpy array with the IoU for each pair of rows (0 for disjoint crops).
    """
    # Ensure correct data types and dims
    if isinstance(crop1, tuple) or isinstance(crop1, list):
        crop1 = np.array(crop1)[np.newaxis, :]
    if isinstance(crop2, tuple) or isinstance(crop2, list):
        crop2 = np.array(crop2)[np.newaxis, :]
    if crop1.ndim == 1:
        crop1 = crop1[np.newaxis, :]
    if crop2.ndim == 1:
        crop2 = crop2[np.newaxis, :]
    # Coordinates of the intersection bounding box.
    # (Bug fix: a try/except here used to print the inputs and fall through,
    # producing a NameError that masked the real error; let it propagate.)
    ihmins = np.maximum(crop1[:, 0], crop2[:, 0])
    ihmaxs = np.minimum(crop1[:, 0] + crop1[:, 2], crop2[:, 0] + crop2[:, 2])
    iwmins = np.maximum(crop1[:, 1], crop2[:, 1])
    iwmaxs = np.minimum(crop1[:, 1] + crop1[:, 3], crop2[:, 1] + crop2[:, 3])
    # Clamp to zero so disjoint crops yield zero intersection
    iws = np.maximum(iwmaxs - iwmins, 0)
    ihs = np.maximum(ihmaxs - ihmins, 0)
    # Area of the intersection
    inters = iws * ihs
    # Area of the union
    unis = crop1[:, 2] * crop1[:, 3] + crop2[:, 2] * crop2[:, 3] - inters
    # IoUs between crop1 and crop2
    return inters / unis
def get_frac_outside(crop):
    """
    Fraction of crops that fall at least partially outside the full image.

    Each row of crop is (top, left, height, width); the image extent is
    taken from CONFIG.MISC_im_size.
    """
    H_img, W_img = CONFIG.MISC_im_size
    # Edge coordinates of every crop
    tops , lefts = crop[:, 0] , crop[:, 1]
    bottoms = tops + crop[:, 2]
    rights = lefts + crop[:, 3]
    # A crop counts as outside when any of its edges leaves the image area
    outsides = np.logical_or(
        np.logical_or(tops < 0, bottoms >= H_img),
        np.logical_or(lefts < 0, rights >= W_img))
    return np.count_nonzero(outsides) / len(outsides)
def normalize_coords(coords_xy, crop_locs_start, crop_locs_goal, unnormalize=False):
    """Scale (x, y) offsets by the image extent minus half the patch sizes.

    The divisor per axis is the full-image size minus half the start-patch
    and half the goal-patch extent in that direction. With unnormalize=True
    the inverse (multiplication) is applied instead. Works on both numpy
    arrays and torch tensors; the input is copied, not modified in place.

    Args:
        coords_xy: (N, 2) x/y offsets.
        crop_locs_start: (N, 4) rows (top, left, height, width).
        crop_locs_goal: (N, 4) rows (top, left, height, width).
        unnormalize: if True, invert the normalization.
    """
    # Get size of the full image
    H_img, W_img = CONFIG.MISC_im_size
    # Start and goal bbox heights / widths
    heights_start = crop_locs_start[:, 2]
    widths_start = crop_locs_start[:, 3]
    heights_goal = crop_locs_goal[:, 2]
    widths_goal = crop_locs_goal[:, 3]
    # Copy so the caller's array/tensor is left untouched
    if isinstance(coords_xy, np.ndarray):
        coords_xy_out = np.copy(coords_xy)
    else:
        coords_xy_out = torch.clone(coords_xy)
    if unnormalize:
        coords_xy_out[:, 0] *= (W_img - widths_start / 2 - widths_goal / 2)
        coords_xy_out[:, 1] *= (H_img - heights_start / 2 - heights_goal / 2)
    else:
        coords_xy_out[:, 0] /= (W_img - widths_start / 2 - widths_goal / 2)
        coords_xy_out[:, 1] /= (H_img - heights_start / 2 - heights_goal / 2)
    return coords_xy_out
def _setup_fixed_games(n_games = 5):
    """Randomly sample n_games fixed (start, goal) crop-location pairs.

    Returns a tuple (start_crop_locs, goal_crop_locs), each of shape
    (n_games, 4). Goal locations are constrained to lie within
    CONFIG.RL_max_start_goal_dist of their start location.
    """
    # Dummy zero images only provide the shape needed by get_random_crops
    images = torch.zeros((n_games, 3, CONFIG.MISC_im_size[0] , CONFIG.MISC_im_size[1]))
    _ , start_crop_locs = get_random_crops(images)
    # Then sample the goal locs
    _ , goal_crop_locs = get_random_crops( images, other_crop_locs = start_crop_locs , max_dist = CONFIG.RL_max_start_goal_dist)
    return start_crop_locs , goal_crop_locs
def get_random_crops(images, other_crop_locs=None, max_dist=None, min_iou=None):
    """
    Sample one random patch location per image and return the crops.

    Returns (crops, crop_locations) where crops has shape
    (N, C, H_patch, W_patch) and crop_locations has rows
    (top, left, height, width). With CONFIG.MISC_grid_game the locations
    are drawn from a uniform grid of patch-sized cells, otherwise from any
    pixel position.

    Note that if max_dist and min_iou are both provided, then only max_dist
    will be used. Hence, for min_iou to take effect, max_dist has to be None.
    """
    # Define some useful constants
    H, W = CONFIG.MISC_patch_size
    step_sz = int(CONFIG.RL_softmax_step_size*H)
    im_H,im_W = CONFIG.MISC_im_size
    crop_locations = torch.zeros((images.shape[0], 4))
    n_chan = images.shape[1]
    n_imgs = images.shape[0]
    # Initialize memory for the crops size = (batch, n_chan, H_p, W_p)
    # Keep the number of channels at a constant
    crops = torch.zeros(size=(n_imgs, n_chan, H, W))
    for i in range(images.shape[0]):
        if CONFIG.MISC_grid_game:
            # Image is divided into a static uniform grid. Patches are sampled from this grid
            upper_H , upper_W = int(im_H / H) , int(im_W / W)
            lower_H , lower_W = ( 0 , 0)
            target_pos = np.array([-1,-1])
            if max_dist is not None:
                # Restrict the sampling window to a (2*max_dist+1)^2 grid
                # neighbourhood around the other crop
                target_pos = (other_crop_locs[i,0:2].numpy() / np.array(CONFIG.MISC_patch_size)).astype('int64')
                upper_H , upper_W = (min(target_pos[0] + max_dist + 1, upper_H),min(target_pos[1] + max_dist + 1, upper_W)) #Has to be in int
                lower_H , lower_W = (max(target_pos[0] - (max_dist ), 0) , max(target_pos[1] - max_dist, 0))
            grid_loc = np.floor(np.random.uniform( low = [lower_H ,lower_W] , high = [upper_H , upper_W]))
            # Re-sample until the location differs from the other crop's cell
            while (grid_loc == target_pos).all():
                grid_loc = np.floor(np.random.uniform( low = [lower_H ,lower_W] , high = [upper_H , upper_W]))
            crop_loc = np.concatenate(((grid_loc ) *
                np.array(step_sz) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
        else:
            # Sample the patches entirly at random
            break_while = False
            while not break_while:
                crop_loc = transforms.RandomCrop.get_params(images[i, :, :, :][np.newaxis, :],
                                                            output_size=CONFIG.MISC_patch_size)
                break_while = other_crop_locs is None or (max_dist is None and min_iou is None)
                if not break_while:
                    # At this stage we may want to ensure that the sampled crop
                    # is not too far away from other_crop_locs, or that they
                    # do not have too little IoU-overlap
                    if max_dist is not None:
                        offset = get_regression_targets(crop_loc, other_crop_locs[i, :][np.newaxis, :],
                                                        normalize=False)
                        break_while = np.linalg.norm(offset) <= max_dist
                    elif min_iou is not None:
                        iou = compute_iou(crop_loc, other_crop_locs[i, :][np.newaxis, :])
                        break_while = iou >= min_iou
        crop_locations[i, :] = torch.Tensor(np.array(crop_loc, dtype = int))
        crops[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=crop_loc[0],
                                                             left=crop_loc[1], height=crop_loc[2],
                                                             width=crop_loc[3])
    return crops, crop_locations
def get_deterministic_crops(images,coords = [0,0]):
    """
    Extract crops from the given (top, left) coordinates.

    coords may be a list, numpy array, or torch tensor; a single
    coordinate pair is broadcast to all images. Returns
    (crops, crop_locations) with crop_locations rows
    (top, left, height, width).
    """
    # Define some useful constants
    H, W = CONFIG.MISC_patch_size
    im_H,im_W = CONFIG.MISC_im_size
    crop_locations = torch.zeros((images.shape[0], 4))
    n_chan = images.shape[1]
    n_imgs = images.shape[0]
    # Initialize memory for the crops size = (batch, n_chan, H_p, W_p)
    # Keep the number of channels at a constant
    crops = torch.zeros(size=(n_imgs, n_chan, H, W))
    # Coords can be supplied as list or tensor but needs to be in correct numpy format
    if isinstance(coords , torch.Tensor):
        coords = coords.detach().numpy()
    if isinstance(coords , list):
        coords = np.array(coords)
    if len(coords.shape) == 1:
        # Broadcast a single coordinate pair to the whole batch
        coords = np.expand_dims(coords, 0)
        coords = np.repeat(coords , n_imgs , 0)
    # Iterate over the images getting the correct patches
    for i in range(n_imgs):
        h = int(coords[i][0])
        w = int(coords[i][1])
        crop_loc = [h , w , H,W]
        if not CONFIG.RL_agent_allowed_outside and check_outside(torch.as_tensor(crop_loc)[None,:]):
            # if the agent is not allowed outside and is going to end up outside
            # move it in again
            crop_loc = project_into_image(crop_loc)
        # Sample the crop
        # NOTE(review): the [:,:48,:48] slice hard-codes a 48x48 patch —
        # presumably matches CONFIG.MISC_patch_size; verify before changing
        crops[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top= crop_loc[0], left=crop_loc[1], height=H, width=W)[:,:48,:48]
        crop_locations[i, :] = torch.Tensor(np.array(crop_loc))
    return crops, crop_locations
def get_regression_targets(crop_locs_start, crop_locs_goal, normalize=True):
    """Compute (x, y) offsets from start crop centres to goal crop centres.

    Crop locations are rows of [top, left, height, width]. Offsets are
    measured between patch centres in an (x, y) frame whose y-axis points
    upwards (mathematical convention), i.e. flipped relative to image rows.
    With normalize=True the offsets are mapped into the [-1, 1] range.
    """
    H_img, W_img = CONFIG.MISC_im_size

    def _as_batch(locs):
        # Accept tuples/lists and bare 1-d arrays; always yield a 2-d batch.
        if isinstance(locs, (tuple, list)):
            locs = np.array(locs)[np.newaxis, :]
        if isinstance(locs, np.ndarray) and locs.ndim == 1:
            locs = locs[np.newaxis, :]
        return locs

    def _centres(locs):
        # (top, left, h, w) -> centre (x, y) with y growing upwards.
        tops = locs[:, 0]
        lefts = locs[:, 1]
        heights = locs[:, 2]
        widths = locs[:, 3]
        xs = lefts + widths / 2
        ys = H_img - (tops + heights / 2)
        return xs, ys

    crop_locs_start = _as_batch(crop_locs_start)
    crop_locs_goal = _as_batch(crop_locs_goal)

    xs_start, ys_start = _centres(crop_locs_start)
    xs_goal, ys_goal = _centres(crop_locs_goal)

    xs_offset = xs_goal - xs_start
    ys_offset = ys_goal - ys_start

    # Stack into (batch, 2), keeping the input's array flavour (numpy/torch)
    if isinstance(xs_offset, np.ndarray):
        regression_targets = np.concatenate(
            [xs_offset[:, np.newaxis], ys_offset[:, np.newaxis]], 1)
    else:
        regression_targets = torch.cat(
            [torch.unsqueeze(xs_offset, 1), torch.unsqueeze(ys_offset, 1)], 1)

    # Potentially normalize regression targets to the [-1, 1]-range
    if normalize:
        regression_targets = normalize_coords(regression_targets, crop_locs_start, crop_locs_goal)

    return regression_targets
def load_normalize_data(download=False, batch_size=16,
                        multiply_images = None, split='val', use_eval_split = False):
    """Build normalized train/val DataLoaders for the dataset in CONFIG.MISC_dataset.

    Loads per-channel mean/std from the dataset's stats.json, builds
    resize/crop/flip transforms, instantiates the configured dataset and
    wraps the train and validation splits in DataLoaders.

    Args:
        download: NOTE(review): despite its name, this flag is passed as
            `shuffle` to both DataLoaders below — confirm that is intended.
        batch_size: batch size for both loaders.
        multiply_images: if set (> 1), each image in a batch is repeated this
            many times so several games can be played per image.
        split: which split the *validation* loader should load ('val'/'test';
            'test' only supported for masa_filt/masa_seven).
        use_eval_split: use the fixed evaluation split file where supported.

    Returns:
        (trainloader, valloader)
    """
    if not CONFIG.MISC_dataset in ['masa_filt', 'masa_seven'] and split=='test':
        raise(Exception("Testing mode only implemented for the masa filt dataset"))
    # Locate the precomputed normalization statistics for this dataset
    if CONFIG.MISC_dataset.startswith('custom_'):
        stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
    else:
        stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
    if os.path.exists(stat_path):
        with open(stat_path) as json_file:
            stats = json.load(json_file)
    else:
        print("Unable to find calculated mean and std for this dataset.\nUse utils/normalize_dataset.py")
        exit(1)
    # Stored in CONFIG so the collate_fn below can read it at load time
    CONFIG.RUNTIME_multiply_images = multiply_images
    def collate_fn(batch):
        # Transform each returned batch to the desired format
        images , labels = tuple(zip(*batch))
        # If enabled, make several training examples out of each image
        if CONFIG.RUNTIME_multiply_images is not None and CONFIG.RUNTIME_multiply_images != 1:
            images = torch.stack(images)
            images = torch.repeat_interleave(images,CONFIG.RUNTIME_multiply_images , dim = 0)
            temp = np.asarray(labels)
            start_crops = np.repeat(temp[:,0,:] , CONFIG.RUNTIME_multiply_images , axis =0)
            goal_crops = np.repeat(temp[:,1,:] , CONFIG.RUNTIME_multiply_images, axis = 0)
            labels = (start_crops , goal_crops)
        else:
            temp = np.asarray(labels)
            labels = (temp[:,0,:] , temp[:,1,:])
            images = torch.stack(images)
        return ( images , labels)
    # Select which interpolation to be used for resizing
    interpolation = 'bilinear'
    if interpolation == 'nearest':
        # Works for label masks but totally destroys the images.
        # Unable to train models on images that have been resized with nearest
        interpolation_mode = transforms.InterpolationMode.NEAREST
    elif interpolation == 'bilinear':
        # BILINEAR interpolation ruins label masks
        interpolation_mode = transforms.InterpolationMode.BILINEAR
    elif interpolation == 'bicubic':
        interpolation_mode = transforms.InterpolationMode.BICUBIC
    else:
        raise(Exception("Unkonwn interpolation mode"))
    # Resize slightly larger, then centre-crop to the configured image size
    transforms_train = [transforms.Resize([CONFIG.MISC_im_size[0]+4,CONFIG.MISC_im_size[1]+4], interpolation = interpolation_mode),
                        transforms.CenterCrop(CONFIG.MISC_im_size)
                        ]
    transforms_val = [transforms.Resize([CONFIG.MISC_im_size[0]+4,CONFIG.MISC_im_size[1]+4], interpolation = interpolation_mode),
                      transforms.CenterCrop(CONFIG.MISC_im_size)]
    # Data augmentation (training only)
    transforms_train += [
        #transforms.RandomResizedCrop(CONFIG.MISC_im_size, scale = (0.8,0.8),ratio = (1,1)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip()]
    # If we use segmentation input, pad the stats with a dummy channel so the
    # Normalize transform matches the channel count
    if CONFIG.RL_priv_use_seg:
        if CONFIG.MISC_dataset in ['masa', 'masa_filt']:
            stats['means'] += [0]
            stats['stds'] += [1]
    else:
        stats['means'] = stats['means'][:3]
        stats['stds'] = stats['stds'][:3]
    transforms_train = [transforms.ToTensor()] + transforms_train
    transforms_val = [transforms.ToTensor()] + transforms_val
    transforms_train += [transforms.Normalize(stats["means"], stats["stds"])]
    transforms_val += [transforms.Normalize(stats["means"], stats["stds"])]
    transform_train = transforms.Compose(transforms_train)
    transform_val = transforms.Compose(transforms_val)
    # Instantiate the configured dataset
    if CONFIG.MISC_dataset == 'dubai':
        trainset = Dubai(CONFIG.MISC_dataset_path,split = 'train',transform
                         = transform_train)
        valset = Dubai(CONFIG.MISC_dataset_path ,split = 'val',transform =
                       transform_val, use_eval_split = use_eval_split)
    elif CONFIG.MISC_dataset == 'masa':
        trainset = Masa(CONFIG.MISC_dataset_path,split = 'train',transform
                        = transform_train)
        valset = Masa(CONFIG.MISC_dataset_path ,split = 'val',transform =
                      transform_val)
    elif CONFIG.MISC_dataset == 'masa_filt':
        trainset = MasaFilt(CONFIG.MISC_dataset_path,split = 'train',transform
                            = transform_train,use_eval_split = False)
        valset = MasaFilt(CONFIG.MISC_dataset_path ,split=split, transform =
                          transform_val,use_eval_split = use_eval_split)
    elif CONFIG.MISC_dataset == 'masa_seven':
        trainset = MasaSeven(CONFIG.MISC_dataset_path,split = 'train',transform
                             = transform_train,use_eval_split = False)
        valset = MasaSeven(CONFIG.MISC_dataset_path ,split=split, transform =
                           transform_val,use_eval_split = use_eval_split)
    elif CONFIG.MISC_dataset == 'dubai_seven':
        trainset = DubaiSeven(CONFIG.MISC_dataset_path,split = 'train',transform
                              = transform_train,use_eval_split = False)
        valset = DubaiSeven(CONFIG.MISC_dataset_path ,split=split, transform =
                            transform_val,use_eval_split = use_eval_split)
    elif CONFIG.MISC_dataset.startswith('custom_'):
        trainset = CustomDataset(CONFIG.MISC_dataset_path, CONFIG.MISC_dataset[7:],split = 'train' , transform = transform_train, custom_split_file = CONFIG.MISC_dataset_split_file)
        valset = CustomDataset(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset[7:],split ='val', transform = transform_val, custom_split_file = CONFIG.MISC_dataset_split_file)
    elif CONFIG.MISC_dataset == 'masa_full':
        trainset = MasaFull(CONFIG.MISC_dataset_path, split='train', transform=transform_train, randomRotation=True)
        valset = MasaFull(CONFIG.MISC_dataset_path, split='val', transform=transform_val, randomRotation=True)
    elif CONFIG.MISC_dataset == 'images_pre':
        trainset = ImagesPre(CONFIG.MISC_dataset_path,split = 'train',transform
                             = transform_train,use_eval_split = False)
        valset = ImagesPre(CONFIG.MISC_dataset_path ,split=split, transform =
                           transform_val,use_eval_split = use_eval_split)
    elif CONFIG.MISC_dataset == 'images_post':
        trainset = ImagesPre(CONFIG.MISC_dataset_path,split = 'train',transform
                             = transform_train,use_eval_split = False, post_instead=True)
        valset = ImagesPre(CONFIG.MISC_dataset_path ,split=split, transform =
                           transform_val,use_eval_split = use_eval_split, post_instead=True)
    else:
        raise(Exception("Unknown dataset"))
    def worker_init(*args):
        # Let the main process handle Ctrl-C instead of the workers
        signal.signal(signal.SIGINT , lambda x,y: None)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=download, num_workers=2, collate_fn=collate_fn , worker_init_fn=worker_init)
    valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=download, num_workers=2, collate_fn=collate_fn, worker_init_fn = worker_init)
    return trainloader, valloader
def visualize_batch(batch , save_name = 'vis_batch' , verbose = False , PATH = CONFIG.STATS_log_dir,
                    transform = None, prefix = 'agent'):
    """ Visualizes all episodes in one batch.

    Saves up to CONFIG.MISC_vis_per_batch trajectory plots under
    PATH/save_name (or PATH/eval for evaluation runs).

    NOTE(review): only the first and the prefix=='agent' branches create the
    directory, and the 'agent' branch calls os.makedirs without exist_ok so it
    raises if the directory already exists — confirm this is intended.
    """
    # Make save directory
    dir_path = os.path.join(PATH , save_name)
    if save_name == 'eval_0' and prefix == 'Det':
        dir_path = os.path.join(PATH , 'eval')
        os.makedirs(dir_path,exist_ok= True)
    elif 'eval' in save_name:
        dir_path = os.path.join(PATH , 'eval')
    elif prefix == 'agent':
        os.makedirs(dir_path)
    # Limit number of episodes to visualize per batch
    if CONFIG.MISC_vis_per_batch:
        nbr_vis = min(CONFIG.MISC_vis_per_batch , batch.idx)
    else:
        nbr_vis = batch.idx
    # Loop through each episode and visualize it
    for i in range( nbr_vis ):
        if 'eval' in save_name:
            name = save_name + '_'+ prefix
        else:
            name = str(i)+ "_vis_" + prefix
        # NOTE(review): get_episode is unpacked into three values here; the
        # BatchStorage.get_episode in training_utils returns a single episode
        # — confirm which batch-storage class is used with this helper.
        episode , _ , weights= batch.get_episode(i, batch.steps[i].int() + 1)
        visualize_trajectory(episode , save_name = name,verbose =True,
                             transform = transform , PATH = dir_path)
def visualize_trajectory(episode,
                         save_name='visualization', verbose=False, PATH = CONFIG.STATS_log_dir ,
                         transform = None):
    """
    Render one episode (start patch, goal patch and the agent's trajectory)
    on top of the full image and save it to PATH/save_name.png, or show it
    interactively when save_name is None.

    start_patch, goal_patch and agent_patches all have the format
    (h_topleft, w_topleft, height, width)
    - The patches need to be of numpy format
    - To plot a sequence of agent patches just insert the sequence like (len(seq),4)
    - To plot only one it is fine to insert of the shape (4)
    """
    start_patch = episode.locs[0]
    goal_patch = episode.loc_goal[0]
    # Only the first three channels are the RGB image (extra channels may be
    # segmentation input)
    full_img = episode.image.squeeze(0).squeeze(0)[0:3,:,:].cpu()
    agent_patches = episode.locs[1:, :]
    rewards = episode.weights.cpu()
    # Get size of the full image and patches
    H_img, W_img = CONFIG.MISC_im_size
    H_patches, W_patches = CONFIG.MISC_patch_size
    step_sz = CONFIG.MISC_step_sz
    # If we received a transform -> use it (e.g. to undo normalization)
    if transform is not None:
        full_img = transform(full_img)
    # Add image to plotting
    fig, ax = plt.subplots( figsize = (14, 8))
    ax.imshow(np.transpose(full_img, (1, 2, 0)))
    # Draw the grid of all possible patch positions
    for i in range(CONFIG.MISC_game_size * CONFIG.MISC_game_size):
        y, x = divmod(i, CONFIG.MISC_game_size)
        rect = patches.Rectangle((x * step_sz,y * step_sz),
                                 W_patches - 1,
                                 H_patches - 1,
                                 edgecolor = 'w',
                                 facecolor = 'none')
        ax.add_patch(rect)
    # Add start_patch and goal_patch
    # NOTE: patches.Rectangle's xy refers to y going from top to bottom, i.e. it
    # is "reversed" relative to the mathematical y-axis which would go from
    # bottom to top
    text_offset_x = CONFIG.MISC_patch_size[0] // 2
    text_offset_y = CONFIG.MISC_patch_size[0] // 2
    # Check if goal and final loc match; success would be marked yellow
    final_iou = compute_iou(agent_patches[-1,:] , goal_patch)
    if final_iou > CONFIG.RL_done_iou:
        rect_goal_color = 'y'
    else:
        rect_goal_color = 'g'
    # NOTE(review): rect_goal_color is computed but never used below — the
    # goal box is always drawn with the same facecolor. Confirm whether the
    # success/failure colouring should be applied.
    rect_goal = patches.FancyBboxPatch(xy=(goal_patch[1], goal_patch[0]),
                                       width=goal_patch[3],
                                       height=goal_patch[2],
                                       linewidth=1.5,
                                       boxstyle = patches.BoxStyle("Round",
                                                                   pad=-13,
                                                                   rounding_size=5),
                                       edgecolor='none', facecolor='#97D077')
    rect_start = patches.FancyBboxPatch(xy=(start_patch[1], start_patch[0]),
                                        width=start_patch[3],
                                        height=start_patch[2],
                                        linewidth=1.5,
                                        boxstyle = patches.BoxStyle("Round",
                                                                    pad=-13,
                                                                    rounding_size=5),
                                        edgecolor='none', facecolor='#D0CEE2')
    ax.add_patch(rect_start)
    ax.text(start_patch[1] + text_offset_x, start_patch[0] + text_offset_y,
            "S", fontsize=23, color='w',rotation = 0,rotation_mode = 'anchor',
            horizontalalignment='center',verticalalignment='center')
    # Make sure that the agent-selected patches are of the correct dimensions
    # (add a batch axis so the loop below works for a single patch as well)
    if len(agent_patches.shape) == 1:
        agent_patches = agent_patches[np.newaxis, :]
    # Add agent-selected patch(es), numbered in visit order
    for i in range(agent_patches.shape[0]):
        agent_patch = agent_patches[i,:]
        # agent_rect = patches.Rectangle(xy=(agent_patch[1], agent_patch[0]), width=agent_patch[3],
        #                                height=agent_patch[2], linewidth=1.5, edgecolor='r', facecolor='none')
        # if final_iou < CONFIG.RL_done_iou or i < (agent_patches.shape[0]-1):
        #     ax.add_patch(agent_rect)
        ax.text(agent_patch[1] + 4*i + 4, agent_patch[0] + 6 , str(i + 1),
                horizontalalignment='left',verticalalignment='center',
                bbox=dict(boxstyle='circle',fc='#7EA6E0',ec='none'), fontsize=11, color='w')
        # Show IoU only in the last iteration which should be the convergent one
        if verbose and i == (agent_patches.shape[0]-1):
            dist = np.linalg.norm(get_regression_targets(agent_patch[None,:], goal_patch[None,:], normalize=False))
            iou = compute_iou(agent_patch, goal_patch)
            # Set text in top right corner for easier readability
            ax.text(W_img + 2, 2 , 'Final dist = '+str(round(dist,1)), fontsize=9, color='b')
            ax.text(W_img + 2, 12 , 'Final IoU = '+str(round(iou.item() , 3)), fontsize=9, color='b')
        # Add option to show rewards for each step
        if verbose and rewards is not None:
            ax.text(W_img + 2 , 20 + 10*i , 'Reward %d: %2.1f'% (i, rewards[i]), fontsize = 8 , color='b')
    ax.add_patch(rect_goal)
    ax.text(goal_patch[1] + text_offset_x, goal_patch[0] + text_offset_y, "G",
            horizontalalignment='center',verticalalignment='center',
            fontsize=23, color='w',rotation = 0,rotation_mode = 'anchor')
    if save_name is not None:
        # Save to disk, then release all figure resources to avoid leaking
        # memory when visualizing many episodes
        fig.savefig(os.path.join(PATH, save_name + '.png'))
        plt.cla()
        plt.clf()
        plt.close('all')
        gc.collect()
    else:
        # If save name is None show the image instead
        plt.show()
def get_crop_distance(crop_loc1, crop_loc2):
    """Euclidean distance between the (top, left) corners of two crop locations."""
    dy = crop_loc1[0] - crop_loc2[0]
    dx = crop_loc1[1] - crop_loc2[1]
    return math.sqrt(dy * dy + dx * dx)
def check_outside(crops_loc):
    """Flag crops whose top-left corner lies outside the image.

    A corner counts as outside when it is more than one patch above/left of
    the image (< -H / < -W) or beyond the image extent (> im_H / > im_W).
    Returns a float tensor with 1.0 for outside crops and 0.0 otherwise.
    """
    H, W = CONFIG.MISC_patch_size
    im_H, im_W = CONFIG.MISC_im_size
    flags = torch.zeros(crops_loc.shape[0])
    for idx in range(crops_loc.shape[0]):
        top = crops_loc[idx][0]
        left = crops_loc[idx][1]
        outside_vertical = (top < -H) or (top > im_H)
        outside_horizontal = (left < -W) or (left > im_W)
        flags[idx] = outside_vertical or outside_horizontal
    return flags
def project_into_image(crop_loc):
    """Clamp a crop location [top, left, h, w] in place so the patch lies
    inside the image, and return it.

    NOTE(review): the lower bound only triggers when the corner is more than a
    full patch outside (< -H / < -W), mirroring check_outside(); corners in
    (-H, 0) are left untouched, allowing partial overlap at the top/left —
    confirm this asymmetry is intended.
    """
    H, W = CONFIG.MISC_patch_size
    im_H,im_W = CONFIG.MISC_im_size
    if crop_loc[0] < -H: crop_loc[0] = 0  # snap far-above corners to the top edge
    elif crop_loc[0] > im_H-H: crop_loc[0] = im_H-H  # keep the patch fully inside at the bottom
    if crop_loc[1] < -W: crop_loc[1] = 0  # snap far-left corners to the left edge
    elif crop_loc[1] > im_W-W: crop_loc[1] = im_W-W  # keep the patch fully inside at the right
    return crop_loc
# Used to select the latest results folder
def selectLatest(input):
    """Sort key for log-directory names: the directory's timestamp as unix time.

    Directory names begin with 'YYYY-MM-DD_HH-MM-SS'; the time part has its
    dashes replaced by colons and the underscore becomes a 'T', yielding an
    ISO-8601 string that `parse` understands.
    """
    date_part = input[0:10]
    time_part = input[10:19].replace('-', ':')
    stamp = (date_part + time_part).replace('_', 'T')
    # Convert to datetime and then to unix time
    parsed = parse(stamp)
    return time.mktime(parsed.timetuple())
"""
Finds the latest directory in a folder sorted by the directory name format used in the logs.
"""
def find_latest_log(log_folder_path , n_latest = 0):
    """Return the name of the n:th latest log directory, or None if absent.

    Args:
        log_folder_path: folder containing one sub-directory per run.
        n_latest: 0 for the most recent log, 1 for the second most recent, ...

    Returns:
        The directory name (str), or None when fewer than n_latest + 1
        logs exist.
    """
    folders = os.listdir(log_folder_path)
    # Keep only directories (regular files should not occur here, but be safe)
    folders = list(filter( lambda d: os.path.isdir(os.path.join(log_folder_path , d)) , folders))
    # Sort newest first based on the timestamp encoded in the directory name.
    # BUGFIX: narrowed a bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) down to `except Exception`.
    try:
        folders.sort(key = selectLatest , reverse = True)
    except Exception:
        print("Error when trying to sort the logs in find_latest_log")
    if n_latest >= len(folders):
        # Not that many logs available
        return None
    else:
        return folders[n_latest]
"""
Reads the contents of the config file and overwrites the current settings with the ones in the config file.
"""
def read_and_overwrite_config(config_file_path , same_data = False):
    """Read a saved config file and overwrite matching CONFIG settings.

    Only `RL_priv*` settings plus the patch/image sizes are always copied;
    with same_data=True the random seed and dataset choice are copied as well
    so the training data matches the original run.

    SECURITY NOTE: values are taken from the file via eval() — only load
    config files from trusted log directories.

    NOTE(review): `line.split('.')[1]` truncates values containing a dot
    (e.g. floats like 0.5) — confirm no such settings are copied here.
    """
    with open(config_file_path , 'r') as local_config:
        # Iterate over file lines
        for line in local_config:
            # Only lines of the form `CONFIG.<name> = <value>` are relevant
            if line[0] != 'C' or "CONFIG." not in line:
                continue
            split = line.split('.')[1].strip(' #=').split('=')
            setting = split[0].strip(' #')
            # Privileged-information settings are always copied
            if "RL_priv" in setting:
                value = eval(split[1].strip(' #'))
                CONFIG[setting] = value
            # Patch size and image size must match the trained model
            if "MISC_patch_size" in setting or "MISC_im_size" in setting:
                value = eval(split[1].strip(' #'))
                CONFIG[setting] = value
            # If enabled, also read settings that make the trainset contain
            # the exact same data the model was trained on.
            # BUGFIX: `and` binds tighter than `or`, so the original condition
            # copied the dataset setting even when same_data was False.
            if same_data and ("MISC_random_seed" in setting or setting == "MISC_dataset"):
                value = eval(split[1].strip(' #'))
                CONFIG[setting] = value
def cosine_sim_heatmap(embeddings , grid_size = 16, pos = [1,2]):
    """Plot and save a heatmap of cosine similarities between the positional
    embedding at `pos` and the embeddings of all other grid positions.

    The figure is saved to STATS_log_dir/positional_embeddings/.

    Args:
        embeddings: embedding tensor indexed as embeddings[0, (i, j)].
            NOTE(review): exact expected shape is not visible here — confirm.
        grid_size: grid side length (reduced by one internally).
        pos: (row, col) of the reference position; the default list is never
            mutated.
    """
    grid_size = grid_size - 1
    # Generate all position pairs: unordered pairs, their flips, and the diagonal
    comb = torch.combinations( torch.arange(0, grid_size) , r = 2)
    # Add flipped
    comb = torch.cat((comb , comb.flip(dims=(1,))))
    comb = torch.cat((comb , torch.cat((torch.arange(0,grid_size).unsqueeze(1),torch.arange(0,grid_size).unsqueeze(1)),dim = 1))).long()
    pos_embedding = torch.flatten(embeddings[0,comb ] , start_dim = 1, end_dim = 2)
    # Plot heat map of one positional embedding compared to the rest
    selected_pos = torch.tensor(pos)
    selected_pos_emb = pos_embedding[ ( selected_pos == comb).all(dim = 1) , :]
    cosine_sim = torch.nn.CosineSimilarity( dim = 1)
    # Similarity of the selected embedding against all embeddings
    cosine_similarities = cosine_sim( selected_pos_emb , pos_embedding)
    # Due to torch's inability to index with tensors properly we will have to
    # insert the values manually
    heat_map_tensor = torch.ones((grid_size , grid_size ))
    for (i, sim) in enumerate(cosine_similarities):
        heat_map_tensor[comb[i,0] , comb[i,1]] = sim
    # Convert to numpy
    heat_map_numpy = heat_map_tensor.cpu().numpy()
    ax = sns.heatmap(heat_map_numpy)
    ax.set_title('Positional Encoding: (%d,%d)' % (pos[0] , pos[1]))
    plt.savefig(os.path.join(CONFIG.STATS_log_dir , "positional_embeddings" , "embedding_%d_%d.jpg" % (pos[0] , pos[1])))
    # Release figure resources to avoid leaking memory across many calls
    plt.cla()
    plt.clf()
    plt.close('all')
    gc.collect()
def calculate_cnn_output_size(layers, input_size, max_pool):
    """Compute the output (H, W, C) of a stack of convolutional layers.

    Each layer is [in_chan, out_chan, kernel, stride, padding] (an older
    [kernel, out_chan, padding, stride, in_chan] format is no longer used).
    Spatial size follows standard conv arithmetic,
    (X + 2*pad - (kernel - 1) - 1) / stride + 1; if `max_pool` is given as
    (kernel, stride), one pooling layer is assumed after every conv.

    Args:
        layers: list of per-layer parameter lists, see above.
        input_size: (H, W) of the input feature map.
        max_pool: (kernel, stride) of the interleaved pooling layers, or None.

    Returns:
        [H, W, C] of the final feature map, with H and W floored to ints.
    """
    H, W = input_size
    # BUGFIX: the channel count should start from the first layer's in_chan
    # (index 0), not its padding (index 4) as before. This was harmless for
    # non-empty layer lists since C is overwritten in the loop, but the old
    # code also crashed with IndexError on an empty list.
    C = layers[0][0] if layers else None
    for layer in layers:
        H = (H + 2 * layer[4] - (layer[2] - 1) - 1) / layer[3] + 1
        W = (W + 2 * layer[4] - (layer[2] - 1) - 1) / layer[3] + 1
        C = layer[1]
        # If max_pool is available assume there is such a layer after each conv
        if max_pool is not None:
            H = (H - (max_pool[0] - 1) - 1) / max_pool[1] + 1
            W = (W - (max_pool[0] - 1) - 1) / max_pool[1] + 1
    H = math.floor(H)
    W = math.floor(W)
    return [H, W, C]
def crop_s(crop1 , crop2 ,T = None, DEBUG = True):
    """
    Simply plots two crops side by side. Usefull for debugging.

    Args:
        crop1, crop2: image tensors of shape (C, H, W).
        T: optional transform applied to both crops before plotting
            (e.g. to undo normalization).
        DEBUG: when True, switch to the TkAgg backend and show the figure
            interactively.
    """
    if DEBUG:
        # Assume in debug mode and switch to TkAgg backend
        matplotlib.use('TkAgg')
    # If a transform is supplied use it
    if T is not None:
        crop1 , crop2 = T(crop1) , T(crop2)
    fig, axes = plt.subplots(nrows = 1 , ncols = 2)
    # Tensors are (C, H, W); matplotlib wants (H, W, C)
    axes[0].imshow(crop1.permute(1,2,0))
    axes[1].imshow(crop2.permute(1,2,0))
    axes[0].set_title("Crop 1")
    axes[1].set_title("Crop 2")
    # TODO
    if DEBUG:
        plt.show()
    # Release figure resources to avoid leaking memory across many calls
    plt.cla()
    plt.clf()
    plt.close('all')
    gc.collect()
def get_correct_act(loc_start , loc_goal):
    """Return the correct action (one-hot over 8 directions) to step from
    loc_start towards loc_goal on the patch grid.

    The eight slots start at "up" (index 0) and proceed clockwise.
    """
    # One grid step in (y, x); y grows downwards in image coordinates
    grid_correct_move = ( loc_goal[0,0:2] - loc_start[0,0:2]) / ( CONFIG.RL_softmax_step_size * torch.tensor(CONFIG.MISC_patch_size)).long()
    # Round to compensate for flooring of negative integers
    grid_correct_move = grid_correct_move.round()
    # remember first coordinate is y (positive downwards)
    dx, dy = grid_correct_move[1].item() , grid_correct_move[0].item()
    # Map the (dx, dy) step onto its one-hot slot, clockwise from "up"
    direction_to_slot = {
        (0, -1): 0,   # up
        (1, -1): 1,   # up right
        (1, 0): 2,    # right
        (1, 1): 3,    # right down
        (0, 1): 4,    # down
        (-1, 1): 5,   # down left
        (-1, 0): 6,   # left
        (-1, -1): 7,  # up left
    }
    i = direction_to_slot.get((dx, dy))
    if i is None:
        raise(Exception("Unknown move in get_correct_act. Is maximum distance really 1?"))
    correct_act = torch.zeros((1,8))
    correct_act[0,i] = 1
    return correct_act
def setupLogDir( log_dir_path):
    """
    Create log directory and save all current files. If pretrained network is enabled
    move the config files from that log directory to this one as well.

    NOTE(review): the log_dir_path argument is never used — all paths come
    from CONFIG.STATS_* — confirm whether it should be honoured or removed.
    """
    if not os.path.exists(CONFIG.STATS_log_dir_base):
        os.makedirs(CONFIG.STATS_log_dir_base)
    # Fail loudly if this run's log directory already exists
    os.makedirs(CONFIG.STATS_log_dir, exist_ok = False)
    CONFIG.STATS_vis_dir = os.path.join(CONFIG.STATS_log_dir, "visualizations")
    os.makedirs(CONFIG.STATS_vis_dir)
    CONFIG.STATS_scripts_dir = os.path.join(CONFIG.STATS_log_dir, "saved_scripts")
    os.makedirs(CONFIG.STATS_scripts_dir)
    # Snapshot the scripts used for this run so results stay reproducible
    copyfile("training/train_agent.py" , os.path.join(CONFIG.STATS_scripts_dir, "train_agent.py"))
    copyfile("config.py" , os.path.join(CONFIG.STATS_scripts_dir , ".." , "config.py"))
    # Save Network files
    copyfile("networks/rnn_agents.py", os.path.join(CONFIG.STATS_scripts_dir, "rnn_agents.py"))
    # Save Utils files
    copyfile("utils/utils.py", os.path.join(CONFIG.STATS_scripts_dir, "utils.py"))
    copyfile("utils/training_utils.py" , os.path.join(CONFIG.STATS_scripts_dir , "training_utils.py"))
    copyfile("utils/agent_utils.py" , os.path.join(CONFIG.STATS_scripts_dir, "agent_utils.py"))
    # Create folder for saving intermediate models
    #os.makedirs(os.path.join(CONFIG.STATS_log_dir, "models"))
    CONFIG.STATS_metrics_dir = os.path.join(CONFIG.STATS_log_dir, "metrics")
    os.makedirs(CONFIG.STATS_metrics_dir)
    # If pretrained network is to be loaded save the config files from that folder as well
    if CONFIG.RL_priv_pretrained:
        CONFIG.STATS_pretrained_log_dir = os.path.join(CONFIG.STATS_log_dir, "pretrained")
        os.makedirs(CONFIG.STATS_pretrained_log_dir)
        # Map patch-embedder type to the CONFIG key holding its log directory
        embedder_type_mapping = dict([
            ("Doerch" , 'RL_pretrained_doerch_net'),
            ("ShareNet" , 'RL_pretrained_doerch_net'),
            ("Segmentations", 'RL_pretrained_segmentation_net')
        ])
        pre_trained_dir = CONFIG[embedder_type_mapping[CONFIG.RL_patch_embedder]]
        # Check if pretrained directory really exists
        # BUGFIX: the Exception used a comma instead of %-formatting, so the
        # offending path never appeared in the error message.
        if not os.path.exists(pre_trained_dir):
            raise(Exception("Unable to find pretrained directory:\t%s" % pre_trained_dir))
        pretrained_files = glob(pre_trained_dir + "/*.py")
        pretrained_files += glob(pre_trained_dir + "/*.json")
        for file in pretrained_files:
            dst_file = file[file.rfind("/")+1:]
            copyfile(file, os.path.join(CONFIG.STATS_pretrained_log_dir, dst_file))
def get_pos_goal_loc(loc, grid_size, distance):
    """Return all grid positions exactly `distance` king moves from `loc`.

    A BFS with 8-neighbourhood (Chebyshev) steps writes the step count into a
    grid; cells still at -1 were never reached.

    Args:
        loc: (row, col) start position; must lie inside the grid.
        grid_size: side length of the square grid.
        distance: number of king moves from `loc`.

    Returns:
        (k, 2) int array of (row, col) positions at exactly `distance` moves.

    Raises:
        ValueError: if `loc` lies outside the grid.
        Exception: if no position exists at the requested distance.
    """
    pos_grid = np.ones((grid_size, grid_size)) * -1
    # BUGFIX: replaced a bare `except: pdb.set_trace()` (debugger leftover,
    # which also let negative indices wrap around silently) with an explicit
    # bounds check.
    if not (0 <= loc[0] < grid_size and 0 <= loc[1] < grid_size):
        raise ValueError("Start location %s outside grid of size %d" % (str(loc), grid_size))
    pos_grid[loc[0], loc[1]] = 0
    # The eight king-move directions, clockwise starting from "up"
    directions = np.array([[-1, 0], [-1, 1], [0, 1], [1, 1],
                           [1, 0], [1, -1], [0, -1], [-1, -1]])
    for d in range(1, distance + 1):
        # Cells reached in exactly d-1 steps
        inds = np.asarray(np.where(pos_grid == (d - 1))).T
        # For each direction take a step and write the new distance (if possible)
        for dir in directions:
            inds_dir = inds + dir[np.newaxis, :]
            # Filter out all new locations that are outside the grid
            inds_dir = inds_dir[((inds_dir >= 0) & (inds_dir < grid_size)).all(axis=1), :]
            # Also filter out any new location already visited
            inds_dir = inds_dir[(pos_grid[inds_dir[:, 0], inds_dir[:, 1]] == -1), :]
            # Write new distance
            if len(inds_dir) > 0:
                pos_grid[inds_dir[:, 0], inds_dir[:, 1]] = d
    # When completed, find indices of positions at the requested distance
    arrays_distance = np.stack(np.where(pos_grid == distance)).T
    if len(arrays_distance) == 0:
        raise(Exception("Did not find any valid locations"))
    return arrays_distance
def sample_grid_games_fixed_distance( distance, n_games):
    """
    Samples n_games with fixed distance between start goal.
    Returns: n_games x 4 with start and goal loc

    Distance is measured in king moves (Chebyshev distance) on the grid of
    patch positions.

    Raises:
        ValueError: when the distance equals or exceeds the grid size.
    """
    # Calculate grid size
    grid_size = CONFIG.MISC_im_size[0] // CONFIG.MISC_step_sz + 1
    # Determine if bounds are needed for sampling start
    if distance > grid_size // 2:
        # TODO
        start_bounds = 0  # NOTE(review): unused leftover
        # sample start locations
        # Due to distance being so large that not all start locations are possible
        # create a grid of valid start positions to sample from
        grid = np.reshape( np.arange(0,grid_size**2) , (grid_size, grid_size))
        grid_pos_dist = (grid_size - distance)
        # Check that grid_pos_dist is positive
        if grid_pos_dist <= 0:
            raise(ValueError("Distance equal to or larger than grid size."))
        # Invalidate the centre region, from which no goal at this distance exists
        grid[grid_pos_dist:grid_size - grid_pos_dist , grid_pos_dist:grid_size - grid_pos_dist] = -1
        grid = grid.flatten()
        grid = np.delete( grid, np.where( grid == -1))
        # Grid now contains 1D array of allowed start positions (flat indices)
        start_pos = np.random.choice( grid, size = n_games)
        start_locs = np.vstack( ( start_pos // grid_size, start_pos % grid_size)).T
    else:
        # No need for bounds; sample start locations uniformly
        start_locs = np.random.randint(0, grid_size, size = (n_games, 2))
    goal_locs = np.zeros_like(start_locs)
    # For each unique start location sample goals at the requested distance
    unique_start_locs = np.unique(start_locs , axis = 0)
    pos_goal_locs = {}  # NOTE(review): dead initializer, reassigned in the loop below
    for loc in unique_start_locs:
        # Boolean mask of the games sharing this start position
        locs_ind = (( start_locs == loc).all(axis = 1))
        pos_goal_locs = get_pos_goal_loc(loc, grid_size , distance)
        rand_inds = np.random.randint( pos_goal_locs.shape[0] , size = locs_ind.sum())
        # These are the sampled goal locations for this start position
        local_goal_locs = pos_goal_locs[rand_inds, :]
        # Take these goal locs and put them in the output
        goal_locs[ np.where( locs_ind) , : ] = local_goal_locs
    return np.hstack( (start_locs, goal_locs))
| 40,412 | 38.159884 | 181 | py |
airloc | airloc-master/utils/create_split_file.py |
import pandas as pd
import os
import matplotlib
import argparse
import torch
from utils.dataset_utils import *
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import time
import numpy as np
from doerchnet.utils import visualize_doerch
from utils.utils import sample_grid_games_fixed_distance
from config import CONFIG
def create_fixed_game_split_file(args ):
    """Create a CSV of fixed games (start/goal patch coordinates) per image.

    For each image in the chosen split of args.dataset, args.number_games
    games are sampled. With number_games == 24 every grid cell of a 5x5 grid
    (skipping index 20) is used as a goal from the fixed start cell (0, 4);
    otherwise the game distances cycle through 1 .. MISC_game_size-1.
    The result is written next to the dataset as
    <split>_eval_partition_grid_fixed_{start,distance}.csv.
    """
    if args.dataset is None:
        print("\nERROR:\tPlease select a dataset\n")
        exit(1)
    # Check if dataset exists
    dataset_path = os.path.join(CONFIG.MISC_dataset_path , args.dataset)
    if not os.path.exists(dataset_path):
        print("\nERROR:\tDataset not found:\t%s\n" % args.dataset)
        exit(1)
    # Locate current split file
    if args.grid_game:
        split_file_path = os.path.join(dataset_path , "list_eval_partition_grid.csv")
    else:
        split_file_path = os.path.join(dataset_path, "list_eval_partition.csv")
    # Check its existence
    if not os.path.exists(split_file_path):
        print("Current %s split file not found." % str(args.grid_game))
        exit(2)
    # Read split file and keep only the requested split
    # (partition codes: 0 = train, 1 = val, 2 = test)
    orig_split = pd.read_csv(split_file_path)
    if args.split == 'train':
        orig_split = orig_split[orig_split.partition == 0.0]
    elif args.split == 'val':
        orig_split = orig_split[orig_split.partition == 1.0]
    elif args.split == 'test':
        orig_split = orig_split[orig_split.partition == 2.0]
    else:
        raise(Exception("Unexpected split:\t%s" % args.split))
    # For each file in the dataframe create args.number_games games and append to the new split
    # First two columns are start coordinates and last two are goal coordinates
    n_imgs = orig_split.shape[0]
    games = np.zeros((orig_split.shape[0] * args.number_games , 4))
    locs_start = np.array([[0,4]]).repeat(orig_split.shape[0],axis=0)
    for i in range(1,args.number_games + 1):
        if args.number_games == 24:
            # Special mode: enumerate all goal cells of a 5x5 grid from the
            # fixed start (0, 4), skipping flat index 20
            idx = i
            idx-=1
            if idx >=20: idx+=1
            y,x = divmod(idx,5)
            print(x,y)  # NOTE(review): debug print left in
            locs_goal = np.array([[x,y]]).repeat(orig_split.shape[0],axis=0)
            temp = np.hstack((locs_start,locs_goal))
        else:
            # Cycle difficulty through 1 .. MISC_game_size-1
            if i > CONFIG.MISC_game_size - 1:
                difficulty = i % (CONFIG.MISC_game_size - 1)
            else:
                difficulty = i
            print(difficulty)  # NOTE(review): debug print left in
            temp = sample_grid_games_fixed_distance(difficulty , n_imgs)
        games[(i-1)*n_imgs:i*n_imgs,:] = temp
    # Now games have been sampled. Multiply by the step size to get pixel
    # coordinates and write to file
    games *= CONFIG.MISC_step_sz
    file_names = np.expand_dims(np.array(list(orig_split['image_id']) * args.number_games) , axis = 1)
    data = np.hstack( (file_names, games, np.ones((orig_split.shape[0] * args.number_games, 2)) * CONFIG.MISC_patch_size))
    cols = ["image_id" , "start_y", "start_x" , "goal_y", "goal_x" , "patch_size_x" , "patch_size_y"]
    new_split = pd.DataFrame(data , columns = cols)
    if args.number_games == 24:
        # Write to file
        new_split_file_path = os.path.join(dataset_path , "%s_eval_partition_grid_fixed_start.csv" % args.split)
        new_split.to_csv(new_split_file_path)
    else:
        # Write to file
        new_split_file_path = os.path.join(dataset_path , "%s_eval_partition_grid_fixed_distance.csv" % args.split)
        new_split.to_csv(new_split_file_path)
def verify_split_file(args):
    """Visually verify the generated split file for one image.

    Loads the image identified by args.image_id from the eval split and saves
    one visualization per game via visualize_doerch.
    """
    matplotlib.use('TkAgg')
    transform = transforms.Compose([
        transforms.Resize(CONFIG.MISC_im_size),
        transforms.ToTensor()
    ])
    # Load dataset
    if args.dataset == 'masa_filt':
        dataset = MasaFilt(CONFIG.MISC_dataset_path,split = 'val', use_eval_split = True)
    else:
        raise(Exception("Unkown dataset"))
    image , locs = dataset.getitem__by_image_id(args.image_id)
    # Convert image to tensor
    image = transform(image)
    # Append the patch size to each start/goal location.
    # NOTE(review): patch size 48 is hard-coded here instead of using
    # CONFIG.MISC_patch_size — confirm.
    locs = np.concatenate((locs, 48 * np.ones((4,2,2))) , axis = 2)
    locs = torch.from_numpy(locs)
    for i in range(locs.shape[0]):
        visualize_doerch(image, locs[i,0,:] , locs[i,1,:] ,torch.tensor([1,0]), save_name = 'vis_%d'%i)
if __name__ == '__main__':
    # Command-line entry point: create (default) or verify a split file.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset" , "-d" , type=str , default = None, help = "Select which dataset to create split file for.")
    parser.add_argument("--split" , type = str , default = "val" , help = "Which split should be created.")
    # NOTE(review): argparse `type=bool` does not parse "False" as False —
    # any non-empty string becomes True; consider action='store_true'.
    parser.add_argument("--grid-game" , type = bool, default = True)
    parser.add_argument("--number-games", type = int, default = CONFIG.MISC_game_size - 1 , help = "Number of games to create for each image.")
    parser.add_argument("--verify", type = bool, default = False, help = "Verify new split file visually.")
    parser.add_argument("--image-id" , type = str, help = "Which image to visualize")
    args = parser.parse_args()
    if not args.verify:
        print("Creating eval split file for %s" % args.dataset)
        print("Partition selected:\t%s" % args.split)
        print("Number of games per image:\t%d" % args.number_games)
        create_fixed_game_split_file(args)
    else:
        print("Verifying current eval split file.")
        verify_split_file(args)
| 5,333 | 34.324503 | 143 | py |
airloc | airloc-master/utils/training_utils.py | import torch
import sys
import numpy as np
import torchvision.transforms as transforms
from copy import deepcopy
import time
import math
import torchvision.transforms.functional as F
from torchvision.transforms import Resize
from utils.agent_utils import rewards_to_go, normalize_batch_weights
from utils.utils import get_random_crops, get_deterministic_crops, compute_iou,\
get_correct_act, sample_grid_games_fixed_distance
from doerchnet.utils import sample_doerch_crops
from config import CONFIG
class BatchStorage():
""" Class used as storage container for information generated during one batch."""
    def __init__(self,sc, CONFIG = CONFIG ):
        """Store run-wide constants needed to size the per-batch buffers.

        Args:
            sc: object passed through from the trainer and only stored here.
                NOTE(review): its exact role (scaler/statistics collector?)
                is not visible in this file — confirm.
            CONFIG: configuration object; defaults to the global CONFIG.
        """
        self.CONFIG = CONFIG
        self.device = CONFIG.device
        # Maximum episode length and number of episodes per batch
        self.ep_len = self.CONFIG.RL_max_episode_length
        self.n_eps = self.CONFIG.RL_batch_size * self.CONFIG.RL_multiply_images
        # Patch and full-image sizes
        self.p_H, self.p_W = self.CONFIG.MISC_patch_size
        self.im_H, self.im_W = self.CONFIG.MISC_im_size
        self.sc = sc
        self.proptime = 0
        self.max_len_batch = self.ep_len * self.n_eps
    def initialize(self, batch_size=None):
        """Allocate (or re-allocate) all tensors for one batch of episodes.

        Args:
            batch_size: optional override of the number of episodes.
                NOTE(review): a value of 0 is ignored by the truthiness
                check below — confirm that is acceptable.
        """
        # Allocate memory for all inputs, priv info, trainign tensors
        if batch_size: self.n_eps = batch_size
        # This index is the number of episodes processed
        self.idx = 0
        self.difficulty = torch.zeros(self.n_eps)
        # Allocate for those that are always active
        self.weights = torch.zeros((self.n_eps,self.ep_len))
        # NaN marks unused steps so later reductions can skip them
        self.weights[:,:] = torch.nan
        # TODO: Maybe add an extra dimension if we need duplicate storage
        self.locs = torch.zeros((self.n_eps , self.ep_len + 1, 4))
        self.locs_goal = torch.zeros((self.n_eps, 4))
        # Check if instance segmentation input is enabled if so add one extrac channel
        n_chan_images = 3
        if CONFIG.RL_priv_use_seg:
            if CONFIG.MISC_dataset in ['dubai']:
                n_chan_images = 6
            elif CONFIG.MISC_dataset in ['masa', 'masa_filt']:
                n_chan_images = 4
            else:
                raise(Exception("Define which type of segmentation this dataset has"))
        self.image = torch.zeros((self.n_eps, n_chan_images, self.im_H , self.im_W))
        # Account for the extra channel tracking where the image ends
        n_chan_crops = n_chan_images
        # Add extra one crop for final position after final action
        self.crops = torch.zeros((self.n_eps, self.ep_len + 1, n_chan_crops, self.p_H, self.p_W)).to(self.CONFIG.device)
        # If softmax enabled the action is onehotencoding of 8 possible direcitons
        self.actions = torch.zeros((self.n_eps, self.ep_len , 8)).to(self.CONFIG.device)
        # Distance from current crop to goal for all action-reward-state tuples
        self.dists = torch.zeros((self.n_eps, self.ep_len)).to(self.device)
        self.dists[:,:] = torch.nan
        # NOTE(review): self.iou is allocated here and again below, and both
        # has_converged and hasConverged exist — confirm which are in use.
        self.iou = torch.zeros((self.n_eps,))
        self.has_converged = torch.zeros((self.n_eps,))
        self.crops_goal = torch.zeros((self.n_eps , n_chan_crops, self.p_H, self.p_W)).to(self.CONFIG.device)
        # Optional one-hot grids over patch positions (visited / current)
        if CONFIG.RL_priv_grid_location:
            self.grid_loc = torch.zeros((self.n_eps , self.ep_len , int(self.im_H / self.p_H) , int(self.im_W/self.p_W))).to(CONFIG.device)
            self.grid_curr_loc = torch.zeros((self.n_eps , self.ep_len , int(self.im_H / self.p_H) , int(self.im_W/self.p_W))).to(CONFIG.device)
        # Batch statistics
        self.steps = torch.zeros(( self.n_eps , ))
        self.iou = torch.zeros(( self.n_eps , ))
        self.hasConverged = torch.zeros(( self.n_eps , ))
        self.final_distance = torch.zeros((self.n_eps , ))
        self.final_distance[:] = torch.nan
        self.distance = torch.zeros(( self.n_eps , ))
        self.cumulativeRewardToGo = torch.zeros(( self.n_eps , ))
        # Set all values to nan. Only use episodes which were successfull for mean calc
        self.stepRatio = torch.zeros(( self.n_eps , ))
        self.stepRatio[:] = torch.nan
        self.time = torch.zeros(( self.n_eps , ))
def append_episode(self, episode):
    """Insert one finished episode into slot ``self.idx`` of the batch.

    Copies the per-step trajectory tensors (crops, rewards, actions,
    distances, locations) into the batch buffers, records the scalar
    episode statistics, and finally advances the insertion index.
    """
    slot = self.idx
    n_steps = episode.step
    # Per-step trajectory data (note: crops/locs include the final state,
    # hence the +1 on their step dimension).
    self.steps[slot] = n_steps
    self.crops[slot, :n_steps + 1, :, :, :] = episode.crops
    self.weights[slot, :n_steps] = episode.weights
    self.actions[slot, :n_steps, :] = episode.actions
    self.dists[slot, :n_steps] = episode.dists
    # Final distance to the goal is only recorded for failed episodes.
    if episode.iou.item() < CONFIG.RL_done_iou:
        step_sz = int(CONFIG.MISC_patch_size[0] * CONFIG.RL_softmax_step_size)
        offset = (episode.loc_goal[0, :2] - episode.locs[-1, :2]) / step_sz
        self.final_distance[slot] = offset.abs().int().max().item()
    self.locs[slot, :n_steps + 1, :] = episode.locs
    self.locs_goal[slot, :] = episode.loc_goal
    # Scalar episode statistics.
    self.iou[slot] = episode.iou
    self.hasConverged[slot] = 1.0 if episode.iou >= CONFIG.RL_done_iou else 0.0
    self.distance[slot] = episode.dists[-1]
    self.cumulativeRewardToGo[slot] = episode.weights[0]
    # Step ratio (optimal steps / taken steps) is only defined on success.
    if episode.iou >= CONFIG.RL_done_iou:
        self.stepRatio[slot] = episode.min_steps / episode.step
    self.time[slot] = episode.time
    self.image[slot, :, :, :] = episode.image
    # Difficulty of the played game == distance to goal at the first step.
    self.difficulty[slot] = episode.dists[0]
    if CONFIG.RL_priv_grid_location:
        self.grid_loc[slot, :, :, :] = episode.grid_loc
        self.grid_curr_loc[slot, :, :, :] = episode.grid_curr_loc
    self.crops_goal[slot, :, :, :] = episode.crop_goal
    self.idx += 1
def prepare_for_loss(self):
    """Prepare stored rewards-to-go for the loss computation.

    Currently this only normalizes the per-step weights across the batch
    (via ``normalize_batch_weights``) and detaches them from the autograd
    graph so the loss does not backpropagate through reward processing.
    """
    self.weights = normalize_batch_weights(self.weights, self.dists).detach()
def get_episode(self, ep_id, step_id):
    """Reconstruct a detached episode (up to ``step_id``) from the batch.

    Returns a tuple ``(episode, action, weight)`` where ``action`` and
    ``weight`` belong to the transition taken at ``step_id``; both are
    ``None`` when ``step_id`` lies beyond the episode's recorded steps
    (i.e. the full episode was requested and no action exists in the
    final state).
    """
    episode = EpisodeStorage(CONFIG=self.CONFIG)
    episode.initialize(self.image[ep_id, :, :, :][None, :].clone())
    # Overwrite the freshly initialized episode with the stored trajectory.
    episode.crops = self.crops[ep_id, 0:step_id, :, :, :].clone()
    episode.crop_goal = self.crops_goal[ep_id, :, :, :].clone().unsqueeze(0)
    episode.locs = self.locs[ep_id, :step_id, :]
    episode.loc_goal = self.locs_goal[ep_id, :].unsqueeze(0)
    episode.weights = self.weights[ep_id, :step_id]
    # Number of steps actually taken up to this point.
    episode.step = step_id - 1
    if CONFIG.RL_priv_grid_location:
        episode.grid_loc = self.grid_loc[ep_id]
        episode.grid_curr_loc = self.grid_curr_loc[ep_id]
    if step_id > self.steps[ep_id]:
        # Entire episode requested; no action is taken in the final state.
        action, weight = None, None
    else:
        action = self.actions[ep_id, step_id - 1, :]
        weight = self.weights[ep_id, step_id - 1]
    return episode, action, weight
def store(self, mode='', eval=False):
    """Collect the batch statistics into the statistics collector ``self.sc``.

    Parameters
    ----------
    mode : str
        Prefix prepended to every statistic name (e.g. ``'Val'``).
    eval : bool
        When True, additionally store the goal location and the action
        probabilities of the first episode for later inspection.
        (Parameter name shadows the builtin but is kept for caller
        compatibility.)
    """
    # Mean final distance over failed episodes only (successful episodes
    # hold nan); skip the statistic entirely if every episode succeeded.
    batch_final_distance = self.final_distance.nanmean()
    if not math.isnan(batch_final_distance.item()):
        self.sc.s(mode + 'FinalDistanceOnlyFailure').collect(batch_final_distance.item())
    self.sc.s(mode + 'Steps').collect(self.steps.mean().item())
    self.sc.s(mode + 'IoU').collect(self.iou.mean().item())
    self.sc.s(mode + 'CumulativeRewardToGo').collect(self.cumulativeRewardToGo.mean().item())
    self.sc.s(mode + 'HasConverged').collect(self.hasConverged.float().mean().item())
    # Boolean masks (rows) selecting episodes whose initial distance to the
    # goal was 1..6; used to break the main statistics down per difficulty.
    inds = self.dists[:, 0].repeat(6, 1).cpu() == torch.as_tensor([[1], [2], [3], [4], [5], [6]])
    # Per-start-distance means, previously hand-unrolled six times each.
    for name, values in (('SeparatedSteps', self.steps),
                         ('SeparatedIoU', self.iou),
                         ('SeparatedCumulativeRewardToGo', self.cumulativeRewardToGo),
                         ('SeparatedHasConverged', self.hasConverged)):
        temp = np.array([values[ind].mean().item() for ind in inds])
        self.sc.s(mode + name).collect(temp)
    # Relative frequency of the played game difficulties (1-4).
    relative_diff = np.array([((self.difficulty == d).sum() / self.n_eps).item()
                              for d in range(1, 5)])
    self.sc.s(mode + 'Difficulty').collect(relative_diff)
    # Mean step ratio over successful episodes only (failures hold nan).
    batch_step_ratio = self.stepRatio.nanmean()
    if not math.isnan(batch_step_ratio.item()):
        self.sc.s(mode + 'StepRatioOnlySuccess').collect(batch_step_ratio.item())
    # Distribution of actions actually taken; all-zero rows are padding
    # beyond each episode's last step and are filtered out.
    temp_actions_taken = torch.flatten(self.actions, start_dim=0, end_dim=1)
    temp_actions_taken = temp_actions_taken[temp_actions_taken.sum(dim=1) != 0, :].mean(dim=0)
    self.sc.s(mode + 'ActionsTaken').collect(temp_actions_taken.cpu().numpy())
    if eval:
        self.sc.s(mode + 'GoalLoc').collect(self.locs_goal[0].numpy())
        self.sc.s(mode + 'ActionProbs').collect(self.actions[0].cpu().numpy())
class EpisodeStorage():
    """Storage container for all information generated during one episode.

    Holds the per-step trajectory (crops, actions, rewards, locations,
    distances) plus start/goal patches, and provides lifecycle methods:
    ``initialize`` -> repeated ``update`` -> ``finish``.
    """
    def __init__(self, CONFIG=CONFIG):
        # Cache config-derived sizes: episode length (+1 for final state),
        # patch size and full image size.
        self.CONFIG = CONFIG
        self.device = CONFIG.device
        self.ep_len = self.CONFIG.RL_max_episode_length +1
        self.p_H,self.p_W = self.CONFIG.MISC_patch_size
        self.im_H,self.im_W = self.CONFIG.MISC_im_size
        self.misc = []
        # If grid_game is enabled calculate the grid size (patches per axis).
        if CONFIG.MISC_grid_game:
            self.grid_size = (int(self.im_H / self.p_H) , int(self.im_W / self.p_W))
    def initialize(self, image, loc_goal=None, loc_start=None, probs_diff=None):
        """Reset the episode for a new image and sample/load start and goal.

        When ``loc_goal``/``loc_start`` are given, the crops are taken
        deterministically at those locations. Otherwise they are sampled
        randomly, or (when ``probs_diff`` is given) a grid game of random
        difficulty drawn from ``probs_diff`` is sampled.
        """
        # If enabled we have received a signal interrupt and should exit.
        if CONFIG.TERMINATE:
            sys.exit(1)
        image = image.detach()
        self.image = image
        self.step = 0
        self.time = time.perf_counter()
        self.misc = []
        # Allocate buffers that are always active (one slot per step;
        # actions are one-hot encodings over 8 possible directions).
        self.weights = torch.zeros((self.ep_len-1)).to(self.CONFIG.device)
        self.actions = torch.zeros((self.ep_len-1 , 8)).to(self.CONFIG.device)
        self.locs = torch.zeros((self.ep_len , 4))
        # Check if segmentation input is enabled; if so the image carries
        # extra channels whose count depends on the dataset.
        n_chan_images = 3
        if CONFIG.RL_priv_use_seg:
            if CONFIG.MISC_dataset in ['dubai']:
                n_chan_images = 6
            elif CONFIG.MISC_dataset in ['masa', 'masa_filt']:
                n_chan_images = 4
            else:
                raise(Exception("Define which type of segmentation this dataset has"))
        # Crops share the image channel count.
        n_chan_crops = n_chan_images
        self.dists = torch.zeros((self.ep_len))
        self.crops = torch.zeros((self.ep_len, n_chan_crops, self.p_H, self.p_W)).to(self.CONFIG.device)
        # Sample or load the start and goal crops.
        if loc_goal is not None and loc_start is not None:
            self.loc_goal = loc_goal
            self.loc_start = loc_start
            self.crop_goal,self.loc_goal = get_deterministic_crops(image,loc_goal)
            # Load the start patch at the provided location.
            self.crops[self.step,:,:,:] ,self.loc_start= get_deterministic_crops(image,loc_start)
        else:
            if probs_diff is None:
                # Fully random start and goal, subject to the configured
                # max distance / min IoU constraints.
                self.crops[0,:,:,:] , self.loc_start = get_random_crops(image)
                self.crop_goal, self.loc_goal = get_random_crops(image,self.loc_start.cpu(),
                            max_dist= self.CONFIG.RL_max_start_goal_dist,
                            min_iou = self.CONFIG.RL_min_start_goal_iou)
                self.correct_act = torch.zeros(1,8)
            else:
                # Sample a game with random difficulty according to the
                # supplied probability vector (difficulties 1-4).
                diff = np.random.choice(4, p = probs_diff.numpy()) + 1
                game = sample_grid_games_fixed_distance( diff, 1) * CONFIG.MISC_step_sz
                self.crops[0,:,:,:], self.loc_start = get_deterministic_crops(image, game[:,0:2])
                self.crop_goal , self.loc_goal = get_deterministic_crops(image, game[:,2:])
                self.correct_act = torch.zeros(1,8)
        self.crop_goal = self.crop_goal.to(self.CONFIG.device)
        # Minimum number of steps needed to reach the goal (Chebyshev
        # distance in units of the agent's step size).
        self.min_steps = ((self.loc_goal[0,:2] - self.loc_start[0,:2])/int(self.p_H*CONFIG.RL_softmax_step_size)).abs().int().max()
        if CONFIG.MISC_grid_game:
            self.dists[self.step] = ((self.loc_goal[0,:2] - self.loc_start[0,:2])/int(self.p_H*CONFIG.RL_softmax_step_size)).abs().int().max()
        else:
            self.dists[self.step] = torch.linalg.norm((self.loc_start-self.loc_goal)[:2])
        self.locs[0 ,:] = self.loc_start
        self.loc_current = self.loc_start.clone().detach()
        # If enabled, create a grid of all possible locations in the image
        # and fill it with ones for where the agent has been / currently is.
        if CONFIG.RL_priv_grid_location:
            hg, wg = int(self.im_H / self.p_H) , int(self.im_W / self.p_W)
            self.grid_loc = torch.zeros((CONFIG.RL_max_episode_length, hg, wg)).to(CONFIG.device)
            self.grid_curr_loc = torch.zeros((CONFIG.RL_max_episode_length, hg, wg)).to(CONFIG.device)
            grid_loc_start = int(self.loc_start[0,0].item() / self.p_H) , int(self.loc_start[0,1].item() / self.p_W)
            # Fill in the initial position for every step slot.
            self.grid_loc[:,grid_loc_start[0] ,grid_loc_start[1]] = 1
            self.grid_curr_loc[0,grid_loc_start[0] , grid_loc_start[1]] = 1
    def update(self, action, reward, loc, crop, misc=None):
        """Append one transition (action, reward, new location/crop)."""
        self.actions[self.step,:] = action
        self.weights[self.step] = reward
        self.step += 1
        self.locs[self.step , :] = loc
        self.misc.append(misc)
        # Distance to goal from the new location: Chebyshev in step units
        # for grid games, Euclidean otherwise.
        if CONFIG.MISC_grid_game:
            self.dists[self.step] = ((self.loc_goal[0,:2] - loc[0,:2])/int(self.p_H*CONFIG.RL_softmax_step_size)).abs().int().max()
        else:
            self.dists[self.step] = torch.linalg.norm((loc-self.loc_goal)[:2])
        self.crops[self.step, :,:,:] = crop
        self.loc_current = loc.detach().clone()
        # Fill in the grid location of where the agent currently is.
        if CONFIG.RL_priv_grid_location:
            grid_loc = int(loc[0,0].item() / self.p_H) , int(loc[0,1].item() / self.p_W)
            # If the agent has walked outside the image, don't fill in.
            if (grid_loc[0] < 0 or grid_loc[0] >= self.grid_size[0]) or (grid_loc[1] < 0 or grid_loc[1] >= self.grid_size[1]):
                pass
            else:
                self.grid_loc[(self.step-1): , grid_loc[0] , grid_loc[1] ] = 1
                self.grid_curr_loc[(self.step -1), grid_loc[0] , grid_loc[1] ] = 1
    def finish(self):
        """Finalize the episode: trim buffers, compute rewards-to-go and IoU."""
        # Average per-step wall-clock time.
        self.time = (time.perf_counter() - self.time)/self.step
        self.dists = self.dists[:self.step]
        self.crops = self.crops[:self.step + 1,:,:,:]
        self.weights = self.weights[:self.step]
        self.weights = rewards_to_go(self.weights)
        self.actions = self.actions[:self.step,:]
        self.locs = self.locs[0:self.step +1, :]
        # Compute final IoU between the last visited location and the goal.
        self.iou = compute_iou(self.loc_goal , self.locs[-1,:])
        self.hasConverged = self.iou >= CONFIG.RL_done_iou
| 18,061 | 44.38191 | 181 | py |
airloc | airloc-master/utils/normalize_dataset.py | #!/bin/env python3
# Compute per-channel pixel mean/std for a dataset and write them to
# <dataset>/stats.json for later input normalization.
import os
import numpy as np
import imageio as iio
import time
import torch
import json
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import random_split,Subset
from utils.dataset_utils import CustomDataset, Dubai, Masa, MasaFilt, MasaFull,MasaSeven, DubaiSeven, ImagesPre
from utils.utils import load_normalize_data
from config import CONFIG
import argparse
"""
Training set:
RGB-means [128.65051054 118.45636216 130.87956071]
RGB-stds [42.03129609 39.19244565 41.53636231]
"""
parser = argparse.ArgumentParser()
parser.add_argument("--dataset" , type = str, default = CONFIG.MISC_dataset, help = "Select which dataset to normalize.")
args = parser.parse_args()
# Set dataset
dataset = args.dataset
print(f"Calculating the mean and std for {dataset}")
# Global vars
BASE_PATH_DATA = CONFIG.MISC_dataset_path
SEED = 0
batch_size = 1
n_chan = 3
transform = None
download = False
# Instantiate the training split of the requested dataset; n_chan is the
# number of image channels (segmentation-augmented datasets have more).
if dataset == 'masa':
    transform = transforms.Compose([transforms.ToTensor()])
    trainset = Masa(CONFIG.MISC_dataset_path,split = 'train',transform = transform)
elif dataset == 'masa_filt':
    transform = transforms.Compose([transforms.ToTensor()])
    trainset = MasaFilt(CONFIG.MISC_dataset_path,split = 'train',transform = transform)
elif dataset == 'dubai':
    CONFIG.RL_priv_use_seg = True
    trainset = Dubai(CONFIG.MISC_dataset_path,split = 'train',transform = transform)
    n_chan = 6
elif dataset.startswith('custom_'):
    trainset = CustomDataset(CONFIG.MISC_dataset_path, CONFIG.MISC_dataset[7:], split='train', transform = transform)
elif dataset == 'masa_full':
    trainset = MasaFull(CONFIG.MISC_dataset_path , split = 'train' , transform = transform)
    n_chan = 3
elif dataset == 'masa_seven':
    trainset = MasaSeven(CONFIG.MISC_dataset_path, split = 'train', transform = transform)
    n_chan = 3
elif dataset == 'dubai_seven':
    trainset = DubaiSeven(CONFIG.MISC_dataset_path, split = 'train', transform = transform)
    n_chan = 3
elif dataset == 'images_pre':
    transform = transforms.Compose([transforms.ToTensor()])
    trainset = ImagesPre(CONFIG.MISC_dataset_path, split = 'train', transform = transform)
    n_chan = 3
elif dataset == 'images_post':
    transform = transforms.Compose([transforms.ToTensor()])
    trainset = ImagesPre(CONFIG.MISC_dataset_path, split = 'train', transform = transform, post_instead=True)
    n_chan = 3
else:
    raise(Exception("Unknown dataset"))
# NOTE(review): shuffle=download is always False here; presumably shuffling
# is simply irrelevant for computing dataset-wide statistics — confirm.
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=download, num_workers=2 )
# Calculate mean and std of pixel values in dataset.
# See this page for how to do it incrementally with respect to std:
# https://stackoverflow.com/questions/5543651/computing-standard-deviation-in-a-stream
# s0 = pixel count, s1 = sum, s2 = sum of squares (per channel).
img_means = np.zeros(n_chan)
img_s0s = np.zeros(n_chan)
img_s1s = np.zeros(n_chan)
img_s2s = np.zeros(n_chan)
for it, frame in enumerate(trainloader):
    frame = frame[0].detach().numpy()
    IM_H ,IM_W = frame.shape[-2:]
    # Update running means and the three power sums per channel.
    for chan_idx in range(n_chan):
        img_means[chan_idx] = (it * img_means[chan_idx] + np.mean(frame[:, chan_idx, :,:])) / (it + 1)
        img_s0s[chan_idx] += IM_H * IM_W
        img_s1s[chan_idx] += np.sum(frame[:, chan_idx, :,:])
        img_s2s[chan_idx] += np.sum(frame[:, chan_idx, :,:] * frame[:, chan_idx, :,:])
    # Periodically display progress with the current std estimate.
    if it % 100 == 0:
        img_stds = np.sqrt(np.abs(((img_s0s * img_s2s) - img_s1s * img_s1s) / (img_s0s * (img_s0s - 1))))
        print(f"Iter {it}/{len(trainloader)}")
        print("RGB-means", img_means)
        print("RGB-stds", img_stds)
# Custom datasets live under Custom/<name>.
dataset_path = dataset if not dataset.startswith('custom_') else os.path.join("Custom", dataset[7:])
# Final sample standard deviation from the accumulated power sums.
img_stds = np.sqrt(np.abs(((img_s0s * img_s2s) - img_s1s * img_s1s) / (img_s0s * (img_s0s - 1))))
stat_path = os.path.join(CONFIG.MISC_dataset_path,dataset_path,'stats.json')
stats = {"means":list(img_means),"stds":list(img_stds)}
with open(stat_path, 'w') as fp:
    json.dump(stats,fp,indent = 4)
print(f"Done!! \nThe mean and std for {dataset} is:")
print("RGB-means", img_means)
print("RGB-stds", img_stds)
| 4,167 | 37.592593 | 121 | py |
emcee3 | emcee3-master/docs/conf.py | # -*- coding: utf-8 -*-
# Sphinx configuration for the emcee3 documentation build.
import os
import sys

d = os.path.dirname
# Make the package importable from the repository root (two levels up).
sys.path.insert(0, d(d(os.path.abspath(__file__))))
import emcee3  # NOQA

extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinx_gallery.gen_gallery",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
# General information about the project.
project = u"emcee3"
copyright = u"2014-2016 Dan Foreman-Mackey & contributors"
version = emcee3.__version__
release = emcee3.__version__
exclude_patterns = ["_build", "_static/notebooks/profile"]
pygments_style = "sphinx"
# Sphinx-gallery: build the example gallery from ../examples.
sphinx_gallery_conf = dict(
    examples_dirs="../examples",
    gallery_dirs="examples",
)
# Readthedocs sets READTHEDOCS=True and provides the theme itself; only
# configure the RTD theme explicitly for local builds.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# BUG FIX: this was misspelled `htmp_theme_options`, so Sphinx silently
# ignored the options dict; the correct setting name is `html_theme_options`.
html_theme_options = dict(
    analytics_id="analytics_id",
)
html_context = dict(
    display_github=True,
    github_user="dfm",
    github_repo="emcee",
    github_version="emcee3",
    conf_py_path="/docs/",
    favicon="favicon.png",
    script_files=[
        "_static/jquery.js",
        "_static/underscore.js",
        "_static/doctools.js",
        "//cdn.mathjax.org/mathjax/latest/MathJax.js"
        "?config=TeX-AMS-MML_HTMLorMML",
        "_static/js/analytics.js",
    ],
)
html_static_path = ["_static"]
html_show_sourcelink = False
| 1,488 | 22.265625 | 62 | py |
u-unwrap3D | u-unwrap3D-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'u-Unwrap3D'
copyright = '2023, Felix Y. Zhou'
author = 'Felix Y. Zhou'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {} | 2,702 | 31.178571 | 79 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/fc.py | """
This code is from Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang's repository.
https://github.com/jnhwkim/ban-vqa
"""
from __future__ import print_function
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
class FCNet(nn.Module):
    """Weight-normalized multi-layer perceptron with optional dropout.

    One linear layer is built for every consecutive pair in ``dims``.
    Each linear layer is weight-normalized; when ``dropout`` is positive a
    Dropout module precedes it, and when ``act`` is a non-empty name of a
    ``torch.nn`` activation class that activation follows it.
    """

    def __init__(self, dims, act='ReLU', dropout=0):
        super(FCNet, self).__init__()
        modules = []
        # Pair up consecutive sizes: (dims[0], dims[1]), (dims[1], dims[2]), ...
        for in_dim, out_dim in zip(dims[:-1], dims[1:]):
            if dropout > 0:
                modules.append(nn.Dropout(dropout))
            modules.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
            if act != '':
                modules.append(getattr(nn, act)())
        self.main = nn.Sequential(*modules)

    def forward(self, x):
        """Apply the network to ``x``."""
        return self.main(x)
if __name__ == '__main__':
    # Quick smoke test: print the module structure of two small networks,
    # one with a hidden layer and one single-layer.
    fc1 = FCNet([10, 20, 10])
    print(fc1)
    print('============')
    fc2 = FCNet([10, 20])
    print(fc2)
| 1,212 | 27.880952 | 76 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/main.py | """
This code is modified based on Jin-Hwa Kim's repository (Bilinear Attention Networks - https://github.com/jnhwkim/ban-vqa)
"""
import os
import argparse
import torch
from torch.utils.data import DataLoader, ConcatDataset
import dataset_VQA
import base_model
from train import train
import utils
try:
import _pickle as pickle
except:
import pickle
def parse_args(argv=None):
    """Build and parse the command-line arguments for MEVF VQA training.

    Parameters
    ----------
    argv : list[str] | None
        Explicit argument list to parse; when ``None`` argparse falls back
        to ``sys.argv[1:]``, so the original no-argument call is unchanged.
        Accepting an explicit list makes the function testable.

    Returns
    -------
    argparse.Namespace
        The parsed arguments.
    """
    parser = argparse.ArgumentParser()
    # MODIFIABLE MEVF HYPER-PARAMETERS--------------------------------------------------------------------------------
    # Model loading/saving
    parser.add_argument('--input', type=str, default=None,
                        help='input file directory for continue training from stop one')
    parser.add_argument('--output', type=str, default='saved_models/san_mevf',
                        help='save file directory')
    # Utilities
    parser.add_argument('--seed', type=int, default=1204,
                        help='random seed')
    parser.add_argument('--epochs', type=int, default=40,
                        help='the number of epoches')
    parser.add_argument('--lr', default=0.005, type=float, metavar='lr',
                        help='initial learning rate')
    # Gradient accumulation
    parser.add_argument('--batch_size', type=int, default=32,
                        help='batch size')
    parser.add_argument('--update_freq', default='1', metavar='N',
                        help='update parameters every n batches in an epoch')
    # Choices of attention models
    parser.add_argument('--model', type=str, default='SAN', choices=['BAN', 'SAN'],
                        help='the model we use')
    # Choices of RNN models
    parser.add_argument('--rnn', type=str, default='LSTM', choices=['LSTM', 'GRU'],
                        help='the RNN we use')
    # BAN - Bilinear Attention Networks
    parser.add_argument('--gamma', type=int, default=2,
                        help='glimpse in Bilinear Attention Networks')
    parser.add_argument('--use_counter', action='store_true', default=False,
                        help='use counter module')
    # SAN - Stacked Attention Networks
    parser.add_argument('--num_stacks', default=2, type=int,
                        help='num of stacks in Stack Attention Networks')
    # Utilities - support testing, gpu training or sampling
    parser.add_argument('--print_interval', default=20, type=int, metavar='N',
                        help='print per certain number of steps')
    parser.add_argument('--gpu', type=int, default=0,
                        help='specify index of GPU using for training, to use CPU: -1')
    parser.add_argument('--clip_norm', default=.25, type=float, metavar='NORM',
                        help='clip threshold of gradients')
    # Question embedding
    parser.add_argument('--question_len', default=12, type=int, metavar='N',
                        help='maximum length of input question')
    # NOTE(review): argparse's type=bool treats any non-empty string
    # (including "False") as True; kept as-is for backward compatibility.
    parser.add_argument('--tfidf', type=bool, default=True,
                        help='tfidf word embedding?')
    parser.add_argument('--op', type=str, default='c',
                        help='concatenated 600-D word embedding')
    # Joint representation C dimension
    parser.add_argument('--num_hid', type=int, default=1024,
                        help='dim of joint semantic features')
    # Activation function + dropout for classification module
    parser.add_argument('--activation', type=str, default='relu', choices=['relu'],
                        help='the activation to use for final classifier')
    parser.add_argument('--dropout', default=0.5, type=float, metavar='dropout',
                        help='dropout of rate of final classifier')
    # Train with VQA dataset
    parser.add_argument('--use_VQA', action='store_true', default=False,
                        help='Using TDIUC dataset to train')
    parser.add_argument('--VQA_dir', type=str,
                        help='RAD dir')
    # Optimization hyper-parameters
    parser.add_argument('--eps_cnn', default=1e-5, type=float, metavar='eps_cnn',
                        help='eps - batch norm for cnn')
    parser.add_argument('--momentum_cnn', default=0.05, type=float, metavar='momentum_cnn',
                        help='momentum - batch norm for cnn')
    # input visual feature dimension
    parser.add_argument('--feat_dim', default=32, type=int,
                        help='visual feature dim')
    parser.add_argument('--img_size', default=84, type=int,
                        help='image size')
    # Auto-encoder component hyper-parameters
    parser.add_argument('--autoencoder', action='store_true', default=False,
                        help='End to end model?')
    parser.add_argument('--ae_model_path', type=str, default='pretrained_ae.pth',
                        help='the maml_model_path we use')
    parser.add_argument('--ae_alpha', default=0.001, type=float, metavar='ae_alpha',
                        help='ae_alpha')
    # MAML component hyper-parameters
    parser.add_argument('--maml', action='store_true', default=False,
                        help='End to end model?')
    parser.add_argument('--maml_model_path', type=str, default='pretrained_maml_pytorch_other_optimization_5shot_newmethod.pth',
                        help='the maml_model_path we use')
    parser.add_argument('--maml_nums', type=str, default='0,1,2,3,4,5',
                        help='the numbers of maml models')
    # Parse the supplied argument list (None -> sys.argv[1:]) and return.
    args = parser.parse_args(argv)
    return args
if __name__ == '__main__':
    args = parse_args()
    # '--maml_nums' is given as a comma-separated string; split into a list.
    args.maml_nums = args.maml_nums.split(',')
    # Create output directory and log file.
    utils.create_dir(args.output)
    logger = utils.Logger(os.path.join(args.output, 'log.txt'))
    logger.write(args.__repr__())
    # Set computation device (GPU index >= 0, otherwise CPU).
    device = torch.device("cuda:" + str(args.gpu) if args.gpu >= 0 else "cpu")
    args.device = device
    # Fix random seeds for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True
    # Load dictionary and the VQA training dataset.
    if args.use_VQA:
        dictionary = dataset_VQA.Dictionary.load_from_file(os.path.join(args.VQA_dir, 'dictionary.pkl'))
        train_dset = dataset_VQA.VQAFeatureDataset('train', args, dictionary, question_len=args.question_len)
        # Load validation set (the RAD dataset has no validation split).
        if 'RAD' not in args.VQA_dir:
            val_dset = dataset_VQA.VQAFeatureDataset('val', args, dictionary, question_len=args.question_len)
    batch_size = args.batch_size
    # Create the VQA model via the builder named after the chosen model
    # (build_BAN / build_SAN in base_model).
    constructor = 'build_%s' % args.model
    model = getattr(base_model, constructor)(train_dset, args)
    optim = None
    epoch = 0
    # Resume from a snapshot if one was provided.
    if args.input is not None:
        print('loading %s' % args.input)
        model_data = torch.load(args.input)
        model.load_state_dict(model_data.get('model_state', model_data))
        model.to(device)
        optim = torch.optim.Adamax(filter(lambda p: p.requires_grad, model.parameters()))
        optim.load_state_dict(model_data.get('optimizer_state', model_data))
        epoch = model_data['epoch'] + 1
    # Create the training (and, when available, validation) dataloaders.
    train_loader = DataLoader(train_dset, batch_size, shuffle=True, num_workers=0, collate_fn=utils.trim_collate, pin_memory=True)
    if 'RAD' not in args.VQA_dir:
        eval_loader = DataLoader(val_dset, batch_size, shuffle=False, num_workers=0, collate_fn=utils.trim_collate, pin_memory=True)
    else:
        eval_loader = None
    # Run the training loop.
    train(args, model, train_loader, eval_loader, args.epochs, args.output, optim, epoch)
| 7,710 | 45.173653 | 132 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/base_model.py | """
This code is developed based on Jin-Hwa Kim's repository (Bilinear Attention Networks - https://github.com/jnhwkim/ban-vqa) by Xuan B. Nguyen
"""
import torch
import torch.nn as nn
from attention import BiAttention, StackedAttention
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier
from fc import FCNet
from bc import BCNet
from counting import Counter
from utils import tfidf_loading
from simple_cnn import SimpleCNN, SimpleCNN32
from learner import MAML
from auto_encoder import Auto_Encoder_Model
# Create BAN model
class BAN_Model(nn.Module):
    """Bilinear Attention Network VQA model with MAML/auto-encoder visual input.

    Visual features come from pre-trained MAML model(s) and/or a denoising
    auto-encoder; question features from a word embedding + RNN. BAN glimpses
    then fuse the two modalities.
    """
    def __init__(self, dataset, w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, args, maml_v_emb, ae_v_emb):
        super(BAN_Model, self).__init__()
        self.args = args
        self.dataset = dataset
        self.op = args.op
        self.glimpse = args.gamma
        self.w_emb = w_emb
        self.q_emb = q_emb
        self.v_att = v_att
        self.b_net = nn.ModuleList(b_net)
        self.q_prj = nn.ModuleList(q_prj)
        # Counter projections are only registered when the counter is used.
        if counter is not None:  # if do not use counter
            self.c_prj = nn.ModuleList(c_prj)
        self.classifier = classifier
        self.counter = counter
        self.drop = nn.Dropout(.5)
        self.tanh = nn.Tanh()
        if args.maml:
            # Register multiple MAML feature extractors when several are given.
            if len(self.args.maml_nums) > 1:
                self.maml_v_emb = nn.ModuleList(model for model in maml_v_emb)
            else:
                self.maml_v_emb = maml_v_emb
        if args.autoencoder:
            self.ae_v_emb = ae_v_emb
            # Projects the flattened auto-encoder code to the visual feat dim.
            self.convert = nn.Linear(16384, args.feat_dim)
    def forward(self, v, q):
        """Forward
        v: [batch, num_objs, obj_dim]
        b: [batch, num_objs, b_dim]
        q: [batch_size, seq_length]
        return: logits, not probs (plus the DAE reconstruction when the
        auto-encoder branch is enabled)
        """
        # get visual feature
        if self.args.maml:  # get maml feature
            # Compute all MAML embeddings and concatenate along the feature dim.
            if len(self.args.maml_nums) > 1:
                maml_v_emb = self.maml_v_emb[0](v[0]).unsqueeze(1)
                for j in range(1, len(self.maml_v_emb)):
                    maml_v_emb_temp = self.maml_v_emb[j](v[0]).unsqueeze(1)
                    maml_v_emb = torch.cat((maml_v_emb, maml_v_emb_temp), 2)
            else:
                maml_v_emb = self.maml_v_emb(v[0]).unsqueeze(1)
            v_emb = maml_v_emb
        if self.args.autoencoder:  # get dae feature
            encoder = self.ae_v_emb.forward_pass(v[1])
            decoder = self.ae_v_emb.reconstruct_pass(encoder)
            ae_v_emb = encoder.view(encoder.shape[0], -1)
            ae_v_emb = self.convert(ae_v_emb).unsqueeze(1)
            v_emb = ae_v_emb
        if self.args.maml and self.args.autoencoder:  # concatenate maml feature with dae feature
            v_emb = torch.cat((maml_v_emb, ae_v_emb), 2)
        # get textual feature
        w_emb = self.w_emb(q)
        q_emb = self.q_emb.forward_all(w_emb)  # [batch, q_len, q_dim]
        # Attention: one bilinear fusion + residual question update per glimpse.
        b_emb = [0] * self.glimpse
        att, logits = self.v_att.forward_all(v_emb, q_emb)  # b x g x v x q
        for g in range(self.glimpse):
            b_emb[g] = self.b_net[g].forward_with_weights(v_emb, q_emb, att[:, g, :, :])  # b x l x h
            atten, _ = logits[:, g, :, :].max(2)
            q_emb = self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb
        if self.args.autoencoder:
            return q_emb.sum(1), decoder
        return q_emb.sum(1)
    def classify(self, input_feats):
        """Map the joint representation to answer logits."""
        return self.classifier(input_feats)
# Create SAN model
class SAN_Model(nn.Module):
    """Stacked Attention Network VQA model with MAML/auto-encoder visual input.

    Same visual-feature pipeline as BAN_Model (pre-trained MAML model(s)
    and/or a denoising auto-encoder) but fuses modalities with stacked
    attention over the final RNN hidden state.
    """
    def __init__(self, w_emb, q_emb, v_att, classifier, args, maml_v_emb, ae_v_emb):
        super(SAN_Model, self).__init__()
        self.args = args
        self.w_emb = w_emb
        self.q_emb = q_emb
        self.v_att = v_att
        self.classifier = classifier
        if args.maml:
            # Register multiple MAML feature extractors when several are given.
            if len(self.args.maml_nums) > 1:
                self.maml_v_emb = nn.ModuleList(model for model in maml_v_emb)
            else:
                self.maml_v_emb = maml_v_emb
        if args.autoencoder:
            self.ae_v_emb = ae_v_emb
            # Projects the flattened auto-encoder code to the visual feat dim.
            self.convert = nn.Linear(16384, args.feat_dim)
    def forward(self, v, q):
        """Forward
        v: [batch, num_objs, obj_dim]
        b: [batch, num_objs, b_dim]
        q: [batch_size, seq_length]
        return: logits, not probs (plus the DAE reconstruction when the
        auto-encoder branch is enabled)
        """
        # get visual feature
        if self.args.maml:  # get maml feature
            # Compute all MAML embeddings and concatenate along the feature dim.
            if len(self.args.maml_nums) > 1:
                maml_v_emb = self.maml_v_emb[0](v[0]).unsqueeze(1)
                for j in range(1, len(self.maml_v_emb)):
                    maml_v_emb_temp = self.maml_v_emb[j](v[0]).unsqueeze(1)
                    maml_v_emb = torch.cat((maml_v_emb, maml_v_emb_temp), 2)
            else:
                maml_v_emb = self.maml_v_emb(v[0]).unsqueeze(1)
            v_emb = maml_v_emb
        if self.args.autoencoder:  # get dae feature
            encoder = self.ae_v_emb.forward_pass(v[1])
            decoder = self.ae_v_emb.reconstruct_pass(encoder)
            ae_v_emb = encoder.view(encoder.shape[0], -1)
            ae_v_emb = self.convert(ae_v_emb).unsqueeze(1)
            v_emb = ae_v_emb
        if self.args.maml and self.args.autoencoder:  # concatenate maml feature with dae feature
            v_emb = torch.cat((maml_v_emb, ae_v_emb), 2)
        # get textual feature
        w_emb = self.w_emb(q)
        q_emb = self.q_emb(w_emb)  # [batch, q_dim], return final hidden state
        # Stacked attention over the visual features, queried by the question.
        att = self.v_att(v_emb, q_emb)
        if self.args.autoencoder:
            return att, decoder
        return att
    def classify(self, input_feats):
        """Map the joint representation to answer logits."""
        return self.classifier(input_feats)
# Build BAN model
def build_BAN(dataset, args, priotize_using_counter=None):
    """Construct a BAN VQA model from ``args`` and ``dataset``.

    Builds word/question embeddings, bilinear attention, optional pre-trained
    MAML and auto-encoder visual encoders, the optional counter module, the
    BAN residual projections and the classifier.

    BUGFIX: the default of ``priotize_using_counter`` is now ``None``.  With
    the previous default of ``False`` the ternary below always overrode
    ``args.use_counter``, so the ``--use_counter`` flag was silently ignored.
    Passing an explicit True/False still takes priority over the flag.
    """
    # init word embedding module, question embedding module, and Attention network
    w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, .0, args.op)
    q_emb = QuestionEmbedding(300 if 'c' not in args.op else 600, args.num_hid, 1, False, .0, args.rnn)
    v_att = BiAttention(dataset.v_dim, args.num_hid, args.num_hid, args.gamma)
    # build and load pre-trained MAML model(s)
    maml_v_emb = None
    if args.maml:
        if len(args.maml_nums) > 1:
            # load multiple pre-trained maml models
            maml_v_emb = []
            for model_t in args.maml_nums:
                weight_path = args.VQA_dir + '/maml/' + 't%s_'%(model_t) + args.maml_model_path
                print('load initial weights MAML from: %s' % (weight_path))
                maml_v_emb_temp = MAML(args.VQA_dir)
                maml_v_emb_temp.load_state_dict(torch.load(weight_path))
                maml_v_emb.append(maml_v_emb_temp)
        else:
            weight_path = args.VQA_dir + '/maml/' + 't%s_' % (args.maml_nums[0]) + args.maml_model_path
            print('load initial weights MAML from: %s' % (weight_path))
            maml_v_emb = MAML(args.VQA_dir)
            maml_v_emb.load_state_dict(torch.load(weight_path))
    # build and load pre-trained Auto-encoder model
    ae_v_emb = None
    if args.autoencoder:
        ae_v_emb = Auto_Encoder_Model()
        weight_path = args.VQA_dir + '/' + args.ae_model_path
        print('load initial weights DAE from: %s'%(weight_path))
        ae_v_emb.load_state_dict(torch.load(weight_path))
    # Loading tfidf weighted embedding
    if hasattr(args, 'tfidf'):
        w_emb = tfidf_loading(args.tfidf, w_emb, args)
    # Optional module: counter for BAN
    use_counter = args.use_counter if priotize_using_counter is None else priotize_using_counter
    if use_counter or priotize_using_counter:
        objects = 10  # minimum number of boxes
        counter = Counter(objects)
    else:
        counter = None
    # init BAN residual network
    b_net = []
    q_prj = []
    c_prj = []
    for i in range(args.gamma):
        b_net.append(BCNet(dataset.v_dim, args.num_hid, args.num_hid, None, k=1))
        q_prj.append(FCNet([args.num_hid, args.num_hid], '', .2))
        if use_counter or priotize_using_counter:
            c_prj.append(FCNet([objects + 1, args.num_hid], 'ReLU', .0))
    # init classifier
    classifier = SimpleClassifier(
        args.num_hid, args.num_hid * 2, dataset.num_ans_candidates, args)
    # construct VQA model and return (None stands in for a disabled encoder)
    return BAN_Model(dataset, w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, args,
                     maml_v_emb, ae_v_emb)
# Build SAN model
def build_SAN(dataset, args):
    """Assemble the SAN pipeline: word/question embeddings, stacked attention,
    optional pre-trained MAML / auto-encoder visual encoders, and a classifier."""
    # word embedding, question embedding and the stacked-attention module
    w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0, args.op)
    q_emb = QuestionEmbedding(300 if 'c' not in args.op else 600, args.num_hid, 1, False, 0.0, args.rnn)
    v_att = StackedAttention(args.num_stacks, dataset.v_dim, args.num_hid, args.num_hid, dataset.num_ans_candidates,
                             args.dropout)
    # pre-trained MAML encoder(s); None when the branch is disabled
    maml_v_emb = None
    if args.maml:
        if len(args.maml_nums) > 1:
            maml_v_emb = []
            for model_t in args.maml_nums:
                weight_path = args.VQA_dir + '/maml/' + 't%s_'%(model_t) + args.maml_model_path
                print('load initial weights MAML from: %s' % (weight_path))
                one_emb = MAML(args.VQA_dir)
                one_emb.load_state_dict(torch.load(weight_path))
                maml_v_emb.append(one_emb)
        else:
            weight_path = args.VQA_dir + '/maml/' + 't%s_' % (args.maml_nums[0]) + args.maml_model_path
            print('load initial weights MAML from: %s' % (weight_path))
            maml_v_emb = MAML(args.VQA_dir)
            maml_v_emb.load_state_dict(torch.load(weight_path))
    # pre-trained denoising auto-encoder; None when disabled
    ae_v_emb = None
    if args.autoencoder:
        ae_v_emb = Auto_Encoder_Model()
        weight_path = args.VQA_dir + '/' + args.ae_model_path
        print('load initial weights DAE from: %s'%(weight_path))
        ae_v_emb.load_state_dict(torch.load(weight_path))
    # optional tfidf-weighted word embedding
    if hasattr(args, 'tfidf'):
        w_emb = tfidf_loading(args.tfidf, w_emb, args)
    classifier = SimpleClassifier(
        args.num_hid, 2 * args.num_hid, dataset.num_ans_candidates, args)
    return SAN_Model(w_emb, q_emb, v_att, classifier, args, maml_v_emb, ae_v_emb)
MICCAI21_MMQ | MICCAI21_MMQ-main/test.py | """
This code is modified based on Jin-Hwa Kim's repository (Bilinear Attention Networks - https://github.com/jnhwkim/ban-vqa) by Xuan B. Nguyen
"""
import argparse
import torch
from torch.utils.data import DataLoader
import dataset_VQA
import base_model
import utils
import pandas as pd
import os
import json
# Answer-type and question-type buckets used when aggregating VQA-RAD accuracy.
answer_types = ['CLOSED', 'OPEN', 'ALL']
# NOTE(review): "quesntion_types" is a historical typo; kept because it is
# referenced by name throughout this module.
quesntion_types = ['COUNT', 'COLOR', 'ORGAN', 'PRES', 'PLANE', 'MODALITY', 'POS', 'ABN', 'SIZE', 'OTHER', 'ATTRIB']
def compute_score_with_logits(logits, labels):
    """Return per-sample soft scores: the label weight at each argmax logit.

    logits: [batch, num_answers] model outputs.
    labels: [batch, num_answers] soft ground-truth scores.
    """
    predicted = logits.argmax(dim=1)
    one_hots = torch.zeros(*labels.size()).to(predicted.device)
    one_hots.scatter_(1, predicted.view(-1, 1), 1)
    return one_hots * labels
def parse_args():
    """Build and parse the command-line arguments for VQA evaluation.

    Returns an argparse.Namespace.  Note that ``--maml_nums`` arrives as a
    comma-separated string which the caller splits into a list before use.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--ensemble', type=bool, default=False,
                        help='ensemble flag. If True, generate a logit file which is used in the ensemble part')
    # MODIFIABLE MEVF HYPER-PARAMETERS--------------------------------------------------------------------------------
    # Model loading/saving
    parser.add_argument('--split', type=str, default='test')
    parser.add_argument('--input', type=str, default='saved_models/SAN_MEVF',
                        help='input file directory for loading a model')
    parser.add_argument('--output', type=str, default='results',
                        help='output file directory for saving VQA answer prediction file')
    # Utilities
    # NOTE(review): default is an int while type=str; only safe because the
    # value is later used via '%s' formatting.
    parser.add_argument('--epoch', type=str, default=19,
                        help='the best epoch')
    # Gradient accumulation
    parser.add_argument('--batch_size', type=int, default=1,
                        help='batch size')
    # Choices of Attention models
    parser.add_argument('--model', type=str, default='SAN', choices=['BAN', 'SAN'],
                        help='the model we use')
    # Choices of RNN models
    parser.add_argument('--rnn', type=str, default='LSTM', choices=['LSTM', 'GRU'],
                        help='the RNN we use')
    # BAN - Bilinear Attention Networks
    parser.add_argument('--gamma', type=int, default=2,
                        help='glimpse in Bilinear Attention Networks')
    parser.add_argument('--use_counter', action='store_true', default=False,
                        help='use counter module')
    # SAN - Stacked Attention Networks
    parser.add_argument('--num_stacks', default=2, type=int,
                        help='num of stacks in Stack Attention Networks')
    # Utilities - gpu
    parser.add_argument('--gpu', type=int, default=0,
                        help='specify index of GPU using for training, to use CPU: -1')
    # Question embedding
    parser.add_argument('--op', type=str, default='c',
                        help='concatenated 600-D word embedding')
    # Joint representation C dimension
    parser.add_argument('--num_hid', type=int, default=1024,
                        help='dim of joint semantic features')
    # Activation function + dropout for classification module
    parser.add_argument('--activation', type=str, default='relu', choices=['relu'],
                        help='the activation to use for final classifier')
    parser.add_argument('--dropout', default=0.5, type=float, metavar='dropout',
                        help='dropout of rate of final classifier')
    # Train with RAD
    parser.add_argument('--use_VQA', action='store_true', default=False,
                        help='Using TDIUC dataset to train')
    parser.add_argument('--VQA_dir', type=str,
                        help='RAD dir')
    # Optimization hyper-parameters
    parser.add_argument('--eps_cnn', default=1e-5, type=float, metavar='eps_cnn',
                        help='eps - batch norm for cnn')
    parser.add_argument('--momentum_cnn', default=0.05, type=float, metavar='momentum_cnn',
                        help='momentum - batch norm for cnn')
    # input visual feature dimension
    parser.add_argument('--feat_dim', default=32, type=int,
                        help='visual feature dim')
    parser.add_argument('--img_size', default=256, type=int,
                        help='image size')
    # Auto-encoder component hyper-parameters
    parser.add_argument('--autoencoder', action='store_true', default=False,
                        help='End to end model?')
    parser.add_argument('--ae_model_path', type=str, default='pretrained_ae.pth',
                        help='the maml_model_path we use')
    # MAML component hyper-parameters
    parser.add_argument('--maml', action='store_true', default=False,
                        help='End to end model?')
    parser.add_argument('--maml_model_path', type=str, default='pretrained_maml_pytorch_other_optimization_5shot_newmethod.pth',
                        help='the maml_model_path we use')
    parser.add_argument('--maml_nums', type=str, default='0,1,2,3,4,5',
                        help='the numbers of maml models')
    # Return args
    args = parser.parse_args()
    return args
# Load questions
def get_question(q, dataloader):
    """Decode a 1-D (or [1, seq]) token-id tensor back into a question string.

    Ids at or beyond the dictionary size render as '_' (padding).
    FIX: the accumulator was named ``str``, shadowing the builtin.
    """
    q = q.squeeze(0)
    words = []
    dictionary = dataloader.dataset.dictionary
    for i in range(q.size(0)):
        words.append(dictionary.idx2word[q[i]] if q[i] < len(dictionary.idx2word) else '_')
    return ' '.join(words)
# Load answers
def get_answer(p, dataloader):
    """Map the highest-scoring entry of a single-row score tensor ``p``
    to its answer string."""
    idx = p.argmax(dim=1)
    return dataloader.dataset.label2ans[idx.item()]
# Logit computation (for train, test or evaluate)
def get_result_pathVQA(model, dataloader, device, args):
    """Evaluate ``model`` on PathVQA and return (result, answers_list).

    result: dict keyed by 'other'/'yes/no'/'all' with 'correct'/'total'/'score'.
    answers_list: one dict per sample with answer type, prediction and reference.
    NOTE(review): assumes batch_size == 1 (uses ans_type[0]) — confirm caller.
    """
    ans_types = ['other', 'yes/no', 'all']
    keys = ['correct', 'total', 'score']
    result = dict((i, dict((j, 0.0) for j in keys)) for i in ans_types)
    answers_list = []
    with torch.no_grad():
        import time
        t = time.time()
        total_time = 0.0
        for v, q, a, ans_type, q_type, _ in iter(dataloader):
            # reshape raw image buffers for the MAML / auto-encoder encoders
            if args.maml:
                v[0] = v[0].reshape(v[0].shape[0], 3, args.img_size, args.img_size)
            if args.autoencoder:
                v[1] = v[1].reshape(v[1].shape[0], 128, 128).unsqueeze(1)
            v[0] = v[0].to(device)
            v[1] = v[1].to(device)
            q = q.to(device)
            a = a.to(device)
            # inference and get logit
            if args.autoencoder:
                features, _ = model(v, q)
            else:
                features = model(v, q)
            preds = model.classifier(features)
            final_preds = preds
            batch_score = compute_score_with_logits(final_preds, a.data).sum()
            answer = {}
            answer['answer_type'] = ans_type[0]
            answer['predict'] = get_answer(final_preds, dataloader)
            answer['ref'] = get_answer(a, dataloader)
            answers_list.append(answer)
            # Compute accuracy for each type answer
            if ans_type[0] == "yes/no":
                result[ans_type[0]]['correct'] += float(batch_score)
                result[ans_type[0]]['total'] += 1
            else:
                # every non-yes/no answer type is pooled into 'other'
                result['other']['correct'] += float(batch_score)
                result['other']['total'] += 1
            result['all']['correct'] += float(batch_score)
            result['all']['total'] += 1
            # accumulate wall-clock time per sample
            total_time += time.time() - t
            t = time.time()
        print('time <s/sample>: ', total_time/result['all']['total'])
    result['yes/no']['score'] = result['yes/no']['correct'] / result['yes/no']['total']
    result['other']['score'] = result['other']['correct'] / result['other']['total']
    result['all']['score'] = result['all']['correct'] / result['all']['total']
    return result, answers_list
def get_result_RAD(model, dataloader, device, args):
    """Evaluate ``model`` on VQA-RAD (freeform subset only).

    Returns (result, question_types_result), each keyed by answer type with
    count/true/real statistics and derived percentage scores.
    NOTE(review): assumes batch_size == 1 (uses ans_type[0], p_type[0]).
    """
    keys = ['count', 'real', 'true', 'real_percent', 'score', 'score_percent']
    question_types_result = dict((i, dict((j, dict((k, 0.0) for k in keys)) for j in quesntion_types)) for i in answer_types)
    result = dict((i, dict((j, 0.0) for j in keys)) for i in answer_types)
    with torch.no_grad():
        for v, q, a, ans_type, q_types, p_type in iter(dataloader):
            # only the freeform phrase type is scored
            if p_type[0] != "freeform":
                continue
            if args.maml:
                v[0] = v[0].reshape(v[0].shape[0], 84, 84).unsqueeze(1)
                # v[0] = v[0].transpose(1, 3)
            if args.autoencoder:
                v[1] = v[1].reshape(v[1].shape[0], 128, 128).unsqueeze(1)
            v[0] = v[0].to(device)
            v[1] = v[1].to(device)
            q = q.to(device)
            a = a.to(device)
            # inference and get logit
            if args.autoencoder:
                features, _ = model(v, q)
            else:
                features = model(v, q)
            preds = model.classifier(features)
            final_preds = preds
            batch_score = compute_score_with_logits(final_preds, a.data).sum()
            # Compute accuracy for each type answer
            result[ans_type[0]]['count'] += 1.0
            result[ans_type[0]]['true'] += float(batch_score)
            result[ans_type[0]]['real'] += float(a.sum())
            result['ALL']['count'] += 1.0
            result['ALL']['true'] += float(batch_score)
            result['ALL']['real'] += float(a.sum())
            # a question may carry several comma-separated types
            q_types = q_types[0].split(", ")
            for i in q_types:
                question_types_result[ans_type[0]][i]['count'] += 1.0
                question_types_result[ans_type[0]][i]['true'] += float(batch_score)
                question_types_result[ans_type[0]][i]['real'] += float(a.sum())
                question_types_result['ALL'][i]['count'] += 1.0
                question_types_result['ALL'][i]['true'] += float(batch_score)
                question_types_result['ALL'][i]['real'] += float(a.sum())
        # derive scores / percentages from the accumulated counts
        for i in answer_types:
            result[i]['score'] = result[i]['true']/result[i]['count']
            result[i]['score_percent'] = round(result[i]['score']*100,1)
            for j in quesntion_types:
                if question_types_result[i][j]['count'] != 0.0:
                    question_types_result[i][j]['score'] = question_types_result[i][j]['true'] / question_types_result[i][j]['count']
                    question_types_result[i][j]['score_percent'] = round(question_types_result[i][j]['score']*100, 1)
                if question_types_result[i][j]['real'] != 0.0:
                    question_types_result[i][j]['real_percent'] = round(question_types_result[i][j]['real']/question_types_result[i][j]['count']*100.0, 1)
    return result, question_types_result
# Test phase
if __name__ == '__main__':
    args = parse_args()
    # --maml_nums arrives as a comma-separated string
    args.maml_nums = args.maml_nums.split(',')
    print(args)
    torch.backends.cudnn.benchmark = True
    args.device = torch.device("cuda:" + str(args.gpu) if args.gpu >= 0 else "cpu")
    # Check if evaluating on TDIUC dataset or VQA dataset
    if args.use_VQA:
        dictionary = dataset_VQA.Dictionary.load_from_file(os.path.join(args.VQA_dir, 'dictionary.pkl'))
        eval_dset = dataset_VQA.VQAFeatureDataset(args.split, args, dictionary)
        batch_size = args.batch_size
        # dispatch to base_model.build_BAN / build_SAN by name
        constructor = 'build_%s' % args.model
        model = getattr(base_model, constructor)(eval_dset, args)
        print(model)
        eval_loader = DataLoader(eval_dset, batch_size, shuffle=False, num_workers=0, pin_memory=True, collate_fn=utils.trim_collate)
    def save_questiontype_results(outfile_path, quesntion_types_result):
        # One CSV per answer type with per-question-type statistics.
        for i in quesntion_types_result:
            pd.DataFrame(quesntion_types_result[i]).transpose().to_csv(outfile_path + '/question_type_' + i + '.csv')
    # Testing process
    def process(args, model, eval_loader):
        # Restore the checkpoint for the requested epoch and run evaluation.
        model_path = args.input + '/model_epoch%s.pth' % args.epoch
        print('loading %s' % model_path)
        model_data = torch.load(model_path)
        # Comment because do not use multi gpu
        # model = nn.DataParallel(model)
        model = model.to(args.device)
        model.load_state_dict(model_data.get('model_state', model_data))
        model.train(False)
        if not os.path.exists(args.output):
            os.makedirs(args.output)
        if args.use_VQA:
            if 'RAD' in args.VQA_dir:
                result, answers_list = get_result_RAD(model, eval_loader, args.device, args)
            else:
                result, answers_list = get_result_pathVQA(model, eval_loader, args.device, args)
            outfile_path = args.output + '/' + args.input.split('/')[1]
            outfile = outfile_path + '/results_epoch%s.json' % args.epoch
            if not os.path.exists(os.path.dirname(outfile)):
                os.makedirs(os.path.dirname(outfile))
            print(result)
            json.dump(result, open(outfile, 'w'), indent=4)
            # NOTE(review): for RAD this second dump is the question-type
            # breakdown, not a per-sample answer list — the variable name
            # comes from the pathVQA branch.
            json.dump(answers_list, open(outfile_path + '/answer_list.json', 'w'), indent=4)
        return
    process(args, model, eval_loader)
| 12,886 | 45.189964 | 154 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/dataset_VQA.py | """
This code is modified based on Jin-Hwa Kim's repository (Bilinear Attention Networks - https://github.com/jnhwkim/ban-vqa) by Xuan B. Nguyen
"""
from __future__ import print_function
import os
import json
import _pickle as cPickle
import numpy as np
import utils
import torch
from language_model import WordEmbedding
from torch.utils.data import Dataset
import itertools
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
COUNTING_ONLY = False
# Following Trott et al. (ICLR 2018)
# Interpretable Counting for Visual Question Answering
def is_howmany(q, a, label2ans):
    """True when question ``q`` asks for a count and the answer (if given)
    is a small number accepted by ``answer_filter``."""
    lowered = q.lower()
    asks_count = ('how many' in lowered
                  or ('number of' in lowered and 'number of the' not in lowered)
                  or 'amount of' in lowered
                  or 'count of' in lowered)
    if not asks_count:
        return False
    return a is None or answer_filter(a, label2ans)
def answer_filter(answers, label2ans, max_num=10):
    """True if any labelled answer is a number no greater than ``max_num``."""
    return any(
        label2ans[lab].isdigit() and int(label2ans[lab]) <= max_num
        for lab in answers['labels']
    )
class Dictionary(object):
    """Bidirectional word <-> index vocabulary with question tokenization."""

    # Ordered text normalizations applied before splitting; order matters
    # (e.g. '...' must be removed before the single '.').
    _REPLACEMENTS = ((',', ''), ('?', ''), ("'s", " 's"),
                     ('...', ''), ('x ray', 'x-ray'), ('.', ''))

    def __init__(self, word2idx=None, idx2word=None):
        self.word2idx = {} if word2idx is None else word2idx
        self.idx2word = [] if idx2word is None else idx2word

    @property
    def ntoken(self):
        return len(self.word2idx)

    @property
    def padding_idx(self):
        # padding id sits just past the last real word index
        return len(self.word2idx)

    def tokenize(self, sentence, add_word):
        """Normalize ``sentence`` and return its token ids.

        With add_word=True unseen words are added to the vocabulary;
        otherwise unknown words map to the last word of the dictionary.
        """
        sentence = sentence.lower()
        for marker in ("? -yes/no", "? -open", "? - open"):
            if marker in sentence:
                sentence = sentence.replace(marker, "")
        for old, new in self._REPLACEMENTS:
            sentence = sentence.replace(old, new)
        if add_word:
            return [self.add_word(w) for w in sentence.split()]
        return [self.word2idx.get(w, self.padding_idx - 1) for w in sentence.split()]

    def dump_to_file(self, path):
        """Pickle (word2idx, idx2word) to ``path``."""
        cPickle.dump([self.word2idx, self.idx2word], open(path, 'wb'))
        print('dictionary dumped to %s' % path)

    @classmethod
    def load_from_file(cls, path):
        """Load a pickled dictionary from ``path``."""
        print('loading dictionary from %s' % path)
        word2idx, idx2word = cPickle.load(open(path, 'rb'))
        return cls(word2idx, idx2word)

    def add_word(self, word):
        """Insert ``word`` if new; return its index."""
        if word not in self.word2idx:
            self.idx2word.append(word)
            self.word2idx[word] = len(self.idx2word) - 1
        return self.word2idx[word]

    def __len__(self):
        return len(self.idx2word)
def _create_entry(img, data, answer):
if None!=answer:
answer.pop('image_name')
answer.pop('qid')
entry = {
'qid' : data['qid'],
'image_name' : data['image_name'],
'image' : img,
'question' : data['question'],
'answer' : answer,
'answer_type' : data['answer_type'],
'question_type': data['question_type'],
'phrase_type' : data['phrase_type'] if 'phrase_type' in data.keys() else -1
}
return entry
def is_json(myjson):
    """Return True when ``myjson`` parses as JSON, False otherwise."""
    try:
        json.loads(myjson)
    except ValueError:
        return False
    return True
def _load_dataset(dataroot, name, img_id2val, label2ans):
    """Load entries
    img_id2val: dict {img_id -> val} val can be used to retrieve image or features
    dataroot: root path of dataset
    name: 'train', 'val', 'test'
    """
    samples = json.load(open(os.path.join(dataroot, name + 'set.json')))
    samples.sort(key=lambda x: x['qid'])
    answers = cPickle.load(open(os.path.join(dataroot, 'cache', '%s_target.pkl' % name), 'rb'))
    answers.sort(key=lambda x: x['qid'])
    # samples and targets must pair up one-to-one after sorting by qid
    utils.assert_eq(len(samples), len(answers))
    entries = []
    for sample, answer in zip(samples, answers):
        utils.assert_eq(sample['qid'], answer['qid'])
        utils.assert_eq(sample['image_name'], answer['image_name'])
        img_id = sample['image_name']
        if not COUNTING_ONLY or is_howmany(sample['question'], answer, label2ans):
            entries.append(_create_entry(img_id2val[img_id], sample, answer))
    return entries
class VQAFeatureDataset(Dataset):
    """Dataset of (image data, question tokens, answer target) tuples for
    VQA-RAD / PathVQA, supporting MAML and auto-encoder image inputs."""

    def __init__(self, name, args, dictionary, dataroot='data', question_len=12):
        """Load entries, raw image tensors and the answer vocabulary for split ``name``."""
        super(VQAFeatureDataset, self).__init__()
        self.args = args
        assert name in ['train', 'val', 'test']
        # NOTE(review): the dataroot parameter is always overridden by args.VQA_dir
        dataroot = args.VQA_dir
        ans2label_path = os.path.join(dataroot, 'cache', 'trainval_ans2label.pkl')
        label2ans_path = os.path.join(dataroot, 'cache', 'trainval_label2ans.pkl')
        self.ans2label = cPickle.load(open(ans2label_path, 'rb'))
        self.label2ans = cPickle.load(open(label2ans_path, 'rb'))
        self.num_ans_candidates = len(self.ans2label)
        # End get the number of answer type class
        self.dictionary = dictionary
        # TODO: load img_id2idx
        self.img_id2idx = json.load(open(os.path.join(dataroot, 'imgid2idx.json')))
        self.entries = _load_dataset(dataroot, name, self.img_id2idx, self.label2ans)
        # load image data for MAML module
        if args.maml:
            # TODO: load images
            images_path = os.path.join(dataroot, 'pytorch_images'+ str(args.img_size) + '.pkl')
            print('loading MAML image data from file: '+ images_path)
            self.maml_images_data = cPickle.load(open(images_path, 'rb'))
        # load image data for Auto-encoder module
        if args.autoencoder:
            # TODO: load images
            if 'RAD' in self.args.VQA_dir:
                images_path = os.path.join(dataroot, 'images128x128.pkl')
            else:
                images_path = os.path.join(dataroot, 'pytorch_images128_ae.pkl')
            print('loading DAE image data from file: '+ images_path)
            self.ae_images_data = cPickle.load(open(images_path, 'rb'))
        # tokenization
        self.tokenize(question_len)
        self.tensorize()
        # visual feature dim: one feat_dim slice per MAML encoder (+ DAE slice)
        if args.autoencoder and args.maml:
            self.v_dim = args.feat_dim * len(args.maml_nums) + args.feat_dim
        elif args.autoencoder:
            self.v_dim = args.feat_dim
        elif args.maml:
            self.v_dim = args.feat_dim * len(args.maml_nums)

    def tokenize(self, max_length=12):
        """Tokenizes the questions.
        This will add q_token in each entry of the dataset.
        -1 represent nil, and should be treated as padding_idx in embedding
        """
        for entry in self.entries:
            tokens = self.dictionary.tokenize(entry['question'], False)
            tokens = tokens[:max_length]
            if len(tokens) < max_length:
                # NOTE(review): padding is appended at the END of the sentence
                # (tokens + padding), despite the original note saying "front".
                padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
                tokens = tokens + padding
            utils.assert_eq(len(tokens), max_length)
            entry['q_token'] = tokens

    def tensorize(self):
        """Convert loaded image lists and per-entry tokens/answers to tensors."""
        if self.args.maml:
            # self.maml_images_data = torch.from_numpy(self.maml_images_data)
            self.maml_images_data = torch.stack(self.maml_images_data)
            self.maml_images_data = self.maml_images_data.type('torch.FloatTensor')
        if self.args.autoencoder:
            # RAD ships a numpy array; PathVQA a list of tensors
            if 'RAD' in self.args.VQA_dir:
                self.ae_images_data = torch.from_numpy(self.ae_images_data)
            else:
                self.ae_images_data = torch.stack(self.ae_images_data)
            self.ae_images_data = self.ae_images_data.type('torch.FloatTensor')
        for entry in self.entries:
            question = torch.from_numpy(np.array(entry['q_token']))
            entry['q_token'] = question
            answer = entry['answer']
            if None!=answer:
                labels = np.array(answer['labels'])
                scores = np.array(answer['scores'], dtype=np.float32)
                if len(labels):
                    labels = torch.from_numpy(labels)
                    scores = torch.from_numpy(scores)
                    entry['answer']['labels'] = labels
                    entry['answer']['scores'] = scores
                else:
                    entry['answer']['labels'] = None
                    entry['answer']['scores'] = None

    def __getitem__(self, index):
        """Return ([maml_image, ae_image], question, target, answer_type,
        question_type, phrase_type) for the entry at ``index`` (target omitted
        when the entry has no answer)."""
        entry = self.entries[index]
        question = entry['q_token']
        answer = entry['answer']
        answer_type = entry['answer_type']
        question_type = entry['question_type']
        image_data = [0, 0]
        if self.args.maml:
            # RAD images are single-channel; PathVQA are RGB
            if 'RAD' in self.args.VQA_dir:
                maml_images_data = self.maml_images_data[entry['image']].reshape(self.args.img_size * self.args.img_size)
            else:
                maml_images_data = self.maml_images_data[entry['image']].reshape(3 * self.args.img_size * self.args.img_size)
            image_data[0] = maml_images_data
        if self.args.autoencoder:
            ae_images_data = self.ae_images_data[entry['image']].reshape(128*128)
            image_data[1] = ae_images_data
        if None!=answer:
            labels = answer['labels']
            scores = answer['scores']
            # scatter soft scores into a dense target over all candidate answers
            target = torch.zeros(self.num_ans_candidates)
            if labels is not None:
                target.scatter_(0, labels, scores)
            return image_data, question, target, answer_type, question_type, entry['phrase_type']
            # return image_data, question, target, answer_type, question_type, [0]
        else:
            return image_data, question, answer_type, question_type

    def __len__(self):
        return len(self.entries)
def tfidf_from_questions(names, args, dictionary, dataroot='data', target=['rad']):
    """Build a TF-IDF-like stochastic word co-occurrence matrix and GloVe
    embeddings for words added while scanning the question files.

    Returns (tfidf, weights): a sparse [ntoken x ntoken] row-stochastic
    matrix and the GloVe weight matrix for the newly added words.

    BUGFIX: the TF-IDF pass wrote ``vals[col] /= df[col]`` — indexing the
    per-pair value vector by *token id* instead of pair position, which both
    corrupts the weights and can raise IndexError.  The pair position ``idx``
    is the correct index, matching the ``normalize`` pass below.
    """
    inds = [[], []]  # rows, cols for uncoalesce sparse matrix
    df = dict()      # document frequency per token id
    N = len(dictionary)
    # guard args=None so the utility can run standalone (see module __main__)
    if args is not None and args.use_VQA:
        dataroot = args.VQA_dir

    def populate(inds, df, text):
        # Record pairwise co-occurrences of tokens within one question.
        tokens = dictionary.tokenize(text, True)
        for t in tokens:
            df[t] = df.get(t, 0) + 1
        combin = list(itertools.combinations(tokens, 2))
        for c in combin:
            if c[0] < N:
                inds[0].append(c[0]); inds[1].append(c[1])
            if c[1] < N:
                inds[0].append(c[1]); inds[1].append(c[0])

    if 'rad' in target:
        for name in names:
            assert name in ['train', 'val', 'test']
            question_path = os.path.join(dataroot, name + 'set.json')
            questions = json.load(open(question_path))
            for question in questions:
                populate(inds, df, question['question'])

    # TF-IDF: down-weight each co-occurrence by the column token's frequency
    vals = [1] * len(inds[1])
    for idx, col in enumerate(inds[1]):
        assert df[col] >= 1, 'document frequency should be greater than zero!'
        vals[idx] /= df[col]  # BUGFIX: was vals[col]

    # Make stochastic matrix
    def normalize(inds, vals):
        z = dict()
        for row, val in zip(inds[0], vals):
            z[row] = z.get(row, 0) + val
        for idx, row in enumerate(inds[0]):
            vals[idx] /= z[row]
        return vals
    vals = normalize(inds, vals)

    tfidf = torch.sparse.FloatTensor(torch.LongTensor(inds), torch.FloatTensor(vals))
    tfidf = tfidf.coalesce()

    # Latent word embeddings for words appended beyond the original vocabulary
    emb_dim = 300
    glove_file = os.path.join(dataroot, 'glove', 'glove.6B.%dd.txt' % emb_dim)
    weights, word2emb = utils.create_glove_embedding_init(dictionary.idx2word[N:], glove_file)
    print('tf-idf stochastic matrix (%d x %d) is generated.' % (tfidf.size(0), tfidf.size(1)))
    return tfidf, weights
if __name__=='__main__':
    # One-off utility: load the cached tfidf-initialised word embedding.
    # The commented lines below were used once to generate that cache.
    # dictionary = Dictionary.load_from_file('data_RAD/dictionary.pkl')
    # tfidf, weights = tfidf_from_questions(['train'], None, dictionary)
    # w_emb = WordEmbedding(dictionary.ntoken, 300, .0, 'c')
    # w_emb.init_embedding(os.path.join('data_RAD', 'glove6b_init_300d.npy'), tfidf, weights)
    # with open('data_RAD/embed_tfidf_weights.pkl', 'wb') as f:
    #     torch.save(w_emb, f)
    # print("Saving embedding with tfidf and weights successfully")
    dictionary = Dictionary.load_from_file('data_RAD/dictionary.pkl')
    w_emb = WordEmbedding(dictionary.ntoken, 300, .0, 'c')
    with open('data_RAD/embed_tfidf_weights.pkl', 'rb') as f:
        # overwrites the freshly built module with the cached, tfidf-weighted one
        w_emb = torch.load(f)
    print("Load embedding with tfidf and weights successfully")
| 12,877 | 38.024242 | 146 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/learner.py | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
class MAML(nn.Module):
    """
    Meta Learner
    """
    def __init__(self, dataset_dir):
        """
        :param dataset_dir: dataset root path; 'RAD' in the path selects the
            single-channel architecture, otherwise the 3-channel one is used.
        """
        super(MAML, self).__init__()
        if 'RAD' in dataset_dir:
            # 1-channel input, four stride-2 convs, (3+1)-way linear head
            config = [
                ('conv2d', [64, 1, 3, 3, 2, 0]),
                ('relu', [True]),
                ('bn', [64]),
                ('conv2d', [64, 64, 3, 3, 2, 0]),
                ('relu', [True]),
                ('bn', [64]),
                ('conv2d', [64, 64, 3, 3, 2, 0]),
                ('relu', [True]),
                ('bn', [64]),
                ('conv2d', [64, 64, 2, 2, 1, 0]),
                ('relu', [True]),
                ('bn', [64]),
                ('flatten', []),
                ('linear', [3 + 1, 64*8*8])
            ]
        else:
            # 3-channel input with max-pooling stages, (5+1)-way linear head
            config = [
                ('conv2d', [32, 3, 3, 3, 1, 0]),
                ('relu', [True]),
                ('bn', [32]),
                ('max_pool2d', [2, 2, 0]),
                ('conv2d', [32, 32, 3, 3, 1, 0]),
                ('relu', [True]),
                ('bn', [32]),
                ('max_pool2d', [2, 2, 0]),
                ('conv2d', [32, 32, 3, 3, 1, 0]),
                ('relu', [True]),
                ('bn', [32]),
                ('max_pool2d', [2, 2, 0]),
                ('conv2d', [32, 32, 3, 3, 1, 0]),
                ('relu', [True]),
                ('bn', [32]),
                ('max_pool2d', [2, 1, 0]),
                ('flatten', []),
                ('linear', [5 + 1, 32 * 5 * 5])
            ]
        self.net = Learner(config)
        self.frezze_final_layer()
    def frezze_final_layer(self):
        # (sic: "freeze") Stop gradients through the final linear layer.
        # vars[16]/vars[17] are its weight/bias: the 4 convs and 4 bns before
        # it contribute 2 parameters each (= 16 entries).
        self.net.vars[16].requires_grad = False
        self.net.vars[17].requires_grad = False
    def forward(self, x):
        return self.net.forward(x)
class Learner(nn.Module):
"""
"""
def __init__(self, config):
"""
:param config: network config file, type:list of (string, list)
"""
super(Learner, self).__init__()
self.config = config
# this dict contains all tensors needed to be optimized
self.vars = nn.ParameterList()
# running_mean and running_var
self.vars_bn = nn.ParameterList()
for i, (name, param) in enumerate(self.config):
if name is 'conv2d':
# [ch_out, ch_in, kernelsz, kernelsz]
w = nn.Parameter(torch.ones(*param[:4]))
# gain=1 according to cbfin's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
elif name is 'convt2d':
# [ch_in, ch_out, kernelsz, kernelsz, stride, padding]
w = nn.Parameter(torch.ones(*param[:4]))
# gain=1 according to cbfin's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_in, ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[1])))
elif name is 'linear':
# [ch_out, ch_in]
w = nn.Parameter(torch.ones(*param))
# gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
elif name is 'bn':
# [ch_out]
w = nn.Parameter(torch.ones(param[0]))
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
# must set requires_grad=False
running_mean = nn.Parameter(torch.zeros(param[0]), requires_grad=False)
running_var = nn.Parameter(torch.ones(param[0]), requires_grad=False)
self.vars_bn.extend([running_mean, running_var])
elif name in ['tanh', 'relu', 'upsample', 'avg_pool2d', 'max_pool2d',
'flatten', 'reshape', 'leakyrelu', 'sigmoid']:
continue
else:
raise NotImplementedError
def extra_repr(self):
info = ''
for name, param in self.config:
if name is 'conv2d':
tmp = 'conv2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
%(param[1], param[0], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
elif name is 'convt2d':
tmp = 'convTranspose2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
%(param[0], param[1], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
elif name is 'linear':
tmp = 'linear:(in:%d, out:%d)'%(param[1], param[0])
info += tmp + '\n'
elif name is 'leakyrelu':
tmp = 'leakyrelu:(slope:%f)'%(param[0])
info += tmp + '\n'
elif name is 'avg_pool2d':
tmp = 'avg_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
info += tmp + '\n'
elif name is 'max_pool2d':
tmp = 'max_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
info += tmp + '\n'
elif name in ['flatten', 'tanh', 'relu', 'upsample', 'reshape', 'sigmoid', 'use_logits', 'bn']:
tmp = name + ':' + str(tuple(param))
info += tmp + '\n'
else:
raise NotImplementedError
return info
def forward(self, x, vars=None, bn_training=True):
"""
This function can be called by finetunning, however, in finetunning, we dont wish to update
running_mean/running_var. Thought weights/bias of bn is updated, it has been separated by fast_weights.
Indeed, to not update running_mean/running_var, we need set update_bn_statistics=False
but weight/bias will be updated and not dirty initial theta parameters via fast_weiths.
:param x: [b, 1, 28, 28]
:param vars:
:param bn_training: set False to not update
:return: x, loss, likelihood, kld
"""
if vars is None:
vars = self.vars
idx = 0
bn_idx = 0
for name, param in self.config[:-1]:
if name is 'conv2d':
w, b = vars[idx], vars[idx + 1]
# remember to keep synchrozied of forward_encoder and forward_decoder!
x = F.conv2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
elif name is 'convt2d':
w, b = vars[idx], vars[idx + 1]
# remember to keep synchrozied of forward_encoder and forward_decoder!
x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
elif name is 'linear':
w, b = vars[idx], vars[idx + 1]
x = F.linear(x, w, b)
idx += 2
# print('forward:', idx, x.norm().item())
elif name is 'bn':
w, b = vars[idx], vars[idx + 1]
running_mean, running_var = self.vars_bn[bn_idx], self.vars_bn[bn_idx+1]
x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)
idx += 2
bn_idx += 2
elif name is 'flatten':
# print(x.shape)
x = x.view(x.size(0), self.config[0][1][0], -1)
elif name is 'reshape':
# [b, 8] => [b, 2, 2, 2]
x = x.view(x.size(0), *param)
elif name is 'relu':
x = F.relu(x, inplace=param[0])
elif name is 'leakyrelu':
x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])
elif name is 'tanh':
x = F.tanh(x)
elif name is 'sigmoid':
x = torch.sigmoid(x)
elif name is 'upsample':
x = F.upsample_nearest(x, scale_factor=param[0])
elif name is 'max_pool2d':
x = F.max_pool2d(x, param[0], param[1], param[2])
elif name is 'avg_pool2d':
x = F.avg_pool2d(x, param[0], param[1], param[2])
else:
raise NotImplementedError
# make sure variable is used properly
# assert idx == len(vars)
assert bn_idx == len(self.vars_bn)
return torch.mean(x, 2)
def zero_grad(self, vars=None):
"""
:param vars:
:return:
"""
with torch.no_grad():
if vars is None:
for p in self.vars:
if p.grad is not None:
p.grad.zero_()
else:
for p in vars:
if p.grad is not None:
p.grad.zero_()
    def parameters(self):
        """
        Override of ``nn.Module.parameters``: instead of a generator over
        registered parameters, return the flat parameter container that this
        meta-learner maintains by hand (``self.vars``).
        :return: the list/ParameterList of all trainable tensors of the net.
        """
        return self.vars | 9,463 | 34.051852 | 111 | py
MICCAI21_MMQ | MICCAI21_MMQ-main/counting.py | """
Learning to Count Objects in Natural Images for Visual Question Answering
Yan Zhang, Jonathon Hare, Adam Prügel-Bennett
ICLR 2018
This code is from Yan Zhang's repository.
https://github.com/Cyanogenoid/vqa-counting/blob/master/vqa-v2/counting.py
MIT License
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
class Counter(nn.Module):
    """ Counting module as proposed in [1].
    Count the number of objects from a set of bounding boxes and a set of scores for each bounding box.
    This produces (self.objects + 1) number of count features.
    [1]: Yan Zhang, Jonathon Hare, Adam Prügel-Bennett: Learning to Count Objects in Natural Images for Visual Question Answering.
    https://openreview.net/forum?id=B12Js_yRb
    """
    def __init__(self, objects, already_sigmoided=False):
        # objects: maximum count that can be represented (output has
        #          objects + 1 features, one per count 0..objects)
        # already_sigmoided: True if `attention` passed to forward() is
        #          already in [0, 1] and must not be sigmoided again
        super().__init__()
        self.objects = objects
        self.already_sigmoided = already_sigmoided
        # the 8 learned piecewise-linear activations f1..f8 of the paper
        self.f = nn.ModuleList([PiecewiseLin(16) for _ in range(8)])
        self.count_activation = None
    def forward(self, boxes, attention):
        """ Forward propagation of attention weights and bounding boxes to produce count features.
        `boxes` has to be a tensor of shape (n, 4, m) with the 4 channels containing the x and y coordinates of the top left corner and the x and y coordinates of the bottom right corner in this order.
        `attention` has to be a tensor of shape (n, m). Each value should be in [0, 1] if already_sigmoided is set to True, but there are no restrictions if already_sigmoided is set to False. This value should be close to 1 if the corresponding boundign box is relevant and close to 0 if it is not.
        n is the batch size, m is the number of bounding boxes per image.
        """
        # only care about the highest scoring object proposals
        # the ones with low score will have a low impact on the count anyway
        boxes, attention = self.filter_most_important(self.objects, boxes, attention)
        # normalise the attention weights to be in [0, 1]
        if not self.already_sigmoided:
            attention = torch.sigmoid(attention)
        # pairwise relevance A_ij = a_i * a_j and pairwise distance 1 - IoU
        relevancy = self.outer_product(attention)
        distance = 1 - self.iou(boxes, boxes)
        # intra-object dedup
        score = self.f[0](relevancy) * self.f[1](distance)
        # inter-object dedup
        dedup_score = self.f[3](relevancy) * self.f[4](distance)
        dedup_per_entry, dedup_per_row = self.deduplicate(dedup_score, attention)
        score = score / dedup_per_entry
        # aggregate the score
        # can skip putting this on the diagonal since we're just summing over it anyway
        correction = self.f[0](attention * attention) / dedup_per_row
        score = score.sum(dim=2).sum(dim=1, keepdim=True) + correction.sum(dim=1, keepdim=True)
        score = (score + 1e-20).sqrt()
        one_hot = self.to_one_hot(score)
        # confidence: how decisive the attention weights / distances are
        att_conf = (self.f[5](attention) - 0.5).abs()
        dist_conf = (self.f[6](distance) - 0.5).abs()
        conf = self.f[7](att_conf.mean(dim=1, keepdim=True) + dist_conf.mean(dim=2).mean(dim=1, keepdim=True))
        return one_hot * conf
    def deduplicate(self, dedup_score, att):
        # using outer-diffs
        att_diff = self.outer_diff(att)
        score_diff = self.outer_diff(dedup_score)
        sim = self.f[2](1 - score_diff).prod(dim=1) * self.f[2](1 - att_diff)
        # similarity for each row
        row_sims = sim.sum(dim=2)
        # similarity for each entry
        all_sims = self.outer_product(row_sims)
        return all_sims, row_sims
    def to_one_hot(self, scores):
        """ Turn a bunch of non-negative scalar values into a one-hot encoding.
        E.g. with self.objects = 3, 0 -> [1 0 0 0], 2.75 -> [0 0 0.25 0.75].
        """
        # sanity check, I don't think this ever does anything (it certainly shouldn't)
        scores = scores.clamp(min=0, max=self.objects)
        # compute only on the support
        i = scores.long().data
        f = scores.frac()
        # target_l is the one-hot if the score is rounded down
        # target_r is the one-hot if the score is rounded up
        target_l = scores.data.new(i.size(0), self.objects + 1).fill_(0)
        target_r = scores.data.new(i.size(0), self.objects + 1).fill_(0)
        target_l.scatter_(dim=1, index=i.clamp(max=self.objects), value=1)
        target_r.scatter_(dim=1, index=(i + 1).clamp(max=self.objects), value=1)
        # interpolate between these with the fractional part of the score
        return (1 - f) * Variable(target_l) + f * Variable(target_r)
    def filter_most_important(self, n, boxes, attention):
        """ Only keep top-n object proposals, scored by attention weight """
        attention, idx = attention.topk(n, dim=1, sorted=False)
        idx = idx.unsqueeze(dim=1).expand(boxes.size(0), boxes.size(1), idx.size(1))
        boxes = boxes.gather(2, idx)
        return boxes, attention
    def outer(self, x):
        # broadcastable pair (a_i, b_j) views of x for pairwise combination
        size = tuple(x.size()) + (x.size()[-1],)
        a = x.unsqueeze(dim=-1).expand(*size)
        b = x.unsqueeze(dim=-2).expand(*size)
        return a, b
    def outer_product(self, x):
        # Y_ij = x_i * x_j
        a, b = self.outer(x)
        return a * b
    def outer_diff(self, x):
        # like outer products, except taking the absolute difference instead
        # Y_ij = | x_i - x_j |
        a, b = self.outer(x)
        return (a - b).abs()
    def iou(self, a, b):
        # this is just the usual way to IoU from bounding boxes
        inter = self.intersection(a, b)
        area_a = self.area(a).unsqueeze(2).expand_as(inter)
        area_b = self.area(b).unsqueeze(1).expand_as(inter)
        return inter / (area_a + area_b - inter + 1e-12)
    def area(self, box):
        # box layout is (n, 4, m): rows are x1, y1, x2, y2
        x = (box[:, 2, :] - box[:, 0, :]).clamp(min=0)
        y = (box[:, 3, :] - box[:, 1, :]).clamp(min=0)
        return x * y
    def intersection(self, a, b):
        # pairwise intersection areas between all boxes of a and all boxes of b
        size = (a.size(0), 2, a.size(2), b.size(2))
        min_point = torch.max(
            a[:, :2, :].unsqueeze(dim=3).expand(*size),
            b[:, :2, :].unsqueeze(dim=2).expand(*size),
        )
        max_point = torch.min(
            a[:, 2:, :].unsqueeze(dim=3).expand(*size),
            b[:, 2:, :].unsqueeze(dim=2).expand(*size),
        )
        inter = (max_point - min_point).clamp(min=0)
        area = inter[:, 0, :, :] * inter[:, 1, :, :]
        return area
class PiecewiseLin(nn.Module):
    """Learnable monotonically increasing piecewise-linear function on [0, 1].

    The function has ``n`` segments with learned non-negative, normalised
    slopes, so it always maps 0 -> 0 and 1 -> 1.
    """
    def __init__(self, n):
        super().__init__()
        self.n = n
        self.weight = nn.Parameter(torch.ones(n + 1))
        # pin the first weight to 0 (with zero gradient) so that f(0) = 0
        self.weight.data[0] = 0

    def forward(self, x):
        # |w| keeps every segment slope non-negative -> monotone function
        slopes = self.weight.abs()
        # normalise slopes to sum to one -> f(1) = 1
        slopes = slopes / slopes.sum()
        slopes = slopes.view([self.n + 1] + [1] * x.dim())
        # cumulative sum = function value at every knot, for O(1) lookup
        knots = slopes.cumsum(dim=0)
        knots = knots.expand((self.n + 1,) + tuple(x.size()))
        slopes = slopes.expand_as(knots)
        # locate the segment each input falls into
        scaled = self.n * x.unsqueeze(0)
        segment = Variable(scaled.long().data)
        frac = scaled.frac()
        # value at the segment's left knot ...
        out = knots.gather(0, segment.clamp(max=self.n))
        # ... plus the linear interpolation inside the segment
        out = out + frac * slopes.gather(0, (segment + 1).clamp(max=self.n))
        return out.squeeze(0)
| 7,535 | 42.310345 | 298 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/utils.py | """
This code is extended from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
"""
from __future__ import print_function
import errno
import os
import re
import collections
import numpy as np
import operator
import functools
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch._six import string_classes
from torch.utils.data.dataloader import default_collate
import math
import time
import dataset_VQA
# Tolerance used by assert_array_eq for elementwise float comparison.
EPS = 1e-7
# numpy dtype name -> matching torch tensor constructor; used by
# trim_collate when collating batches of numpy scalars.
numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'float16': torch.HalfTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
    'int16': torch.ShortTensor,
    'int8': torch.CharTensor,
    'uint8': torch.ByteTensor,
}
def assert_eq(real, expected):
    """Assert equality, reporting both the actual and the expected value."""
    message = '%s (true) vs %s (expected)' % (real, expected)
    assert real == expected, message
def assert_array_eq(real, expected):
    """Assert two arrays agree elementwise to within the module tolerance EPS."""
    close_enough = (np.abs(real - expected) < EPS).all()
    assert close_enough, '%s (true) vs %s (expected)' % (real, expected)
def load_folder(folder, suffix):
    """Return sorted paths of files in *folder* whose names end with *suffix*."""
    return [os.path.join(folder, name)
            for name in sorted(os.listdir(folder))
            if name.endswith(suffix)]
def load_imageid(folder):
    """Collect the integer image ids encoded in the jpg filenames of *folder*.

    Filenames are assumed to end in ``..._<id>.jpg`` (COCO style): the id is
    the last ``_``-separated token of the basename.
    """
    ids = set()
    for path in load_folder(folder, 'jpg'):
        stem = path.split('/')[-1].split('.')[0]
        ids.add(int(stem.split('_')[-1]))
    return ids
def pil_loader(path):
    """Open the image file at *path* and return it converted to RGB."""
    with open(path, 'rb') as f, Image.open(f) as img:
        return img.convert('RGB')
def weights_init(m):
    """Custom initialisation: N(0, 0.02) weights for linear/conv layers,
    N(1, 0.02) weights plus zero bias for 2-D batch-norm; any other module
    is reported and left untouched."""
    cls = m.__class__
    if cls in (nn.Linear, nn.Conv2d, nn.ConvTranspose2d):
        m.weight.data.normal_(0.0, 0.02)
    elif cls == nn.BatchNorm2d:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    else:
        print('%s is not initialized.' % cls)
def init_net(net, net_file):
    """Load weights from *net_file* when given, otherwise apply weights_init."""
    if not net_file:
        net.apply(weights_init)
    else:
        net.load_state_dict(torch.load(net_file))
def create_dir(path):
    """Create *path* (including parents) unless it already exists.

    A concurrent creation between the existence check and makedirs is
    tolerated; any other OSError is re-raised.
    """
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
def print_model(model, logger):
    """Print the model and, when a logger is given, log its parameter count."""
    print(model)
    n_params = sum(functools.reduce(operator.mul, w.size(), 1)
                   for w in model.parameters())
    if logger:
        logger.write('nParams=\t' + str(n_params))
def save_model(path, model, epoch, optimizer=None):
    """Serialise *model* state (and optionally *optimizer* state) to *path*."""
    checkpoint = {'epoch': epoch, 'model_state': model.state_dict()}
    if optimizer is not None:
        checkpoint['optimizer_state'] = optimizer.state_dict()
    torch.save(checkpoint, path)
# Select the indices given by `lengths` in the second dimension
# As a result, # of dimensions is shrinked by one
# @param pad(Tensor)
# @param len(list[int])
def rho_select(pad, lengths):
    """Pick, for every sequence in the batch, the output at its last valid step.

    pad: (batch, time, dim) padded outputs; lengths: (batch,) true lengths.
    Returns a (batch, dim) tensor.
    """
    last = (lengths - 1).view(-1, 1).expand(pad.size(0), pad.size(2)).unsqueeze(1)
    return pad.gather(1, last).squeeze(1)
def trim_collate(batch):
    """Puts each data field into a tensor with outer dimension batch size.

    Unlike the default collate, 2-D image-feature tensors with a varying
    number of boxes are zero-padded up to the largest box count in the batch
    before stacking.  Mappings and sequences are collated recursively.
    """
    # `collections.Mapping`/`collections.Sequence` were removed in Python 3.10;
    # the abstract base classes live in `collections.abc` (local import keeps
    # this fix self-contained).
    import collections.abc
    _use_shared_memory = True
    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if torch.is_tensor(batch[0]):
        out = None
        if 1 < batch[0].dim():  # image features: pad the box dimension first
            max_num_boxes = max([x.size(0) for x in batch])
            if _use_shared_memory:
                # If we're in a background process, concatenate directly into a
                # shared memory tensor to avoid an extra copy
                numel = len(batch) * max_num_boxes * batch[0].size(-1)
                storage = batch[0].storage()._new_shared(numel)
                out = batch[0].new(storage)
            # warning: F.pad returns Variable!
            return torch.stack([F.pad(x, (0, 0, 0, max_num_boxes - x.size(0))).data for x in batch], 0, out=out)
        else:
            if _use_shared_memory:
                # If we're in a background process, concatenate directly into a
                # shared memory tensor to avoid an extra copy
                numel = sum([x.numel() for x in batch])
                storage = batch[0].storage()._new_shared(numel)
                out = batch[0].new(storage)
            return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.abc.Mapping):
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.abc.Sequence):
        transposed = zip(*batch)
        return [trim_collate(samples) for samples in transposed]
    raise TypeError((error_msg.format(type(batch[0]))))
class Logger(object):
    """Minimal file logger that also accumulates named scalar values.

    ``append`` collects values per key; ``log`` writes the mean of every key
    (plus an optional extra message) to the log file, resets the accumulators
    and returns the written message.  ``write`` logs a single line and echoes
    it to stdout.
    """
    def __init__(self, output_name):
        dirname = os.path.dirname(output_name)
        # makedirs creates nested paths and exist_ok avoids a race with other
        # processes; plain os.mkdir could do neither and also failed when the
        # log file had no directory component at all.
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        self.log_file = open(output_name, 'w')
        self.infos = {}

    def append(self, key, val):
        """Accumulate *val* under *key* for the next call to log()."""
        self.infos.setdefault(key, []).append(val)

    def log(self, extra_msg=''):
        """Write the mean of every accumulated key, reset, return the message."""
        msgs = [extra_msg]
        # dict.iteritems() is Python 2 only -- items() fixes a crash here
        for key, vals in self.infos.items():
            msgs.append('%s %.6f' % (key, np.mean(vals)))
        msg = '\n'.join(msgs)
        self.log_file.write(msg + '\n')
        self.log_file.flush()
        self.infos = {}
        return msg

    def write(self, msg):
        """Write one line to the log file and echo it to stdout."""
        self.log_file.write(msg + '\n')
        self.log_file.flush()
        print(msg)
def create_glove_embedding_init(idx2word, glove_file):
    """Build an embedding matrix for *idx2word* initialised from GloVe vectors.

    Words missing from the GloVe file keep zero rows.  Returns the weight
    matrix and the word -> vector dictionary parsed from the file.
    """
    word2emb = {}
    with open(glove_file, 'r', encoding='utf-8') as f:
        entries = f.readlines()
    emb_dim = len(entries[0].split(' ')) - 1
    print('embedding dim is %d' % emb_dim)
    weights = np.zeros((len(idx2word), emb_dim), dtype=np.float32)
    for entry in entries:
        parts = entry.split(' ')
        word2emb[parts[0]] = np.array(list(map(float, parts[1:])))
    for idx, word in enumerate(idx2word):
        if word in word2emb:
            weights[idx] = word2emb[word]
    return weights, word2emb
# --------------------FAIRSEQ functions---------------------------
def move_to_cuda(sample):
    """Recursively move every tensor inside *sample* (tensor/dict/list) to GPU.

    Empty samples become an empty dict; non-tensor leaves pass through as-is.
    """
    if len(sample) == 0:
        return {}

    def _recurse(obj):
        if torch.is_tensor(obj):
            return obj.cuda()
        if isinstance(obj, dict):
            return {key: _recurse(value) for key, value in obj.items()}
        if isinstance(obj, list):
            return [_recurse(x) for x in obj]
        return obj

    return _recurse(sample)
def item(tensor):
    """Best-effort conversion of a 0-d tensor / sequence / scalar to a number."""
    if hasattr(tensor, 'item'):
        return tensor.item()
    return tensor[0] if hasattr(tensor, '__getitem__') else tensor
def clip_grad_norm_(tensor, max_norm):
    """Scale *tensor* in place so that its L2 norm does not exceed *max_norm*.

    Returns the norm measured before any clipping.
    """
    grad_norm = torch.norm(tensor).item()  # 0-d tensor -> python float
    if grad_norm > max_norm > 0:
        clip_coef = max_norm / (grad_norm + 1e-6)
        tensor.mul_(clip_coef)
    return grad_norm
def to_sparse(x):
    """ converts dense tensor x to sparse format """
    type_name = torch.typename(x).split('.')[-1]
    sparse_cls = getattr(torch.sparse, type_name)
    indices = torch.nonzero(x)
    # all-zero input: nothing to index with
    if len(indices.shape) == 0:
        return sparse_cls(*x.shape)
    indices = indices.t()
    values = x[tuple(indices[i] for i in range(indices.shape[0]))]
    return sparse_cls(indices, values, x.size())
def get_size_of_largest_vqa_batch(dataloader):
    """Probe the per-field sizes of a VQA dataloader's batches.

    NOTE(review): this helper looks unfinished upstream -- the scan loop only
    compares the first field and records nothing, and the function returns
    None.  The Python 2 ``iter(...).next()`` call (which crashes under
    Python 3) has been replaced by the builtin ``next``.
    """
    v, b, q, a = next(iter(dataloader))  # was: iter(dataloader).next() (Py2)
    # ignore 1st dimension (batch size)
    largest_v = v.size()[1]
    largest_b = b.size()[1]
    largest_q = q.size()[1]
    largest_a = a.size()[1]
    for i, (v, b, q, a) in enumerate(dataloader):
        if largest_v > v.size()[1]:
            pass
def get_dummy_batch(args):
    """Placeholder -- not implemented upstream; intentionally returns None."""
    return None
def as_minutes(seconds):
    """Format a duration given in seconds as 'Mm Ss'."""
    minutes, remainder = divmod(seconds, 60)
    return '%dm %ds' % (minutes, remainder)
def time_since(since, percent):
    """Return 'elapsed (- remaining)' given a start time and progress fraction."""
    elapsed = time.time() - since
    estimated_total = elapsed / (percent)
    remaining = estimated_total - elapsed
    return '%s (- %s)' % (as_minutes(elapsed), as_minutes(remaining))
def tfidf_loading(use_tfidf, w_emb, args):
    """Optionally initialise the word embedding with TF-IDF weighted GloVe.

    A previously computed embedding is loaded from ``embed_tfidf_weights.pkl``
    when available; otherwise it is computed from the train/val questions and
    cached to that file.  Returns the (possibly replaced) embedding module.
    """
    if use_tfidf:
        if args.use_VQA:
            # do not shadow the builtin `dict` here
            dictionary = dataset_VQA.Dictionary.load_from_file(os.path.join(args.VQA_dir, 'dictionary.pkl'))
        # load extracted tfidf and weights from file for saving loading time
        if args.use_VQA:
            cache_path = os.path.join(args.VQA_dir, 'embed_tfidf_weights.pkl')
            if os.path.isfile(cache_path):
                print("Loading embedding tfidf and weights from file")
                with open(cache_path, 'rb') as f:
                    w_emb = torch.load(f)
                print("Load embedding tfidf and weights from file successfully")
            else:
                print("Embedding tfidf and weights haven't been saving before")
                tfidf, weights = dataset_VQA.tfidf_from_questions(['train', 'val'], args, dictionary)
                w_emb.init_embedding(os.path.join(args.VQA_dir, 'glove6b_init_300d.npy'), tfidf, weights)
                with open(cache_path, 'wb') as f:
                    torch.save(w_emb, f)
                print("Saving embedding with tfidf and weights successfully")
    return w_emb
def brevity_penalty(candidate, references):
    """BLEU brevity penalty of *candidate* against *references*.

    r is the reference length closest to the candidate length c (ties go to
    the shorter reference).  Returns 1 when c > r, otherwise exp(1 - r/c).
    An empty candidate now gets the limiting penalty 0 instead of raising
    ZeroDivisionError.
    """
    c = len(candidate)
    if c == 0:
        # lim_{c -> 0+} exp(1 - r/c) = 0
        return 0
    ref_lens = (len(reference) for reference in references)
    r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))
    if c > r:
        return 1
    else:
        return math.exp(1 - r / c)
def modified_precision(candidate, references, n):
    """Clipped (modified) n-gram precision from BLEU.

    Each candidate n-gram counts at most as many times as it appears in the
    most favourable single reference.  Returns 0 when the candidate contains
    no n-grams at all (previously a ZeroDivisionError).
    """
    max_frequency = collections.defaultdict(int)
    min_frequency = collections.defaultdict(int)
    candidate_words = split_sentence(candidate, n)
    # no n-grams -> precision is undefined; report 0 instead of dividing by 0
    if not candidate_words:
        return 0
    for reference in references:
        reference_words = split_sentence(reference, n)
        for word in candidate_words:
            # clip by the best count over any single reference
            max_frequency[word] = max(max_frequency[word], reference_words[word])
    for word in candidate_words:
        min_frequency[word] = min(max_frequency[word], candidate_words[word])
    return sum(min_frequency.values()) / sum(candidate_words.values())
def split_sentence(sentence, n):
    """Count the n-grams of *sentence* after stripping non-alphanumerics.

    Returns a defaultdict mapping each (space-joined) n-gram to its count.
    NOTE(review): the regex removes spaces as well ([^a-zA-Z0-9]), so the
    whole sentence collapses to a single token before split() -- presumably
    "[^a-zA-Z0-9 ]" was intended; confirm against the callers before fixing.
    """
    words = collections.defaultdict(int)
    tmp_sentence = re.sub("[^a-zA-Z0-9]", "", sentence) #add 0-9
    tmp_sentence = tmp_sentence.lower()
    tmp_sentence = tmp_sentence.strip().split()
    length = len(tmp_sentence)
    # slide a window of size n over the tokens
    for i in range(length - n + 1):
        tmp_words = " ".join(tmp_sentence[i: i + n])
        if tmp_words:
            words[tmp_words] += 1
    return words | 12,029 | 33.371429 | 117 | py
MICCAI21_MMQ | MICCAI21_MMQ-main/classifier.py | """
This code is modified based on Jin-Hwa Kim's repository (Bilinear Attention Networks - https://github.com/jnhwkim/ban-vqa) by Xuan B. Nguyen
"""
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
class SimpleClassifier(nn.Module):
    """Two-layer MLP classifier head: Linear -> activation -> Dropout -> Linear.

    Both linear layers are weight-normalised; the activation name and the
    dropout rate come from ``args`` (currently only 'relu' is supported).
    """
    def __init__(self, in_dim, hid_dim, out_dim, args):
        super(SimpleClassifier, self).__init__()
        supported_activations = {'relu': nn.ReLU()}
        try:
            activation = supported_activations[args.activation]
        except:
            raise AssertionError(args.activation + " is not supported yet!")
        self.main = nn.Sequential(
            weight_norm(nn.Linear(in_dim, hid_dim), dim=None),
            activation,
            nn.Dropout(args.dropout, inplace=True),
            weight_norm(nn.Linear(hid_dim, out_dim), dim=None),
        )

    def forward(self, x):
        """Return unnormalised class logits for input *x*."""
        return self.main(x)
| 936 | 35.038462 | 140 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/simple_cnn.py | """
MAML module for MEVF model
This code is written by Binh X. Nguyen and Binh D. Nguyen
<link paper>
"""
import torch
import torch.nn as nn
import numpy as np
import pickle
import torch.nn.functional as F
class SimpleCNN(nn.Module):
    """Four-layer strided CNN (MAML image encoder), initialised from
    pre-trained meta-learned weights stored in a pickle file."""

    def __init__(self, weight_path='simple_cnn.weights', eps_cnn=1e-5, momentum_cnn=0.05):
        super(SimpleCNN, self).__init__()
        # load the pre-trained convolution weights once, up front
        pretrained = self.load_weight(weight_path)
        for layer in range(1, 5):
            in_channels = 1 if layer == 1 else 64
            conv = self.init_conv(in_channels, 64,
                                  pretrained['conv%d' % layer], pretrained['b%d' % layer])
            bn = nn.BatchNorm2d(num_features=64, eps=eps_cnn, affine=True, momentum=momentum_cnn)
            setattr(self, 'conv%d' % layer, conv)
            setattr(self, 'conv%d_bn' % layer, bn)

    def load_weight(self, path):
        """Deserialise the pickled weight dictionary."""
        return pickle.load(open(path, 'rb'))

    def forward(self, X):
        """Encode a single-channel image batch into (N, 64) by mean-pooling
        the final 6x6 feature maps."""
        out = X
        for layer in range(1, 5):
            conv = getattr(self, 'conv%d' % layer)
            bn = getattr(self, 'conv%d_bn' % layer)
            out = bn(F.relu(conv(out)))
        out = out.view(-1, 64, 36)
        return torch.mean(out, 2)

    def convert_to_torch_weight(self, weight):
        """TF layout (kH, kW, in, out) -> torch layout (out, in, kH, kW)."""
        return np.transpose(weight, [3, 2, 0, 1])

    def init_conv(self, inp, out, weight, bias, convert=True):
        conv = nn.Conv2d(inp, out, 3, 2, 1, bias=True)
        if convert:
            weight = self.convert_to_torch_weight(weight)
        conv.weight.data = torch.Tensor(weight).float()
        conv.bias.data = torch.Tensor(bias).float()
        return conv
class SimpleCNN32(nn.Module):
    """Variant of SimpleCNN for 3-channel inputs with 32 feature maps,
    initialised from pre-trained weights stored in a pickle file."""

    def __init__(self, weight_path='simple_cnn.weights', eps_cnn=1e-5, momentum_cnn=0.05):
        super(SimpleCNN32, self).__init__()
        # load the pre-trained convolution weights once, up front
        pretrained = self.load_weight(weight_path)
        for layer in range(1, 5):
            in_channels = 3 if layer == 1 else 32
            conv = self.init_conv(in_channels, 32,
                                  pretrained['conv%d' % layer], pretrained['b%d' % layer])
            bn = nn.BatchNorm2d(num_features=32, eps=eps_cnn, affine=True, momentum=momentum_cnn)
            setattr(self, 'conv%d' % layer, conv)
            setattr(self, 'conv%d_bn' % layer, bn)

    def load_weight(self, path):
        """Deserialise the pickled weight dictionary."""
        return pickle.load(open(path, 'rb'))

    def forward(self, X):
        """Encode a 3-channel image batch into (N, 32) by mean-pooling the
        final 16x16 feature maps."""
        out = X
        for layer in range(1, 5):
            conv = getattr(self, 'conv%d' % layer)
            bn = getattr(self, 'conv%d_bn' % layer)
            out = bn(F.relu(conv(out)))
        out = out.view(-1, 32, 256)
        return torch.mean(out, 2)

    def convert_to_torch_weight(self, weight):
        """TF layout (kH, kW, in, out) -> torch layout (out, in, kH, kW)."""
        return np.transpose(weight, [3, 2, 0, 1])

    def init_conv(self, inp, out, weight, bias, convert=True):
        conv = nn.Conv2d(inp, out, 3, 2, 1, bias=True)
        if convert:
            weight = self.convert_to_torch_weight(weight)
        conv.weight.data = torch.Tensor(weight).float()
        conv.bias.data = torch.Tensor(bias).float()
        return conv
if __name__ == '__main__':
    # smoke test: push one random single-channel batch through the encoder
    simple_cnn = SimpleCNN(weight_path='simple_cnn.weights', eps_cnn=1e-5, momentum_cnn=0.05)
    batch = np.random.random((3, 1, 84, 84))
    simple_cnn(torch.tensor(batch, dtype=torch.float32).float())
| 4,169 | 41.121212 | 104 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/bc.py | """
This code is from Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang's repository.
https://github.com/jnhwkim/ban-vqa
"""
from __future__ import print_function
import torch
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
from fc import FCNet
class BCNet(nn.Module):
    """Simple class for non-linear bilinear connect network
    """
    def __init__(self, v_dim, q_dim, h_dim, h_out, act='ReLU', dropout=[.2,.5], k=3):
        # v_dim/q_dim: visual and question feature dims; h_dim: joint hidden
        # dim; h_out: number of output heads (glimpses) or None for the raw
        # joint map; k: rank multiplier, averaged back out by p_net.
        super(BCNet, self).__init__()
        self.c = 32
        self.k = k
        self.v_dim = v_dim; self.q_dim = q_dim
        self.h_dim = h_dim; self.h_out = h_out
        self.v_net = FCNet([v_dim, h_dim * self.k], act=act, dropout=dropout[0])
        self.q_net = FCNet([q_dim, h_dim * self.k], act=act, dropout=dropout[0])
        self.dropout = nn.Dropout(dropout[1]) # attention
        if 1 < k:
            self.p_net = nn.AvgPool1d(self.k, stride=self.k)
        if None == h_out:
            # no output heads: forward() returns the raw joint representation
            pass
        elif h_out <= self.c:
            # few heads: low-rank bilinear weights applied by broadcasting
            self.h_mat = nn.Parameter(torch.Tensor(1, h_out, 1, h_dim * self.k).normal_())
            self.h_bias = nn.Parameter(torch.Tensor(1, h_out, 1, 1).normal_())
        else:
            # many heads: project the joint representation with a linear layer
            self.h_net = weight_norm(nn.Linear(h_dim*self.k, h_out), dim=None)
    def forward(self, v, q):
        # v: [batch, num_regions, v_dim]; q: [batch, num_words, q_dim]
        if None == self.h_out:
            v_ = self.v_net(v).transpose(1,2).unsqueeze(3)
            q_ = self.q_net(q).transpose(1,2).unsqueeze(2)
            d_ = torch.matmul(v_, q_) # b x h_dim x v x q
            logits = d_.transpose(1,2).transpose(2,3) # b x v x q x h_dim
            return logits
        # broadcast Hadamard product, matrix-matrix production
        # fast computation but memory inefficient
        # epoch 1, time: 157.84
        elif self.h_out <= self.c:
            v_ = self.dropout(self.v_net(v)).unsqueeze(1)
            q_ = self.q_net(q)
            h_ = v_ * self.h_mat # broadcast, b x h_out x v x h_dim
            logits = torch.matmul(h_, q_.unsqueeze(1).transpose(2,3)) # b x h_out x v x q
            logits = logits + self.h_bias
            return logits # b x h_out x v x q
        # batch outer product, linear projection
        # memory efficient but slow computation
        # epoch 1, time: 304.87
        else:
            v_ = self.dropout(self.v_net(v)).transpose(1,2).unsqueeze(3)
            q_ = self.q_net(q).transpose(1,2).unsqueeze(2)
            d_ = torch.matmul(v_, q_) # b x h_dim x v x q
            logits = self.h_net(d_.transpose(1,2).transpose(2,3)) # b x v x q x h_out
            return logits.transpose(2,3).transpose(1,2) # b x h_out x v x q
    def forward_with_weights(self, v, q, w):
        # pool the joint features through a given attention map w (b x v x q)
        v_ = self.v_net(v).transpose(1,2).unsqueeze(2) # b x d x 1 x v
        q_ = self.q_net(q).transpose(1,2).unsqueeze(3) # b x d x q x 1
        logits = torch.matmul(torch.matmul(v_.float(), w.unsqueeze(1).float()), q_.float()).type_as(v_) # b x d x 1 x 1
        # logits = torch.matmul(torch.matmul(v_, w.unsqueeze(1)), q_)# b x d x 1 x 1
        logits = logits.squeeze(3).squeeze(2)
        if 1 < self.k:
            logits = logits.unsqueeze(1) # b x 1 x d
            logits = self.p_net(logits).squeeze(1) * self.k # sum-pooling
        return logits
if __name__=='__main__':
    # GPU smoke test: bilinear-attend 36 visual positions vs 14 question words
    net = BCNet(1024,1024,1024,1024).cuda()
    x = torch.Tensor(512,36,1024).cuda()
    y = torch.Tensor(512,14,1024).cuda()
    out = net.forward(x,y)
| 3,408 | 40.573171 | 119 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/attention.py | """
This code is extended from Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang's repository.
https://github.com/jnhwkim/ban-vqa
This code is modified from ZCYang's repository.
https://github.com/zcyang/imageqa-san
"""
import torch
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
from bc import BCNet
# Bilinear Attention
class BiAttention(nn.Module):
    """Bilinear attention over image-region x question-word pairs.

    Produces ``glimpse`` attention distributions (softmax over all
    region/word pairs) together with the raw logits from the wrapped BCNet.
    """
    def __init__(self, x_dim, y_dim, z_dim, glimpse, dropout=[.2, .5]):
        super(BiAttention, self).__init__()
        self.glimpse = glimpse
        self.logits = weight_norm(
            BCNet(x_dim, y_dim, z_dim, glimpse, dropout=dropout, k=3),
            name='h_mat', dim=None)

    def forward(self, v, q, v_mask=True):
        """
        v: [batch, k, vdim]
        q: [batch, qdim]
        """
        return self.forward_all(v, q, v_mask)

    def forward_all(self, v, q, v_mask=True):
        num_regions = v.size(1)
        num_words = q.size(1)
        logits = self.logits(v, q)  # b x g x v x q
        if v_mask:
            # all-zero region vectors are padding: push their logits to -inf
            pad = (0 == v.abs().sum(2)).unsqueeze(1).unsqueeze(3).expand(logits.size())
            logits.data.masked_fill_(pad.data, -float('inf'))
        p = nn.functional.softmax(logits.view(-1, self.glimpse, num_regions * num_words), 2)
        return p.view(-1, self.glimpse, num_regions, num_words), logits
# Stacked Attention
class StackedAttention(nn.Module):
    """Stacked Attention Network (SAN) fusion module.

    Each stack attends over image regions conditioned on the current query
    vector and refines the query by adding the attended visual summary.
    """
    def __init__(self, num_stacks, img_feat_size, ques_feat_size, att_size, output_size, drop_ratio):
        super(StackedAttention, self).__init__()
        self.img_feat_size = img_feat_size
        self.ques_feat_size = ques_feat_size
        self.att_size = att_size
        self.output_size = output_size
        self.drop_ratio = drop_ratio
        self.num_stacks = num_stacks
        self.layers = nn.ModuleList()

        self.dropout = nn.Dropout(drop_ratio)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax(dim=1)

        # stack 1 has its own dedicated projections
        self.fc11 = nn.Linear(ques_feat_size, att_size, bias=True)
        self.fc12 = nn.Linear(img_feat_size, att_size, bias=False)
        self.fc13 = nn.Linear(att_size, 1, bias=True)

        # remaining stacks: (query proj, image proj, score proj) triples
        for _ in range(num_stacks - 1):
            self.layers.append(nn.Linear(att_size, att_size, bias=True))
            self.layers.append(nn.Linear(img_feat_size, att_size, bias=False))
            self.layers.append(nn.Linear(att_size, 1, bias=True))

    def forward(self, img_feat, ques_feat, v_mask=True):
        batch = ques_feat.size(0)

        # ---- stack 1 ----
        query = self.fc11(ques_feat)
        img_emb = self.fc12(img_feat)
        # attention scores over regions, conditioned on the query
        scores = self.fc13(self.dropout(self.tanh(query.view(batch, 1, self.att_size) + img_emb)))
        if v_mask:
            # zero-padded regions must not receive attention mass
            pad = (0 == img_emb.abs().sum(2)).unsqueeze(2).expand(scores.size())
            scores.data.masked_fill_(pad.data, -float('inf'))
        att = self.softmax(scores)
        # refine the query with the attended visual summary
        query = query + torch.sum(img_emb * att, dim=1)

        # ---- stacks 2..num_stacks ----
        for stack in range(self.num_stacks - 1):
            q_emb = self.layers[3 * stack + 0](query)
            img_emb = self.layers[3 * stack + 1](img_feat)
            scores = self.layers[3 * stack + 2](
                self.dropout(self.tanh(q_emb.view(batch, -1, self.att_size) + img_emb)))
            if v_mask:
                pad = (0 == img_emb.abs().sum(2)).unsqueeze(2).expand(scores.size())
                scores.data.masked_fill_(pad.data, -float('inf'))
            att = self.softmax(scores)
            query = query + torch.sum(img_emb * att, dim=1)

        return query
| 4,448 | 33.488372 | 101 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/train.py | """
This code is modified based on Jin-Hwa Kim's repository (Bilinear Attention Networks - https://github.com/jnhwkim/ban-vqa) by Xuan B. Nguyen
"""
import os
import time
import torch
import utils
import torch.nn as nn
from trainer import Trainer
warmup_updates = 4000
# Kaiming normalization initialization
def init_weights(m):
    """Kaiming-normal initialisation for nn.Linear layers (no-op otherwise).

    Meant to be used via ``model.apply(init_weights)``.  Uses ``isinstance``
    rather than the non-idiomatic ``type(m) == nn.Linear`` exact-type check,
    so Linear subclasses are initialised too.
    """
    if isinstance(m, nn.Linear):
        with torch.no_grad():
            torch.nn.init.kaiming_normal_(m.weight)
# VQA score computation
def compute_score_with_logits(logits, labels):
    """Soft VQA accuracy: value of the label vector at the argmax prediction."""
    pred = torch.max(logits, 1)[1].data  # argmax class index per row
    one_hots = torch.zeros(*labels.size()).to(pred.device)
    one_hots.scatter_(1, pred.view(-1, 1), 1)
    return one_hots * labels
# Train phase
def train(args, model, train_loader, eval_loader, num_epochs, output, opt=None, s_epoch=0):
    """Main VQA training loop with warm-up / step-decay LR scheduling.

    Trains *model* for epochs s_epoch..num_epochs-1, logging to
    ``output/log.txt``, saving a checkpoint every epoch and tracking the best
    eval score in ``model_epoch_best.pth``.  ``opt`` lets a caller resume with
    an existing optimizer; gradient accumulation is controlled by
    ``args.update_freq``.
    """
    device = args.device
    # Scheduler learning rate
    lr_default = args.lr
    lr_decay_step = 2
    lr_decay_rate = .75
    # NOTE(review): both branches of this conditional are identical
    lr_decay_epochs = range(10,20,lr_decay_step) if eval_loader is not None else range(10,20,lr_decay_step)
    gradual_warmup_steps = [0.5 * lr_default, 1.0 * lr_default, 1.5 * lr_default, 2.0 * lr_default]
    saving_epoch = 0 # Start point for model saving
    grad_clip = args.clip_norm
    utils.create_dir(output)
    # Adamax optimizer
    optim = torch.optim.Adamax(filter(lambda p: p.requires_grad, model.parameters()), lr=lr_default) \
        if opt is None else opt
    # Loss function
    criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')
    ae_criterion = torch.nn.MSELoss()
    # write hyper-parameter to log file
    logger = utils.Logger(os.path.join(output, 'log.txt'))
    logger.write(args.__repr__())
    utils.print_model(model, logger)
    logger.write('optim: adamax lr=%.4f, decay_step=%d, decay_rate=%.2f, grad_clip=%.2f' % \
        (lr_default, lr_decay_step, lr_decay_rate, grad_clip))
    # create trainer
    trainer = Trainer(args, model, criterion, optim, ae_criterion)
    update_freq = int(args.update_freq)
    wall_time_start = time.time()
    best_eval_score = 0
    # Epoch passing in training phase
    for epoch in range(s_epoch, num_epochs):
        total_loss = 0
        train_score = 0
        total_norm = 0
        count_norm = 0
        num_updates = 0
        t = time.time()
        N = len(train_loader.dataset)
        num_batches = int(N/args.batch_size + 1)
        # warm up for the first few epochs, then step-decay on the schedule
        if epoch < len(gradual_warmup_steps):
            trainer.optimizer.param_groups[0]['lr'] = gradual_warmup_steps[epoch]
            logger.write('gradual warm up lr: %.4f' % trainer.optimizer.param_groups[0]['lr'])
        elif epoch in lr_decay_epochs:
            trainer.optimizer.param_groups[0]['lr'] *= lr_decay_rate
            logger.write('decreased lr: %.4f' % trainer.optimizer.param_groups[0]['lr'])
        else:
            logger.write('lr: %.4f' % trainer.optimizer.param_groups[0]['lr'])
        # Predicting and computing score
        for i, (v, q, a, _, _, _) in enumerate(train_loader):
            # reshape flat image features to the layout the MAML encoder expects
            # (1-channel for the RAD dataset, 3-channel otherwise)
            if args.maml:
                if 'RAD' in args.VQA_dir:
                    v[0] = v[0].reshape(v[0].shape[0], args.img_size, args.img_size).unsqueeze(1)
                else:
                    v[0] = v[0].reshape(v[0].shape[0], 3, args.img_size, args.img_size)
                # v[0] = v[0].transpose(1, 3)
            if args.autoencoder:
                v[1] = v[1].reshape(v[1].shape[0], 128, 128).unsqueeze(1)
            v[0] = v[0].to(device)
            v[1] = v[1].to(device)
            q = q.to(device)
            a = a.to(device)
            sample = [v, q, a]
            # accumulate gradients for update_freq batches, then step
            if i < num_batches - 1 and (i + 1) % update_freq > 0:
                trainer.train_step(sample, update_params=False)
            else:
                loss, grad_norm, batch_score = trainer.train_step(sample, update_params=True)
                total_norm += grad_norm
                count_norm += 1
                total_loss += loss.item()
                train_score += batch_score
                num_updates += 1
                if num_updates % int(args.print_interval / update_freq) == 0:
                    print("Iter: {}, Loss {:.4f}, Norm: {:.4f}, Total norm: {:.4f}, Num updates: {}, Wall time: {:.2f}, ETA: {}".format(i + 1, total_loss / ((num_updates + 1)), grad_norm, total_norm, num_updates, time.time() - wall_time_start, utils.time_since(t, i / num_batches)))
        total_loss /= num_updates
        train_score = 100 * train_score / (num_updates * args.batch_size)
        # Evaluation
        if eval_loader is not None:
            print("Evaluating...")
            trainer.model.train(False)
            eval_score, bound = evaluate(model, eval_loader, args)
            trainer.model.train(True)
        logger.write('epoch %d, time: %.2f' % (epoch, time.time()-t))
        logger.write('\ttrain_loss: %.2f, norm: %.4f, score: %.2f' % (total_loss, total_norm/count_norm, train_score))
        if eval_loader is not None:
            logger.write('\teval score: %.2f (%.2f)' % (100 * eval_score, 100 * bound))
        # Save per epoch
        if epoch >= saving_epoch:
            model_path = os.path.join(output, 'model_epoch%d.pth' % epoch)
            utils.save_model(model_path, model, epoch, trainer.optimizer)
            # Save best epoch
            if eval_loader is not None and eval_score > best_eval_score:
                model_path = os.path.join(output, 'model_epoch_best.pth')
                utils.save_model(model_path, model, epoch, trainer.optimizer)
                best_eval_score = eval_score
# Evaluation
def evaluate(model, dataloader, args):
    """Score *model* over *dataloader* and return (accuracy, upper bound).

    Both returned values are normalized by the dataset size. Runs without
    gradient tracking. The upper bound is the best achievable score given
    the soft labels (sum of per-sample max label mass).
    """
    dev = args.device
    total_score = 0
    bound = 0
    seen = 0
    with torch.no_grad():
        for v, q, a, _, _, _ in dataloader:
            if args.maml:
                # MAML backbone consumes raw pixels; reshape per dataset layout.
                if 'RAD' in args.VQA_dir:
                    v[0] = v[0].reshape(v[0].shape[0], args.img_size, args.img_size).unsqueeze(1)
                else:
                    v[0] = v[0].reshape(v[0].shape[0], 3, args.img_size, args.img_size)
            if args.autoencoder:
                v[1] = v[1].reshape(v[1].shape[0], 128, 128).unsqueeze(1)
            v[0], v[1] = v[0].to(dev), v[1].to(dev)
            q, a = q.to(dev), a.to(dev)
            # The auto-encoder variant also returns a reconstruction; ignore it here.
            features = model(v, q)[0] if args.autoencoder else model(v, q)
            logits = model.classifier(features)
            total_score += compute_score_with_logits(logits, a.data).sum()
            bound += (a.max(1)[0]).sum()
            seen += logits.size(0)
    n = len(dataloader.dataset)
    return total_score / n, bound / n
| 6,862 | 39.609467 | 282 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/auto_encoder.py | """
Auto-encoder module for MEVF model
This code is written by Binh X. Nguyen and Binh D. Nguyen
<link paper>
"""
import torch.nn as nn
from torch.distributions.normal import Normal
import functools
import operator
import torch.nn.functional as F
import torch
def add_noise(images, mean=0, std=0.1):
    """Return *images* corrupted with i.i.d. Gaussian noise N(mean, std)."""
    dist = Normal(mean, std)
    return images + dist.sample(images.shape)
def print_model(model):
    """Print the model architecture followed by its total parameter count."""
    print(model)
    # numel() is the product of all tensor dimensions, identical to the
    # reduce(mul, size, 1) formulation.
    total = sum(w.numel() for w in model.parameters())
    print(total)
class Auto_Encoder_Model(nn.Module):
    """Convolutional auto-encoder for single-channel images.

    Attribute names are part of the checkpoint format (state_dict keys) and
    must not be renamed.
    """

    def __init__(self):
        super(Auto_Encoder_Model, self).__init__()
        # Encoder: 1 -> 64 -> 32 -> 16 channels; spatial size /4 via two max-pools.
        self.conv1 = nn.Conv2d(1, 64, padding=1, kernel_size=3)
        self.max_pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(64, 32, padding=1, kernel_size=3)
        self.max_pool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(32, 16, padding=1, kernel_size=3)
        # Decoder: two stride-2 transposed convs restore the input resolution.
        self.tran_conv1 = nn.ConvTranspose2d(16, 32, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.conv4 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.tran_conv2 = nn.ConvTranspose2d(32, 64, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.conv5 = nn.Conv2d(64, 1, kernel_size=3, padding=1)

    def forward_pass(self, x):
        """Encode *x* into a 16-channel latent feature map."""
        h = self.max_pool1(F.relu(self.conv1(x)))
        h = self.max_pool2(F.relu(self.conv2(h)))
        return F.relu(self.conv3(h))

    def reconstruct_pass(self, x):
        """Decode a latent feature map back to a [0, 1] single-channel image."""
        h = F.relu(self.tran_conv1(x))
        h = F.relu(self.conv4(h))
        h = F.relu(self.tran_conv2(h))
        return torch.sigmoid(self.conv5(h))

    def forward(self, x):
        """Full encode/decode round trip."""
        return self.reconstruct_pass(self.forward_pass(x))
| 2,019 | 32.114754 | 106 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/trainer.py | """
This code is modified based on Jin-Hwa Kim's repository (Bilinear Attention Networks - https://github.com/jnhwkim/ban-vqa) by Xuan B. Nguyen
"""
import torch
import utils
import contextlib
from collections import defaultdict, OrderedDict
from meters import AverageMeter, TimeMeter
class Trainer(object):
    """
    Main class for training.

    Wraps a model, its classification criterion and (optionally) an
    auto-encoder reconstruction criterion. train_step() buffers stats per
    batch and only steps the optimizer when update_params=True, so several
    forward/backward passes can be accumulated into one parameter update.
    """
    def __init__(self, args, model, criterion, optimizer=None, ae_criterion = None):
        # args must provide .device, .clip_norm and .autoencoder (and
        # .ae_alpha when autoencoder is enabled).
        self.args = args
        # copy model and criterion on current device
        self.model = model.to(self.args.device)
        self.criterion = criterion.to(self.args.device)
        # NOTE(review): ae_criterion.to(...) is called unconditionally, so
        # ae_criterion=None would raise here despite the default — confirm
        # callers always pass it.
        self.ae_criterion = ae_criterion.to(self.args.device)
        # initialize meters
        self.meters = OrderedDict()
        self.meters['train_loss'] = AverageMeter()
        self.meters['train_nll_loss'] = AverageMeter()
        self.meters['valid_loss'] = AverageMeter()
        self.meters['valid_nll_loss'] = AverageMeter()
        self.meters['wps'] = TimeMeter() # words per second
        self.meters['ups'] = TimeMeter() # updates per second
        self.meters['wpb'] = AverageMeter() # words per batch
        self.meters['bsz'] = AverageMeter() # sentences per batch
        self.meters['gnorm'] = AverageMeter() # gradient norm
        self.meters['clip'] = AverageMeter() # % of updates clipped
        self.meters['oom'] = AverageMeter() # out of memory
        self.meters['wall'] = TimeMeter() # wall time in seconds
        # per-accumulation-window buffers, cleared after each real update
        self._buffered_stats = defaultdict(lambda: [])
        self._flat_grads = None
        self._num_updates = 0
        self._optim_history = None
        self._optimizer = None
        if optimizer is not None:
            self._optimizer = optimizer
        self.total_loss = 0.0
        self.train_score = 0.0
        self.total_norm = 0.0
        self.count_norm = 0.0
    @property
    def optimizer(self):
        # Lazily build the optimizer on first access if none was injected.
        if self._optimizer is None:
            self._build_optimizer()
        return self._optimizer
    def _build_optimizer(self):
        # Placeholder: optimizer construction is expected to be injected via
        # __init__; the fairseq-style builders below are disabled.
        # self._optimizer = optim.build_optimizer(self.args, self.model.parameters())
        # self._optimizer =
        # self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self._optimizer)
        pass
    def train_step(self, sample, update_params=True):
        """Do forward, backward and parameter update."""
        # Set seed based on args.seed and the update number so that we get
        # reproducible results when resuming from checkpoints
        # seed = self.args.seed + self.get_num_updates()
        # torch.manual_seed(seed)
        # torch.cuda.manual_seed(seed)
        # forward and backward pass
        sample = self._prepare_sample(sample)
        loss, sample_size, oom_fwd, batch_score = self._forward(sample)
        oom_bwd = self._backward(loss)
        # buffer stats and logging outputs
        # self._buffered_stats['sample_sizes'].append(sample_size)
        self._buffered_stats['sample_sizes'].append(1)
        self._buffered_stats['ooms_fwd'].append(oom_fwd)
        self._buffered_stats['ooms_bwd'].append(oom_bwd)
        # update parameters
        if update_params:
            # gather logging outputs from all replicas
            sample_sizes = self._buffered_stats['sample_sizes']
            ooms_fwd = self._buffered_stats['ooms_fwd']
            ooms_bwd = self._buffered_stats['ooms_bwd']
            ooms_fwd = sum(ooms_fwd)
            ooms_bwd = sum(ooms_bwd)
            # aggregate stats and logging outputs
            # grad_denom equals the number of buffered batches (each counted as 1)
            grad_denom = sum(sample_sizes)
            grad_norm = 0
            try:
                # all-reduce and rescale gradients, then take an optimization step
                grad_norm = self._all_reduce_and_rescale(grad_denom)
                self._opt()
                # update meters
                if grad_norm is not None:
                    self.meters['gnorm'].update(grad_norm)
                    self.meters['clip'].update(1. if grad_norm > self.args.clip_norm else 0.)
                self.meters['oom'].update(ooms_fwd + ooms_bwd)
            except OverflowError as e:
                self.zero_grad()
                print('| WARNING: overflow detected, ' + str(e))
            self.clear_buffered_stats()
            return loss, grad_norm, batch_score
        else:
            return None # buffering updates
    def _forward(self, sample, eval=False):
        """Run one forward pass; returns (loss, sample size, oom flag, batch score)."""
        # prepare model and optimizer
        if eval:
            self.model.eval()
        else:
            self.model.train()
        loss = None
        oom = 0
        batch_score = 0
        if sample is not None:
            try:
                with torch.no_grad() if eval else contextlib.ExitStack():
                    answers = sample[2]
                    img_data = sample[0][1]
                    # MEVF loss computation
                    if self.args.autoencoder:
                        features, decoder = self.model(sample[0], sample[1])
                    else:
                        features = self.model(sample[0], sample[1])
                    preds = self.model.classifier(features)
                    loss = self.criterion(preds.float(), answers)
                    if self.args.autoencoder: # compute reconstruction loss
                        loss_ae = self.ae_criterion(img_data, decoder)
                        # multi-task loss
                        loss = loss + (loss_ae*self.args.ae_alpha)
                    # normalize by batch size
                    loss /= answers.size()[0]
                    final_preds = preds
                    batch_score = compute_score_with_logits(final_preds, sample[2].data).sum()
            except RuntimeError as e:
                # CUDA OOM during training is tolerated: skip the batch
                if not eval and 'out of memory' in str(e):
                    print('| WARNING: ran out of memory, skipping batch')
                    oom = 1
                    loss = None
                else:
                    raise e
        return loss, len(sample[0]), oom, batch_score # TODO: Not sure about sample size, need to recheck
    def _backward(self, loss):
        """Backpropagate *loss*; returns 1 if an OOM was swallowed, else 0."""
        oom = 0
        if loss is not None:
            try:
                # backward pass
                loss.backward()
            except RuntimeError as e:
                if 'out of memory' in str(e):
                    print('| WARNING: ran out of memory, skipping batch')
                    oom = 1
                    self.zero_grad()
                else:
                    raise e
        return oom
    def _all_reduce_and_rescale(self, grad_denom):
        """Average buffered gradients by *grad_denom*, clip, and write them back."""
        # flatten grads into a single buffer and all-reduce
        flat_grads = self._flat_grads = self._get_flat_grads(self._flat_grads)
        # rescale and clip gradients
        flat_grads.div_(grad_denom)
        grad_norm = utils.clip_grad_norm_(flat_grads, self.args.clip_norm)
        # copy grads back into model parameters
        self._set_flat_grads(flat_grads)
        return grad_norm
    def _get_grads(self):
        """Collect .grad tensors of all trainable parameters (in model order)."""
        grads = []
        for name, p in self.model.named_parameters():
            if not p.requires_grad:
                continue
            if p.grad is None:
                raise RuntimeError('Model parameter did not receive gradient: ' + name + '. '
                                   'Use the param in the forward pass or set requires_grad=False')
            grads.append(p.grad.data)
        return grads
    def _get_flat_grads(self, out=None):
        """Concatenate all gradients into one flat buffer (reused across calls)."""
        grads = self._get_grads()
        if out is None:
            grads_size = sum(g.numel() for g in grads)
            out = grads[0].new(grads_size).zero_()
        offset = 0
        for g in grads:
            numel = g.numel()
            out[offset:offset+numel].copy_(g.view(-1))
            offset += numel
        return out[:offset]
    def _set_flat_grads(self, new_grads):
        """Scatter the flat gradient buffer back into each parameter's .grad."""
        grads = self._get_grads()
        offset = 0
        for g in grads:
            numel = g.numel()
            g.copy_(new_grads[offset:offset+numel].view_as(g))
            offset += numel
    def _opt(self):
        """Apply one optimizer step, reset gradients, bump the update counter."""
        # take an optimization step
        self.optimizer.step()
        self.zero_grad()
        self._num_updates += 1
        # update learning rate
        # self.lr_scheduler.step_update(self._num_updates)
    def zero_grad(self):
        self.optimizer.zero_grad()
    def clear_buffered_stats(self):
        self._buffered_stats.clear()
    def get_num_updates(self):
        """Get the number of parameters updates."""
        return self._num_updates
    def _prepare_sample(self, sample):
        """Move a non-empty sample to CUDA; returns None for empty input."""
        if sample is None or len(sample) == 0:
            return None
        return utils.move_to_cuda(sample)
    def dummy_train_step(self, dummy_batch):
        """Dummy training step for warming caching allocator."""
        self.train_step(dummy_batch, update_params=False)
        self.zero_grad()
        self.clear_buffered_stats()
def compute_score_with_logits(logits, labels):
    """Per-sample VQA score: the soft-label mass at each argmax prediction.

    Returns a tensor shaped like *labels* that is zero everywhere except at
    the predicted class column, where it holds that class's label value.
    """
    predicted = logits.argmax(dim=1)
    mask = torch.zeros(*labels.size()).to(predicted.device)
    mask.scatter_(1, predicted.view(-1, 1), 1)
    return mask * labels
| 9,194 | 36.226721 | 152 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/language_model.py | """
This code is from Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang's repository.
https://github.com/jnhwkim/ban-vqa
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class WordEmbedding(nn.Module):
    """Word Embedding
    The ntoken-th dim is used for padding_idx, which agrees *implicitly*
    with the definition in Dictionary.

    When 'c' is in *op*, a second (frozen) embedding table is kept and its
    output is concatenated to the trainable one, doubling the feature dim.
    """
    def __init__(self, ntoken, emb_dim, dropout, op=''):
        super(WordEmbedding, self).__init__()
        self.op = op
        # +1 row reserved for the padding index (index == ntoken)
        self.emb = nn.Embedding(ntoken+1, emb_dim, padding_idx=ntoken)
        if 'c' in op:
            self.emb_ = nn.Embedding(ntoken+1, emb_dim, padding_idx=ntoken)
            self.emb_.weight.requires_grad = False # fixed
        self.dropout = nn.Dropout(dropout)
        self.ntoken = ntoken
        self.emb_dim = emb_dim
    def init_embedding(self, np_file, tfidf=None, tfidf_weights=None):
        # Load pretrained vectors (e.g. GloVe) saved as a .npy matrix of
        # shape (ntoken, emb_dim); the padding row is left untouched.
        weight_init = torch.from_numpy(np.load(np_file))
        assert weight_init.shape == (self.ntoken, self.emb_dim)
        self.emb.weight.data[:self.ntoken] = weight_init
        if tfidf is not None:
            # Optionally mix in tf-idf-weighted vectors; this also unfreezes
            # the second table.
            if 0 < tfidf_weights.size:
                weight_init = torch.cat([weight_init, torch.from_numpy(tfidf_weights)], 0)
            weight_init = tfidf.matmul(weight_init) # (N x N') x (N', F)
            self.emb_.weight.requires_grad = True
        if 'c' in self.op:
            self.emb_.weight.data[:self.ntoken] = weight_init.clone()
    def forward(self, x):
        # x: token-index tensor; returns embeddings with dropout applied.
        emb = self.emb(x)
        if 'c' in self.op:
            # concatenate frozen-table embeddings along the feature dim
            emb = torch.cat((emb, self.emb_(x)), 2)
        emb = self.dropout(emb)
        return emb
class QuestionEmbedding(nn.Module):
    def __init__(self, in_dim, num_hid, nlayers, bidirect, dropout, rnn_type='GRU'):
        """Module for question embedding

        Wraps an LSTM/GRU over word embeddings. For bidirectional RNNs,
        num_hid is split evenly between the two directions.
        """
        super(QuestionEmbedding, self).__init__()
        assert rnn_type == 'LSTM' or rnn_type == 'GRU'
        rnn_cls = nn.LSTM if rnn_type == 'LSTM' else nn.GRU if rnn_type == 'GRU' else None
        self.rnn = rnn_cls(
            in_dim, num_hid, nlayers,
            bidirectional=bidirect,
            dropout=dropout,
            batch_first=True)
        self.in_dim = in_dim
        self.num_hid = num_hid
        self.nlayers = nlayers
        self.rnn_type = rnn_type
        self.ndirections = 1 + int(bidirect)
    def init_hidden(self, batch):
        """Return a zeroed initial hidden state for a batch of size *batch*."""
        # just to get the type of tensor
        weight = next(self.parameters()).data
        hid_shape = (self.nlayers * self.ndirections, batch, self.num_hid // self.ndirections)
        if self.rnn_type == 'LSTM':
            # LSTM needs both hidden and cell state
            return (Variable(weight.new(*hid_shape).zero_()),
                    Variable(weight.new(*hid_shape).zero_()))
        else:
            return Variable(weight.new(*hid_shape).zero_())
    def forward(self, x):
        """Return one embedding per question (the RNN's final state)."""
        # x: [batch, sequence, in_dim]
        batch = x.size(0)
        hidden = self.init_hidden(batch)
        output, hidden = self.rnn(x, hidden)
        if self.ndirections == 1:
            return output[:, -1]
        # Bidirectional: forward direction's last step + backward direction's
        # first step (which summarizes the full sequence in reverse).
        forward_ = output[:, -1, :self.num_hid]
        backward = output[:, 0, self.num_hid:]
        return torch.cat((forward_, backward), dim=1)
    def forward_all(self, x):
        """Return the RNN output at every time step (for attention)."""
        # x: [batch, sequence, in_dim]
        batch = x.size(0)
        hidden = self.init_hidden(batch)
        output, hidden = self.rnn(x, hidden)
        return output
| 3,405 | 35.234043 | 94 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/meters.py | """
This code is from https://github.com/pytorch/fairseq
"""
import time
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
class TimeMeter(object):
    """Measures the rate (events per second) of some repeated event."""

    def __init__(self, init=0):
        self.reset(init)

    def reset(self, init=0):
        """Restart the clock, optionally crediting *init* elapsed seconds."""
        self.init = init
        self.n = 0
        self.start = time.time()

    def update(self, val=1):
        """Count *val* more occurrences of the event."""
        self.n += val

    @property
    def elapsed_time(self):
        """Seconds since the last reset, plus the initial credit."""
        return self.init + (time.time() - self.start)

    @property
    def avg(self):
        """Average events per second over the elapsed time."""
        return self.n / self.elapsed_time
class StopwatchMeter(object):
    """Accumulates the total duration of a (possibly repeated) timed event."""

    def __init__(self):
        self.reset()

    def start(self):
        """Begin timing one occurrence."""
        self.start_time = time.time()

    def stop(self, n=1):
        """Stop timing and credit the elapsed span to *n* occurrences.

        A stop without a matching start is silently ignored.
        """
        if self.start_time is not None:
            self.sum += time.time() - self.start_time
            self.n += n
            self.start_time = None

    def reset(self):
        """Discard all accumulated time."""
        self.sum = 0
        self.n = 0
        self.start_time = None

    @property
    def avg(self):
        """Mean duration per occurrence (undefined when n == 0)."""
        return self.sum / self.n
| 1,513 | 21.264706 | 66 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/mmq_maml/VQA_RAD_train.py | import torch, os
import numpy as np
from VQA_RAD import VQARAD_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
def mean_confidence_interval(accs, confidence=0.95):
    """Return (mean, half-width) of the *confidence* interval for *accs*.

    Uses the Student-t distribution with n-1 degrees of freedom, which is
    appropriate for the small sample sizes typical of meta-test episodes.

    :param accs: 1-D numpy array of accuracy values
    :param confidence: two-sided confidence level (default 0.95)
    :return: (mean, half-width) so the interval is mean +/- half-width
    """
    n = accs.shape[0]
    m, se = np.mean(accs), scipy.stats.sem(accs)
    # Use the public t.ppf API; t._ppf is private and may break across scipy
    # releases. For the standard (loc=0, scale=1) t distribution they agree.
    h = se * scipy.stats.t.ppf((1 + confidence) / 2, n - 1)
    return m, h
def main(args):
    """Meta-train a MAML classifier on VQA-RAD image episodes and save it.

    Requires a CUDA device. One checkpoint is saved per 10000 episodes
    (args.epoch // 10000 outer epochs of 10000 episodes each).
    """
    # fixed seeds for reproducible episode sampling and initialization
    torch.manual_seed(222)
    torch.cuda.manual_seed_all(222)
    np.random.seed(222)
    # add other class (n_way + 1)
    args.n_way += 1
    print(args)
    # hidden layer dimension config
    # NOTE(review): only 84 and 128 are handled; any other imgsz leaves
    # dim_hidden_linear unbound and raises NameError below — confirm intended.
    if args.imgsz == 84:
        dim_hidden_linear = 64 * 8 * 8
    elif args.imgsz == 128:
        dim_hidden_linear = 32 * 11 * 11
    # model config
    # each entry is (layer_type, layer_params) consumed by Meta/Learner
    config = [
        ('conv2d', [64, 1, 3, 3, 2, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('conv2d', [64, 64, 3, 3, 2, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('conv2d', [64, 64, 3, 3, 2, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('conv2d', [64, 64, 2, 2, 1, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('flatten', []),
        ('linear', [args.n_way, dim_hidden_linear])
    ]
    # initial MAML model
    device = torch.device('cuda')
    maml = Meta(args, config).to(device)
    tmp = filter(lambda x: x.requires_grad, maml.parameters())
    num = sum(map(lambda x: np.prod(x.shape), tmp))
    print(maml)
    print('Total trainable tensors:', num)
    # Load dataset
    # batchsz here means total episode number
    mini = VQARAD_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
                         k_query=args.k_qry,
                         batchsz=10000, resize=args.imgsz, t = args.t_dst)
    # Train model
    for epoch in range(args.epoch//10000):
        # fetch meta_batchsz num of episode each time
        db = DataLoader(mini, args.task_num, shuffle=True, num_workers=1, pin_memory=True)
        start = time.time()
        for step, (x_spt, y_spt, x_qry, y_qry, _, _) in enumerate(db):
            x_spt, y_spt, x_qry, y_qry = x_spt.to(device), y_spt.to(device), x_qry.to(device), y_qry.to(device)
            # one meta-update over a batch of tasks; returns per-step accuracies
            accs = maml(x_spt, y_spt, x_qry, y_qry)
            if step % 50 == 0:
                end = time.time()
                print('step:', step, '\ttraining acc:', accs, '\ttime:', end - start)
                start = time.time()
        # Save model
        model_dir = args.output + '/VQARAD_maml%d_miccai2021_newmethod_%dway_%dshot_t%d'%(args.imgsz, args.n_way, args.k_spt, args.t_dst)
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        model_path = os.path.join(model_dir, 'model_epoch%d.pth' % epoch)
        print('saving model to:', model_dir)
        torch.save(maml.state_dict(), model_path)
if __name__ == '__main__':
    # CLI entry point: hyper-parameters for MAML meta-training on VQA-RAD.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--epoch', type=int, help='epoch number', default=10000)
    argparser.add_argument('--n_way', type=int, help='n way', default=3)
    argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=3)
    argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=3)
    argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
    argparser.add_argument('--imgc', type=int, help='imgc', default=1)
    argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=5)
    argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
    argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
    argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
    argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
    argparser.add_argument('--output', type=str, help='output directory for saving models', default='saved_models')
    argparser.add_argument('--data', type=str, help='data directory', default='data/vqarad_maml/')
    argparser.add_argument('--t_dst', type=int, help='t-th step', default=0)
    args = argparser.parse_args()
    main(args)
| 4,249 | 37.990826 | 137 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/mmq_maml/VQA_RAD_fuse.py | import torch, os
import numpy as np
from VQA_RAD import VQARAD_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
import pickle as p
def mean_confidence_interval(accs, confidence=0.95):
    """Return (mean, half-width) of the *confidence* interval for *accs*.

    Uses the Student-t distribution with n-1 degrees of freedom, which is
    appropriate for the small sample sizes typical of meta-test episodes.

    :param accs: 1-D numpy array of accuracy values
    :param confidence: two-sided confidence level (default 0.95)
    :return: (mean, half-width) so the interval is mean +/- half-width
    """
    n = accs.shape[0]
    m, se = np.mean(accs), scipy.stats.sem(accs)
    # Use the public t.ppf API; t._ppf is private and may break across scipy
    # releases. For the standard (loc=0, scale=1) t distribution they agree.
    h = se * scipy.stats.t.ppf((1 + confidence) / 2, n - 1)
    return m, h
def num_tensors(model):
    """Return the total count of scalar entries across all trainable parameters."""
    return sum(np.prod(p.shape) for p in model.parameters() if p.requires_grad)
def feature_extraction(maml, db_test, device):
    """Extract validation features/logits for each of the `args.nums_t` saved
    models and pickle them to `args.feature_output`.

    NOTE(review): reads the module-level `args` (not a parameter) and opens
    pickle files without closing them — consider `with open(...)` blocks.
    """
    for i in range(args.nums_t):
        # checkpoint path for the i-th refinement model
        model_dir = args.input + '/VQARAD_maml%d_miccai2021_newmethod_%dway_%dshot_t%d' % (args.imgsz, args.n_way, args.k_spt, i)
        model_path = os.path.join(model_dir, 'model_epoch%d.pth' % args.epoch)
        print('-------load model weight from:', model_path)
        maml.load_state_dict(torch.load(model_path))
        start = time.time()
        logit_softmax_list = []
        feature_list = []
        path_list = []
        for x_spt, y_spt, x_qry, y_qry, _, flatten_query_x in db_test:
            # DataLoader batch size is 1, so squeeze away the batch dim
            x_spt, y_spt, x_qry, y_qry = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
                                         x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device)
            logit_softmax, feature = maml.feature_validation(x_spt, y_spt, x_qry, y_qry)
            logit_softmax_list += logit_softmax
            feature_list += list(feature.cpu())
            path_list += flatten_query_x
        if not os.path.exists(args.feature_output):
            os.makedirs(args.feature_output)
        # persist per-model features and softmax confidences for later fusion
        p.dump(feature_list, open(args.feature_output + '/VQARAD_feature_t%d.pkl'%(i), 'wb'))
        p.dump(logit_softmax_list, open(args.feature_output + '/VQARAD_logit_softmax_t%d.pkl'%(i), 'wb'))
        # [b, update_step+1]
        end = time.time()
        print('***************** Time:', end - start)
def load_feature_and_logit(args):
    """Load the pickled features/logits of all `args.nums_t` models.

    :return: (features, logits) where features is a tensor transposed to
        [num_samples, nums_t, feat_dim] and logits to [num_samples, nums_t].
    NOTE(review): pickle files are opened without being closed — consider
    `with open(...)`.
    """
    features = []
    logits = []
    for t in range(args.nums_t):
        features.append(torch.stack(p.load(open(args.feature_output + '/VQARAD_feature_t%d.pkl' % (t), 'rb'))))
        logits.append(p.load(open(args.feature_output + '/VQARAD_logit_softmax_t%d.pkl' % (t), 'rb')))
    # transpose from [nums_t, num_samples, ...] to [num_samples, nums_t, ...]
    features = torch.stack(features).cpu().transpose(0, 1)
    logits = torch.tensor(logits).transpose(0, 1)
    return features, logits
def fuse_score(features, logits, alpha=0.5):
    """
    Compute the fuse score combining prediction confidence with feature
    diversity: S_F = alpha * S_P + (1 - alpha) * tanh(sum_{t != c} (1 - cos(F_c, F_t))).
    For more detail, please visit Algorithm 2 of the paper.
    :param features: tensor [num_samples, m, feat_dim] of per-model features
    :param logits: tensor [num_samples, m] of per-model softmax confidences
    :param alpha: weight trading confidence (S_P) against diversity
    :return results: per-sample lists of fuse scores, one per model
    """
    cos = torch.nn.CosineSimilarity(dim=0)
    results = []
    for idx in range(len(features)):
        if idx % 100 == 0:
            print('%d / %d'%(idx, len(features)))
        feature_sample = features[idx]
        logit_sample = logits[idx]
        sample_results = []
        for i in range(len(feature_sample)):
            # Diversity of model i: accumulated cosine distance to every
            # other model's feature for this sample.
            divergences = [1 - cos(feature_sample[i], feature_sample[j])
                           for j in range(len(feature_sample)) if i != j]
            # torch.tanh: torch.nn.functional.tanh is deprecated and just
            # forwards to torch.tanh anyway.
            diversity = torch.tanh(sum(divergences))
            # renamed from `fuse_score` to avoid shadowing this function
            score = (alpha * logit_sample[i]) + ((1 - alpha) * diversity)
            sample_results.append(score)
        results.append(sample_results)
    return results
def main(args):
    """Extract per-model validation features, compute fuse scores, and print
    a ranked summary used to select which refinement models to keep.

    Requires a CUDA device and the checkpoints produced by VQA_RAD_train.
    """
    # fixed seeds for reproducible episode sampling
    torch.manual_seed(222)
    torch.cuda.manual_seed_all(222)
    np.random.seed(222)
    # add other class (n_way + 1)
    args.n_way += 1
    print(args)
    # hidden layer dimension config
    # NOTE(review): only 84 and 128 are handled; other sizes leave
    # dim_hidden_linear unbound — confirm intended.
    if args.imgsz == 84:
        dim_hidden_linear = 64 * 8 * 8
    elif args.imgsz == 128:
        dim_hidden_linear = 32 * 11 * 11
    # model config
    # must match the architecture used at training time so checkpoints load
    config = [
        ('conv2d', [64, 1, 3, 3, 2, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('conv2d', [64, 64, 3, 3, 2, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('conv2d', [64, 64, 3, 3, 2, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('conv2d', [64, 64, 2, 2, 1, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('flatten', []),
        ('linear', [args.n_way, dim_hidden_linear])
    ]
    # initial MAML model
    device = torch.device('cuda')
    maml = Meta(args, config).to(device)
    print(maml)
    print('Total trainable tensors:', num_tensors(maml))
    # Load validation dataset
    # batchsz here means total episode number
    mini_test = VQARAD_maml(args.data, mode='val', n_way=args.n_way, k_shot=args.k_spt,
                             k_query=args.k_qry,
                             batchsz=600, resize=args.imgsz)
    db_test = DataLoader(mini_test, 1, shuffle=False, num_workers=1, pin_memory=True)
    # Extract validation features, logits for each model and save them
    feature_extraction(maml, db_test, device)
    # Load all features and logits
    features, logits = load_feature_and_logit(args)
    # Compute fuse score
    results = fuse_score(features, logits, alpha = 0.75)
    # Show results to select suitable models
    # average the per-sample scores so each model gets one summary score
    results = torch.tensor(results).mean(dim=0)
    print(results)
    print('------- sort', torch.sort(results, descending=True))
if __name__ == '__main__':
    # CLI entry point: parameters for model-fusion scoring on VQA-RAD.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--epoch', type=int, help='epoch number', default=0)
    argparser.add_argument('--n_way', type=int, help='n way', default=3)
    argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=3)
    argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=3)
    argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
    argparser.add_argument('--imgc', type=int, help='imgc', default=1)
    argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=5)
    argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
    argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
    argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
    argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
    argparser.add_argument('--input', type=str, help='input directory for saving models', default='saved_models')
    argparser.add_argument('--data', type=str, help='data directory', default='data/vqarad_maml/')
    argparser.add_argument('--nums_t', type=int, help='num model', default=6) # m refinement models + first model (5 + 1 = 6)
    argparser.add_argument('--feature_output', type=str, help='input directory for saving feature', default='features')
    args = argparser.parse_args()
    main(args)
| 7,237 | 40.597701 | 145 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/mmq_maml/pathVQA_maml.py | import os
import torch
from torch.utils.data import Dataset
from torchvision.transforms import transforms
import numpy as np
from PIL import Image
import random
class PathVQA_maml(Dataset):
"""
NOTICE: meta-learning is different from general supervised learning, especially the concept of batch and set.
batch: contains several sets
sets: conains n_way * k_shot for meta-train set, n_way * n_query for meta-test set.
"""
    def __init__(self, root, mode, batchsz, n_way, k_shot, k_query, resize, startidx=0, unlabel = False, t = 0):
        """
        :param root: root path of mini-imagenet
        :param mode: train, val or test
        :param batchsz: batch size of sets, not batch of imgs
        :param n_way: number of classes per episode (includes the "other" class)
        :param k_shot: support images per class
        :param k_query: num of query imgs per class
        :param resize: resize to
        :param startidx: start to index label from startidx
        :param unlabel: when True, query sets are drawn from an unlabeled pool
        :param t: refinement-step index selecting the 't%d' train subfolder
        """
        self.batchsz = batchsz # batch of set, not batch of imgs
        self.n_way = n_way # n-way
        self.k_shot = k_shot # k-shot
        self.k_query = k_query # for evaluation
        self.setsz = self.n_way * self.k_shot # num of samples per set
        self.querysz = self.n_way * self.k_query # number of samples per set for evaluation
        self.resize = resize # resize to
        self.startidx = startidx # index label not from 0, but from startidx
        self.unlabel = unlabel
        print('shuffle DB :%s, b:%d, %d-way, %d-shot, %d-query, resize:%d' % (
        mode, batchsz, n_way, k_shot, k_query, resize))
        # training gets (currently disabled) augmentation hooks; eval does not
        if mode == 'train':
            self.transform = transforms.Compose([lambda x: Image.open(x).convert('RGB'),
                                                 transforms.Resize((self.resize, self.resize)),
                                                 # transforms.RandomHorizontalFlip(),
                                                 # transforms.RandomRotation(5),
                                                 transforms.ToTensor(),
                                                 transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                                                 ])
        else:
            self.transform = transforms.Compose([lambda x: Image.open(x).convert('RGB'),
                                                 transforms.Resize((self.resize, self.resize)),
                                                 transforms.ToTensor(),
                                                 transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                                                 ])
        # self.path = os.path.join(root, 'maml', mode)
        # self.path = os.path.join(root, 'maml%d'%(self.resize), mode)
        if mode == 'train':
            self.path = os.path.join(root, 't%d'%(t), mode)
        else:
            self.path = os.path.join(root, mode)
        print('--loading data from:', self.path)
        data = self.loadData(self.path)# image path
        self.data = []
        self.img2label = {}
        self.label2class = []
        for i, (k, v) in enumerate(data.items()):
            self.data.append(v) # [[img1, img2, ...], [img111, ...]]
            self.img2label[k] = i + self.startidx # {"img_name[:9]":label}
            self.label2class.append(k)
        # reserved label for the synthetic "other" class (paths tagged 'x')
        # NOTE(review): relies on loop variable `i` surviving the loop above;
        # raises NameError if the dataset directory is empty — confirm it never is.
        self.img2label['x'] = i + 1 + self.startidx
        self.cls_num = len(self.data)
        if self.unlabel:
            # unlabeled pool used as query set during data refinement
            self.path_unlabel = os.path.join(root, 't%d'%(t), mode + '_unlabel')
            self.data_unlabel = [os.path.join(self.path_unlabel, file) for file in os.listdir(self.path_unlabel)]
            self.create_batch_unlabel(self.batchsz)
        else:
            self.create_batch(self.batchsz)
def loadData(self, path):
"""
return a dict saving the information of csv
:param splitFile: csv file name
:return: {label:[file1, file2 ...]}
"""
dictLabels = {}
for (dirpath, dirnames, _) in os.walk(path):
for dir in dirnames:
dictLabels[dir] = [os.path.join(dir, file) for file in os.listdir(os.path.join(dirpath, dir))]
return dictLabels
def create_other(self, selected_cls):
other_pool = []
for idx, i in enumerate(self.data):
if idx not in selected_cls:
other_pool += i
other_pool = [i + 'x' for i in other_pool]
return other_pool
def create_batch(self, batchsz):
"""
create batch for meta-learning.
×episode× here means batch, and it means how many sets we want to retain.
:param batchsz: batch size
:return:
"""
self.support_x_batch = [] # support set batch
self.query_x_batch = [] # query set batch
for b in range(batchsz): # for each batch
# 1.select n_way classes randomly
selected_cls = np.random.choice(self.cls_num, self.n_way - 1, False) # no duplicate
np.random.shuffle(selected_cls)
support_x = []
query_x = []
for cls in selected_cls:
# 2. select k_shot + k_query for each class
try:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot + self.k_query, False)
except:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot + self.k_query, True)
np.random.shuffle(selected_imgs_idx)
indexDtrain = np.array(selected_imgs_idx[:self.k_shot]) # idx for Dtrain
indexDtest = np.array(selected_imgs_idx[self.k_shot:]) # idx for Dtest
support_x.append(
np.array(self.data[cls])[indexDtrain].tolist()) # get all images filename for current Dtrain
query_x.append(np.array(self.data[cls])[indexDtest].tolist())
other_images = self.create_other(selected_cls)
try:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot + self.k_query, False)
except:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot + self.k_query, True)
np.random.shuffle(selected_imgs_idx_other)
indexDtrain_other = np.array(selected_imgs_idx_other[:self.k_shot]) # idx for Dtrain
indexDtest_other = np.array(selected_imgs_idx_other[self.k_shot:])
support_x.append(
np.array(other_images)[indexDtrain_other].tolist()) # get all images filename for current Dtrain
query_x.append(np.array(other_images)[indexDtest_other].tolist())
# shuffle the correponding relation between support set and query set
random.shuffle(support_x)
random.shuffle(query_x)
self.support_x_batch.append(support_x) # append set to current sets
self.query_x_batch.append(query_x) # append sets to current sets
def create_batch_unlabel(self, batchsz):
# Data Refinement for unlabel data, See Algorithm 1 of the paper
self.support_x_batch = [] # support set batch
self.query_x_batch = [] # query set batch
for b in range(batchsz): # for each batch
# 1.select n_way classes randomly
selected_cls = np.random.choice(self.cls_num, self.n_way - 1, False) # no duplicate
np.random.shuffle(selected_cls)
support_x = []
query_x = []
for cls in selected_cls:
# 2. select k_shot + k_query for each class
try:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot, False)
except:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot, True)
np.random.shuffle(selected_imgs_idx)
indexDtrain = np.array(selected_imgs_idx) # idx for Dtrain
support_x.append(
np.array(self.data[cls])[indexDtrain].tolist()) # get all images filename for current Dtrain
selected_imgs_idx_test = np.random.choice(len(self.data_unlabel), self.k_query, False)
indexDtest = np.array(selected_imgs_idx_test) # idx for Dtest
query_x.append(np.array(self.data_unlabel)[indexDtest].tolist())
other_images = self.create_other(selected_cls)
try:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot, False)
except:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot, True)
np.random.shuffle(selected_imgs_idx_other)
indexDtrain_other = np.array(selected_imgs_idx_other)
support_x.append(
np.array(other_images)[indexDtrain_other].tolist())
selected_imgs_idx_test_other = np.random.choice(len(self.data_unlabel), self.k_query, False)
indexDtest_other = np.array(selected_imgs_idx_test_other) # idx for Dtest
query_x.append(np.array(self.data_unlabel)[indexDtest_other].tolist())
# shuffle the correponding relation between support set and query set
random.shuffle(support_x)
random.shuffle(query_x)
self.support_x_batch.append(support_x) # append set to current sets
self.query_x_batch.append(query_x) # append sets to current sets
def __getitem__(self, index):
"""
index means index of sets, 0<= index <= batchsz-1
:param index:
:return:
"""
# [setsz, 3, resize, resize]
support_x = torch.FloatTensor(self.setsz, 3, self.resize, self.resize)
# [setsz]
support_y = np.zeros((self.setsz), dtype=np.int)
# [querysz, 3, resize, resize]
query_x = torch.FloatTensor(self.querysz, 3, self.resize, self.resize)
# [querysz]
flatten_support_x = [os.path.join(self.path, item if item[-1] != "x" else item[:-1])
for sublist in self.support_x_batch[index] for item in sublist]
support_y = np.array(
[self.img2label[item.split('/')[0] if item[-1] != "x" else "x"] # filename:n0153282900000005.jpg, the first 9 characters treated as label
for sublist in self.support_x_batch[index] for item in sublist]).astype(np.int32)
if not self.unlabel:
flatten_query_x = [os.path.join(self.path, item if item[-1] != "x" else item[:-1])
for sublist in self.query_x_batch[index] for item in sublist]
else:
flatten_query_x = [item for sublist in self.query_x_batch[index] for item in sublist]
# print('global:', support_y, query_y)
# support_y: [setsz]
# query_y: [querysz]
# unique: [n-way], sorted
unique = np.unique(support_y)
random.shuffle(unique)
# relative means the label ranges from 0 to n-way
support_y_relative = np.zeros(self.setsz)
for idx, l in enumerate(unique):
support_y_relative[support_y == l] = idx
# print('relative:', support_y_relative, query_y_relative)
for i, path in enumerate(flatten_support_x):
support_x[i] = self.transform(path)
for i, path in enumerate(flatten_query_x):
query_x[i] = self.transform(path)
# print(support_set_y)
# return support_x, torch.LongTensor(support_y), query_x, torch.LongTensor(query_y)
if not self.unlabel:
query_y_relative = np.zeros(self.querysz)
query_y = np.zeros((self.querysz), dtype=np.int)
query_y = np.array([self.img2label[item.split('/')[0] if item[-1] != "x" else "x"]
for sublist in self.query_x_batch[index] for item in sublist]).astype(np.int32)
for idx, l in enumerate(unique):
query_y_relative[query_y == l] = idx
return support_x, torch.LongTensor(support_y_relative), query_x, torch.LongTensor(query_y_relative), torch.LongTensor(query_y), flatten_query_x
return support_x, torch.LongTensor(support_y_relative), query_x, torch.LongTensor(unique), flatten_query_x
def __len__(self):
# as we have built up to batchsz of sets, you can sample some small batch size of sets.
return self.batchsz
| 12,430 | 47.74902 | 155 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/mmq_maml/learner.py | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
class Learner(nn.Module):
    """
    Functional-style network container for MAML.

    The architecture is described by ``config`` — a list of
    ``(op_name, params)`` tuples — and every trainable tensor lives in the
    flat ``self.vars`` list so the meta-learner can run forward passes with
    externally supplied "fast weights" instead of the stored ones.
    """

    def __init__(self, config, imgc, imgsz):
        """
        :param config: network config file, type:list of (string, list)
        :param imgc: input channels, 1 or 3 (kept for reference only)
        :param imgsz: input spatial size, 28 or 84 (kept for reference only)
        """
        super(Learner, self).__init__()

        self.config = config

        # flat list of all tensors to be optimized
        self.vars = nn.ParameterList()
        # batch-norm running_mean and running_var (buffers, never optimized)
        self.vars_bn = nn.ParameterList()

        for i, (name, param) in enumerate(self.config):
            # BUGFIX: strings must be compared with '==', not 'is'; identity
            # comparison only worked by accident of CPython interning and
            # raises SyntaxWarning on Python >= 3.8.
            if name == 'conv2d':
                # [ch_out, ch_in, kernelsz, kernelsz]
                w = nn.Parameter(torch.ones(*param[:4]))
                # gain=1 according to cbfin's implementation
                torch.nn.init.kaiming_normal_(w)
                self.vars.append(w)
                # [ch_out]
                self.vars.append(nn.Parameter(torch.zeros(param[0])))

            elif name == 'convt2d':
                # [ch_in, ch_out, kernelsz, kernelsz, stride, padding]
                w = nn.Parameter(torch.ones(*param[:4]))
                # gain=1 according to cbfin's implementation
                torch.nn.init.kaiming_normal_(w)
                self.vars.append(w)
                # [ch_in, ch_out]
                self.vars.append(nn.Parameter(torch.zeros(param[1])))

            elif name == 'linear':
                # [ch_out, ch_in]
                w = nn.Parameter(torch.ones(*param))
                # gain=1 according to cbfinn's implementation
                torch.nn.init.kaiming_normal_(w)
                self.vars.append(w)
                # [ch_out]
                self.vars.append(nn.Parameter(torch.zeros(param[0])))

            elif name == 'bn':
                # learnable affine weight, [ch_out]
                w = nn.Parameter(torch.ones(param[0]))
                self.vars.append(w)
                # learnable affine bias, [ch_out]
                self.vars.append(nn.Parameter(torch.zeros(param[0])))

                # running statistics: must set requires_grad=False
                running_mean = nn.Parameter(torch.zeros(param[0]), requires_grad=False)
                running_var = nn.Parameter(torch.ones(param[0]), requires_grad=False)
                self.vars_bn.extend([running_mean, running_var])

            elif name in ['tanh', 'relu', 'upsample', 'avg_pool2d', 'max_pool2d',
                          'flatten', 'reshape', 'leakyrelu', 'sigmoid']:
                # parameter-free ops need no tensors
                continue
            else:
                raise NotImplementedError

    def extra_repr(self):
        """Human-readable one-line-per-layer summary used by ``__repr__``."""
        info = ''

        for name, param in self.config:
            if name == 'conv2d':
                tmp = 'conv2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
                      %(param[1], param[0], param[2], param[3], param[4], param[5],)
                info += tmp + '\n'

            elif name == 'convt2d':
                tmp = 'convTranspose2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
                      %(param[0], param[1], param[2], param[3], param[4], param[5],)
                info += tmp + '\n'

            elif name == 'linear':
                tmp = 'linear:(in:%d, out:%d)'%(param[1], param[0])
                info += tmp + '\n'

            elif name == 'leakyrelu':
                tmp = 'leakyrelu:(slope:%f)'%(param[0])
                info += tmp + '\n'

            elif name == 'avg_pool2d':
                tmp = 'avg_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
                info += tmp + '\n'
            elif name == 'max_pool2d':
                tmp = 'max_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
                info += tmp + '\n'
            elif name in ['flatten', 'tanh', 'relu', 'upsample', 'reshape', 'sigmoid', 'use_logits', 'bn']:
                tmp = name + ':' + str(tuple(param))
                info += tmp + '\n'
            else:
                raise NotImplementedError

        return info

    def forward(self, x, vars=None, bn_training=True):
        """
        Run the configured network on ``x`` with either the stored weights
        (``vars is None``) or externally supplied fast weights.

        This function can be called by finetunning; in finetunning we don't
        wish to update running_mean/running_var. Although weight/bias of bn
        are updated, they have been separated out as fast_weights; to freeze
        the running statistics pass ``bn_training=False``.

        :param x: [b, c, h, w] input batch
        :param vars: optional flat list of fast weights (same layout as self.vars)
        :param bn_training: set False to not update batch-norm statistics
        :return: (x, feature) — network output and the channel-pooled feature
                 captured at the 'flatten' layer
        """
        if vars is None:
            vars = self.vars

        idx = 0
        bn_idx = 0

        for name, param in self.config:
            if name == 'conv2d':
                w, b = vars[idx], vars[idx + 1]
                # remember to keep synchrozied of forward_encoder and forward_decoder!
                x = F.conv2d(x, w, b, stride=param[4], padding=param[5])
                idx += 2
            elif name == 'convt2d':
                w, b = vars[idx], vars[idx + 1]
                x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])
                idx += 2
            elif name == 'linear':
                w, b = vars[idx], vars[idx + 1]
                x = F.linear(x, w, b)
                idx += 2
            elif name == 'bn':
                w, b = vars[idx], vars[idx + 1]
                running_mean, running_var = self.vars_bn[bn_idx], self.vars_bn[bn_idx+1]
                x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)
                idx += 2
                bn_idx += 2
            elif name == 'flatten':
                # Channel-pooled feature; the channel count is read from the
                # first config entry's param[0] (ch_out for conv2d) — assumes
                # config[0] is a conv layer. TODO confirm for other configs.
                feature = torch.mean(x.view(x.size(0), self.config[0][1][0], -1), 2)
                x = x.view(x.size(0), -1)
            elif name == 'reshape':
                # [b, 8] => [b, 2, 2, 2]
                x = x.view(x.size(0), *param)
            elif name == 'relu':
                x = F.relu(x, inplace=param[0])
            elif name == 'leakyrelu':
                x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])
            elif name == 'tanh':
                x = F.tanh(x)
            elif name == 'sigmoid':
                x = torch.sigmoid(x)
            elif name == 'upsample':
                x = F.upsample_nearest(x, scale_factor=param[0])
            elif name == 'max_pool2d':
                x = F.max_pool2d(x, param[0], param[1], param[2])
            elif name == 'avg_pool2d':
                x = F.avg_pool2d(x, param[0], param[1], param[2])
            else:
                raise NotImplementedError

        # make sure every variable was consumed exactly once
        assert idx == len(vars)
        assert bn_idx == len(self.vars_bn)

        # NOTE(review): `feature` is only bound by a 'flatten' layer; a config
        # without one raises NameError here.
        return x, feature

    def zero_grad(self, vars=None):
        """Zero the gradients of ``self.vars`` (or of the given fast weights).

        :param vars: optional external weight list; defaults to self.vars
        """
        with torch.no_grad():
            if vars is None:
                for p in self.vars:
                    if p.grad is not None:
                        p.grad.zero_()
            else:
                for p in vars:
                    if p.grad is not None:
                        p.grad.zero_()

    def parameters(self):
        """
        Override: return the flat ParameterList directly (not a generator),
        so the meta-learner can index into it.
        """
        return self.vars
MICCAI21_MMQ | MICCAI21_MMQ-main/mmq_maml/VQA_RAD.py | import os
import torch
from torch.utils.data import Dataset
from torchvision.transforms import transforms
import numpy as np
from PIL import Image
import random
class VQARAD_maml(Dataset):
    """
    Episodic (meta-learning) dataset over VQA-RAD images.

    NOTICE: meta-learning is different from general supervised learning,
    especially the concept of batch and set.

    batch: contains several sets
    sets: contains n_way * k_shot for meta-train set, n_way * n_query for meta-test set.
    """

    def __init__(self, root, mode, batchsz, n_way, k_shot, k_query, resize, startidx=0, unlabel=False, t=0):
        """
        :param root: root path of the VQA-RAD image folders
        :param mode: train, val or test
        :param batchsz: batch size of sets, not batch of imgs
        :param n_way: number of ways (including the synthetic "other" way)
        :param k_shot: support samples per class
        :param k_query: num of query imgs per class
        :param resize: resize images to this size
        :param startidx: start to index label from startidx
        :param unlabel: if True, query sets come from the unlabelled pool
        :param t: refinement step index (selects the t-th training split)
        """
        self.batchsz = batchsz  # batch of set, not batch of imgs
        self.n_way = n_way  # n-way
        self.k_shot = k_shot  # k-shot
        self.k_query = k_query  # for evaluation
        self.setsz = self.n_way * self.k_shot  # num of samples per set
        self.querysz = self.n_way * self.k_query  # number of samples per set for evaluation
        self.resize = resize  # resize to
        self.startidx = startidx  # index label not from 0, but from startidx
        self.unlabel = unlabel
        print('shuffle DB :%s, b:%d, %d-way, %d-shot, %d-query, resize:%d' % (
            mode, batchsz, n_way, k_shot, k_query, resize))

        # Grayscale pipeline. NOTE(review): ToTensor already scales pixels to
        # [0, 1]; the extra x / 255. scales them down again — presumably
        # intentional (matches how the models were trained), kept as-is.
        self.transform = transforms.Compose([lambda x: Image.open(x).convert('L'),
                                             transforms.Resize(self.resize),
                                             # transforms.RandomHorizontalFlip(),
                                             # transforms.RandomRotation(5),
                                             transforms.ToTensor(),
                                             lambda x: x / 255.,
                                             lambda x: x.unsqueeze(0)
                                             ])

        if mode == 'train':
            self.path = os.path.join(root, 't%d' % (t), mode)
        else:
            self.path = os.path.join(root, mode)
        print('--loading data from:', self.path)

        data = self.loadData(self.path)  # {class_dir: [relative image paths]}
        self.data = []
        self.img2label = {}
        self.label2class = []
        for i, (k, v) in enumerate(data.items()):
            self.data.append(v)  # [[img1, img2, ...], [img111, ...]]
            self.img2label[k] = i + self.startidx  # {"class_dir_name": label}
            self.label2class.append(k)
        # The synthetic "other" class gets the next free label id.
        # NOTE(review): relies on the loop above running at least once; an
        # empty dataset directory would raise NameError on `i`.
        self.img2label['x'] = i + 1 + self.startidx
        self.cls_num = len(self.data)

        if self.unlabel:
            self.path_unlabel = os.path.join(root, 't%d' % (t), mode + '_unlabel')
            self.data_unlabel = [os.path.join(self.path_unlabel, file) for file in os.listdir(self.path_unlabel)]
            self.create_batch_unlabel(self.batchsz)
        else:
            self.create_batch(self.batchsz)

    def loadData(self, path):
        """
        Walk ``path`` and collect one entry per class sub-directory.

        :param path: dataset split directory (one sub-directory per class)
        :return: {label:[file1, file2 ...]} with paths relative to ``path``
        """
        dictLabels = {}
        for (dirpath, dirnames, _) in os.walk(path):
            for dir in dirnames:
                dictLabels[dir] = [os.path.join(dir, file) for file in os.listdir(os.path.join(dirpath, dir))]
        return dictLabels

    def create_other(self, selected_cls):
        """Pool every image NOT belonging to ``selected_cls`` and tag it with
        a trailing 'x' so downstream code recognises the "other" class.

        :param selected_cls: iterable of selected class indices
        :return: list of tagged relative image paths
        """
        other_pool = []
        for idx, i in enumerate(self.data):
            if idx not in selected_cls:
                other_pool += i
        other_pool = [i + 'x' for i in other_pool]
        return other_pool

    def create_batch(self, batchsz):
        """
        create batch for meta-learning.
        "episode" here means batch, and it means how many sets we want to retain.

        :param batchsz: number of episodes to build
        """
        self.support_x_batch = []  # support set batch
        self.query_x_batch = []  # query set batch
        for b in range(batchsz):  # for each batch
            # 1. select n_way - 1 real classes randomly (the last way is "other")
            selected_cls = np.random.choice(self.cls_num, self.n_way - 1, False)  # no duplicate
            np.random.shuffle(selected_cls)
            support_x = []
            query_x = []
            for cls in selected_cls:
                # 2. select k_shot + k_query for each class; np.random.choice
                # raises ValueError when the class is smaller than the draw
                # without replacement, so fall back to replacement then.
                try:
                    selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot + self.k_query, False)
                except ValueError:
                    selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot + self.k_query, True)
                np.random.shuffle(selected_imgs_idx)
                indexDtrain = np.array(selected_imgs_idx[:self.k_shot])  # idx for Dtrain
                indexDtest = np.array(selected_imgs_idx[self.k_shot:])  # idx for Dtest
                support_x.append(
                    np.array(self.data[cls])[indexDtrain].tolist())  # all image filenames for current Dtrain
                query_x.append(np.array(self.data[cls])[indexDtest].tolist())
            # the "other" way is drawn from all remaining classes
            other_images = self.create_other(selected_cls)
            try:
                selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot + self.k_query, False)
            except ValueError:
                selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot + self.k_query, True)
            np.random.shuffle(selected_imgs_idx_other)
            indexDtrain_other = np.array(selected_imgs_idx_other[:self.k_shot])  # idx for Dtrain
            indexDtest_other = np.array(selected_imgs_idx_other[self.k_shot:])
            support_x.append(
                np.array(other_images)[indexDtrain_other].tolist())
            query_x.append(np.array(other_images)[indexDtest_other].tolist())
            # shuffle the corresponding relation between support set and query set
            random.shuffle(support_x)
            random.shuffle(query_x)
            self.support_x_batch.append(support_x)  # append set to current sets
            self.query_x_batch.append(query_x)  # append sets to current sets

    def create_batch_unlabel(self, batchsz):
        """Data Refinement for unlabelled data, see Algorithm 1 of the paper.

        Same as :meth:`create_batch`, except every query set is sampled from
        the unlabelled pool so it can be pseudo-labelled later.

        :param batchsz: number of episodes to build
        """
        self.support_x_batch = []  # support set batch
        self.query_x_batch = []  # query set batch
        for b in range(batchsz):  # for each batch
            # 1. select n_way - 1 real classes randomly
            selected_cls = np.random.choice(self.cls_num, self.n_way - 1, False)  # no duplicate
            np.random.shuffle(selected_cls)
            support_x = []
            query_x = []
            for cls in selected_cls:
                # 2. select k_shot support images per class, with replacement
                # only when the class is too small (see create_batch).
                try:
                    selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot, False)
                except ValueError:
                    selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot, True)
                np.random.shuffle(selected_imgs_idx)
                indexDtrain = np.array(selected_imgs_idx)  # idx for Dtrain
                support_x.append(
                    np.array(self.data[cls])[indexDtrain].tolist())  # all image filenames for current Dtrain
                # queries are absolute paths into the unlabelled pool
                selected_imgs_idx_test = np.random.choice(len(self.data_unlabel), self.k_query, False)
                indexDtest = np.array(selected_imgs_idx_test)  # idx for Dtest
                query_x.append(np.array(self.data_unlabel)[indexDtest].tolist())
            other_images = self.create_other(selected_cls)
            try:
                selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot, False)
            except ValueError:
                selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot, True)
            np.random.shuffle(selected_imgs_idx_other)
            indexDtrain_other = np.array(selected_imgs_idx_other)
            support_x.append(
                np.array(other_images)[indexDtrain_other].tolist())
            selected_imgs_idx_test_other = np.random.choice(len(self.data_unlabel), self.k_query, False)
            indexDtest_other = np.array(selected_imgs_idx_test_other)  # idx for Dtest
            query_x.append(np.array(self.data_unlabel)[indexDtest_other].tolist())
            # shuffle the corresponding relation between support set and query set
            random.shuffle(support_x)
            random.shuffle(query_x)
            self.support_x_batch.append(support_x)  # append set to current sets
            self.query_x_batch.append(query_x)  # append sets to current sets

    def __getitem__(self, index):
        """Return the ``index``-th episode (0 <= index <= batchsz - 1).

        :param index: episode index into the pre-built support/query batches
        :return: labelled mode   -> (support_x, support_y_relative, query_x,
                                     query_y_relative, query_y, flatten_query_x)
                 unlabelled mode -> (support_x, support_y_relative, query_x,
                                     unique, flatten_query_x)
        """
        # [setsz, 1, resize, resize] — grayscale
        support_x = torch.FloatTensor(self.setsz, 1, self.resize, self.resize)
        # [querysz, 1, resize, resize]
        query_x = torch.FloatTensor(self.querysz, 1, self.resize, self.resize)
        # A trailing "x" marks "other"-class samples; strip it to recover the
        # real path, and map those samples to the 'x' label.
        flatten_support_x = [os.path.join(self.path, item if item[-1] != "x" else item[:-1])
                             for sublist in self.support_x_batch[index] for item in sublist]
        # NOTE: the old dead pre-allocations used np.int, which was removed in
        # NumPy 1.24; they were redundant (immediately overwritten) and are gone.
        support_y = np.array(
            [self.img2label[item.split('/')[0] if item[-1] != "x" else "x"]
             for sublist in self.support_x_batch[index] for item in sublist]).astype(np.int32)
        if not self.unlabel:
            flatten_query_x = [os.path.join(self.path, item if item[-1] != "x" else item[:-1])
                               for sublist in self.query_x_batch[index] for item in sublist]
        else:
            # unlabelled queries are already absolute paths
            flatten_query_x = [item for sublist in self.query_x_batch[index] for item in sublist]
        # unique: the n_way global labels of this episode; its shuffled order
        # defines the global -> relative label mapping.
        unique = np.unique(support_y)
        random.shuffle(unique)
        support_y_relative = np.zeros(self.setsz)
        for idx, l in enumerate(unique):
            support_y_relative[support_y == l] = idx
        for i, path in enumerate(flatten_support_x):
            support_x[i] = self.transform(path)
        for i, path in enumerate(flatten_query_x):
            query_x[i] = self.transform(path)
        if not self.unlabel:
            query_y_relative = np.zeros(self.querysz)
            query_y = np.array([self.img2label[item.split('/')[0] if item[-1] != "x" else "x"]
                                for sublist in self.query_x_batch[index] for item in sublist]).astype(np.int32)
            for idx, l in enumerate(unique):
                query_y_relative[query_y == l] = idx
            return support_x, torch.LongTensor(support_y_relative), query_x, torch.LongTensor(query_y_relative), torch.LongTensor(query_y), flatten_query_x
        return support_x, torch.LongTensor(support_y_relative), query_x, torch.LongTensor(unique), flatten_query_x

    def __len__(self):
        # as we have built up to batchsz of sets, you can sample some small batch size of sets.
        return self.batchsz
| 11,779 | 46.692308 | 155 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/mmq_maml/pathVQA_maml_train.py | import torch, os
import numpy as np
from pathVQA_maml import PathVQA_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
def mean_confidence_interval(accs, confidence=0.95):
    """Return (mean, half-width) of the Student-t confidence interval.

    :param accs: 1-D numpy array of per-episode accuracies
    :param confidence: two-sided confidence level (default 0.95)
    :return: (m, h) such that the interval is m +/- h
    """
    n = accs.shape[0]
    m, se = np.mean(accs), scipy.stats.sem(accs)
    # Use the public t.ppf instead of the private t._ppf (an internal SciPy
    # API that may disappear); the returned values are identical.
    h = se * scipy.stats.t.ppf((1 + confidence) / 2, n - 1)
    return m, h
def main(args):
    """Meta-train a MAML model on PathVQA episodes and checkpoint it.

    :param args: parsed CLI namespace (see the __main__ block of this file)
    """
    torch.manual_seed(222)
    torch.cuda.manual_seed_all(222)
    np.random.seed(222)
    # add other class (n_way + 1)
    args.n_way += 1
    print(args)
    # hidden layer dimension config
    # NOTE(review): dim_hidden_linear is only bound for imgsz 84 or 128; any
    # other value would raise NameError in the config below.
    if args.imgsz == 84:
        dim_hidden_linear = 32 * 5 * 5
    elif args.imgsz == 128:
        dim_hidden_linear = 32 * 11 * 11
    # model config: 4 conv blocks followed by a linear classification head
    config = [
        ('conv2d', [32, 3, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 2, 0]),
        ('conv2d', [32, 32, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 2, 0]),
        ('conv2d', [32, 32, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 2, 0]),
        ('conv2d', [32, 32, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 1, 0]),
        ('flatten', []),
        ('linear', [args.n_way, dim_hidden_linear])
    ]
    # initial MAML model (requires a CUDA device)
    device = torch.device('cuda')
    maml = Meta(args, config).to(device)
    # count scalar elements across all trainable parameters
    tmp = filter(lambda x: x.requires_grad, maml.parameters())
    num = sum(map(lambda x: np.prod(x.shape), tmp))
    print(maml)
    print('Total trainable tensors:', num)
    # Load dataset
    # batchsz here means total episode number
    mini = PathVQA_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
                        k_query=args.k_qry,
                        batchsz=10000, resize=args.imgsz, t = args.t_dst)
    mini_test = PathVQA_maml(args.data, mode='test', n_way=args.n_way, k_shot=args.k_spt,
                             k_query=args.k_qry,
                             batchsz=600, resize=args.imgsz)
    # Train model
    for epoch in range(args.epoch//10000):
        # fetch meta_batchsz num of episode each time
        db = DataLoader(mini, args.task_num, shuffle=True, num_workers=1, pin_memory=True)
        start = time.time()
        for step, (x_spt, y_spt, x_qry, y_qry, _, _) in enumerate(db):
            x_spt, y_spt, x_qry, y_qry = x_spt.to(device), y_spt.to(device), x_qry.to(device), y_qry.to(device)
            accs = maml(x_spt, y_spt, x_qry, y_qry)
            # periodic progress report
            if step % 50 == 0:
                end = time.time()
                print('step:', step, '\ttraining acc:', accs, '\ttime:', end - start)
                start = time.time()
            if (step % 1000 == 0 and step != 0) or (step == len(db) - 1):  # evaluation
                db_test = DataLoader(mini_test, 1, shuffle=True, num_workers=1, pin_memory=True)
                start = time.time()
                accs_all_test = []
                for x_spt, y_spt, x_qry, y_qry, _, _ in db_test:
                    x_spt, y_spt, x_qry, y_qry = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
                                                 x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device)
                    accs = maml.finetunning(x_spt, y_spt, x_qry, y_qry)
                    accs_all_test.append(accs)
                # [b, update_step+1]
                accs = np.array(accs_all_test).mean(axis=0).astype(np.float16)
                end = time.time()
                print('***************** Test acc:', accs, '\ttime:', end - start)
                start = time.time()
        # Save model checkpoint for this epoch
        model_dir = args.output + '/maml%d_miccai2021_optimization_newmethod_%dway_%dshot_t%d'%(args.imgsz, args.n_way, args.k_spt, args.t_dst)
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        model_path = os.path.join(model_dir, 'model_epoch%d.pth' % epoch)
        print('saving model to:', model_dir)
        torch.save(maml.state_dict(), model_path)
if __name__ == '__main__':
    # CLI entry point: hyper-parameters for MAML meta-training on PathVQA.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--epoch', type=int, help='epoch number', default=10000)
    argparser.add_argument('--n_way', type=int, help='n way', default=5)
    argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=5)
    argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=15)
    argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
    argparser.add_argument('--imgc', type=int, help='imgc', default=3)
    argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=4)
    argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
    argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
    argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
    argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
    argparser.add_argument('--output', type=str, help='output directory for saving models', default='saved_models')
    argparser.add_argument('--data', type=str, help='data directory', default='data/pathvqa_maml/')
    argparser.add_argument('--t_dst', type=int, help='t-th step', default=0)

    args = argparser.parse_args()

    main(args)
| 5,532 | 40.291045 | 143 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/mmq_maml/pathVQA_maml_fuse.py | import torch, os
import numpy as np
from pathVQA_maml import PathVQA_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
import pickle as p
def mean_confidence_interval(accs, confidence=0.95):
    """Return (mean, half-width) of the Student-t confidence interval.

    :param accs: 1-D numpy array of per-episode accuracies
    :param confidence: two-sided confidence level (default 0.95)
    :return: (m, h) such that the interval is m +/- h
    """
    n = accs.shape[0]
    m, se = np.mean(accs), scipy.stats.sem(accs)
    # Use the public t.ppf instead of the private t._ppf (an internal SciPy
    # API that may disappear); the returned values are identical.
    h = se * scipy.stats.t.ppf((1 + confidence) / 2, n - 1)
    return m, h
def num_tensors(model):
    """Count scalar elements across all trainable parameters of *model*."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return sum(np.prod(p.shape) for p in trainable)
def feature_extraction(maml, db_test, device):
    """Extract query-set softmax scores and pooled features for every
    candidate model and pickle them under ``args.feature_output``.

    Relies on the module-level ``args`` namespace for paths and model count.

    :param maml: Meta wrapper whose ``feature_validation`` returns
                 (per-sample softmax scores, feature tensor)
    :param db_test: DataLoader over validation episodes
    :param device: torch device to run on
    """
    for i in range(args.nums_t):
        model_dir = args.input + '/maml%d_miccai2021_optimization_newmethod_%dway_%dshot_t%d' % (args.imgsz, args.n_way, args.k_spt, i)
        model_path = os.path.join(model_dir, 'model_epoch%d.pth' % args.epoch)
        print('-------load model weight from:', model_path)
        maml.load_state_dict(torch.load(model_path))
        start = time.time()
        logit_softmax_list = []
        feature_list = []
        path_list = []
        for x_spt, y_spt, x_qry, y_qry, _, flatten_query_x in db_test:
            x_spt, y_spt, x_qry, y_qry = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
                                         x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device)
            logit_softmax, feature = maml.feature_validation(x_spt, y_spt, x_qry, y_qry)
            logit_softmax_list += logit_softmax
            feature_list += list(feature.cpu())
            path_list += flatten_query_x
        if not os.path.exists(args.feature_output):
            os.makedirs(args.feature_output)
        # Use context managers so the pickle files are flushed and closed
        # even if dump() raises (the original leaked the file handles).
        with open(args.feature_output + '/feature_t%d.pkl' % (i), 'wb') as f:
            p.dump(feature_list, f)
        with open(args.feature_output + '/logit_softmax_t%d.pkl' % (i), 'wb') as f:
            p.dump(logit_softmax_list, f)
        with open(args.feature_output + '/path_t%d.pkl' % (i), 'wb') as f:
            p.dump(path_list, f)
        end = time.time()
        print('***************** Time:', end - start)
def load_feature_and_logit(args):
    """Load the pickled per-model features and softmax scores.

    :param args: namespace providing ``feature_output`` and ``nums_t``
    :return: (features, logits) with the sample axis first:
             features [num_samples, nums_t, d], logits [num_samples, nums_t]
    """
    features = []
    logits = []
    for t in range(args.nums_t):
        # Context managers close the pickle files deterministically
        # (the original leaked the handles returned by open()).
        with open(args.feature_output + '/feature_t%d.pkl' % (t), 'rb') as f:
            features.append(torch.stack(p.load(f)))
        with open(args.feature_output + '/logit_softmax_t%d.pkl' % (t), 'rb') as f:
            logits.append(p.load(f))
    # move the sample axis to the front: [nums_t, samples, ...] -> [samples, nums_t, ...]
    features = torch.stack(features).cpu().transpose(0, 1)
    logits = torch.tensor(logits).transpose(0, 1)
    return features, logits
def fuse_score(features, logits, alpha=0.8):
    """
    Compute the fuse score of Algorithm 2 of the paper:

        S_F = alpha * S_P + (1 - alpha) * sum_{j != i} (1 - cos(F_i, F_j))

    i.e. each model's probability score is blended with how much its feature
    diverges from every other model's feature on the same sample.

    :param features: per-sample feature stacks, shape [num_samples, m, d]
    :param logits: per-sample model scores, shape [num_samples, m]
    :param alpha: weight of the probability term
    :return: list (per sample) of lists (per model) of fused score tensors
    """
    cos = torch.nn.CosineSimilarity(dim=0)
    total = len(features)
    results = []
    for idx in range(total):
        # progress report every 100 samples
        if idx % 100 == 0:
            print('%d / %d' % (idx, total))
        feats, probs = features[idx], logits[idx]
        m = len(feats)
        fused = []
        for i in range(m):
            # total cosine divergence of model i from every other model
            divergence = sum(1 - cos(feats[i], feats[j]) for j in range(m) if j != i)
            fused.append(alpha * probs[i] + (1 - alpha) * divergence)
        results.append(fused)
    return results
def main(args):
    """Model-selection driver (fuse step): extract validation features and
    logits for every refinement model, compute the fuse score of Algorithm 2,
    and print the ranked per-model scores.

    :param args: parsed CLI namespace (see the __main__ block of this file)
    """
    torch.manual_seed(222)
    torch.cuda.manual_seed_all(222)
    np.random.seed(222)
    # add other class (n_way + 1)
    args.n_way += 1
    print(args)
    # hidden layer dimension config
    # NOTE(review): dim_hidden_linear is only bound for imgsz 84 or 128; any
    # other value would raise NameError in the config below.
    if args.imgsz == 84:
        dim_hidden_linear = 32 * 5 * 5
    elif args.imgsz == 128:
        dim_hidden_linear = 32 * 11 * 11
    # model config: must match the architecture used at training time
    config = [
        ('conv2d', [32, 3, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 2, 0]),
        ('conv2d', [32, 32, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 2, 0]),
        ('conv2d', [32, 32, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 2, 0]),
        ('conv2d', [32, 32, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 1, 0]),
        ('flatten', []),
        ('linear', [args.n_way, dim_hidden_linear])
    ]
    # initial MAML model (requires a CUDA device)
    device = torch.device('cuda')
    maml = Meta(args, config).to(device)
    print(maml)
    print('Total trainable tensors:', num_tensors(maml))
    # Load validation dataset
    # batchsz here means total episode number
    mini_test = PathVQA_maml(args.data, mode='val', n_way=args.n_way, k_shot=args.k_spt,
                             k_query=args.k_qry,
                             batchsz=600, resize=args.imgsz)
    db_test = DataLoader(mini_test, 1, shuffle=False, num_workers=1, pin_memory=True)
    # Extract validation features, logits for each model and save them
    # (feature_extraction reads paths/counts from the module-level args)
    feature_extraction(maml, db_test, device)
    # Load all features and logits
    features, logits = load_feature_and_logit(args)
    # Compute fuse score
    results = fuse_score(features, logits)
    # Show results to select suitable models
    results = torch.tensor(results).mean(dim=0)
    print(results)
    print('------- sort', torch.sort(results, descending=True))
if __name__ == '__main__':
    # CLI entry point: hyper-parameters for the model-fusion/selection step.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--epoch', type=int, help='epoch number', default=0)
    argparser.add_argument('--n_way', type=int, help='n way', default=5)
    argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=5)
    argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=15)
    argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
    argparser.add_argument('--imgc', type=int, help='imgc', default=3)
    argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=4)
    argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
    argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
    argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
    argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
    argparser.add_argument('--input', type=str, help='input directory for saving models', default='saved_models')
    argparser.add_argument('--data', type=str, help='data directory', default='data/pathvqa_maml/')
    argparser.add_argument('--nums_t', type=int, help='num models', default=6)  # m refinement models + first model (5 + 1 = 6)
    argparser.add_argument('--feature_output', type=str, help='input directory for saving feature', default='features')

    args = argparser.parse_args()

    main(args)
| 7,202 | 39.926136 | 141 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/mmq_maml/VQA_RAD_half.py | import torch, os
import numpy as np
from VQA_RAD import VQARAD_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
import shutil
def mean_confidence_interval(accs, confidence=0.95):
    """Return (mean, half-width) of the Student-t confidence interval.

    :param accs: 1-D numpy array of per-episode accuracies
    :param confidence: two-sided confidence level (default 0.95)
    :return: (m, h) such that the interval is m +/- h
    """
    n = accs.shape[0]
    m, se = np.mean(accs), scipy.stats.sem(accs)
    # Use the public t.ppf instead of the private t._ppf (an internal SciPy
    # API that may disappear); the returned values are identical.
    h = se * scipy.stats.t.ppf((1 + confidence) / 2, n - 1)
    return m, h
def unlabel_processing(dataset, unlabel_pool, threshold_fre = 47, threshold_max = 15):
    """
    Auto-annotate confidently predicted unlabelled samples.

    A file is adopted when it was predicted often enough
    (``> threshold_fre`` predictions), one label strictly dominates
    (``> threshold_max`` votes with no tie), and that label is not 9
    (the "other" class id). Adopted files are moved into
    ``<dataset.path>_unlabel_add/<class_name>/``.

    :param dataset: object exposing ``path`` and ``label2class``
    :param unlabel_pool: {file_path: list of predicted labels}
    :param threshold_fre: minimum number of predictions per file
    :param threshold_max: minimum vote count of the winning label
    :return count: number of files moved
    """
    count = 0
    new_path = dataset.path + '_unlabel_add'
    for img_path, preds in unlabel_pool.items():
        if len(preds) <= threshold_fre:
            continue
        # vote histogram over the distinct predicted labels
        ele_count = {lab: preds.count(lab) for lab in set(preds)}
        max_key = max(ele_count, key=ele_count.get)
        max_value = ele_count[max_key]
        all_values = list(ele_count.values())
        # accept only a unique, sufficiently strong winner
        if all_values.count(max_value) == 1 and max_value > threshold_max:
            label = int(max_key)
            if label != 9:
                count += 1
                class_name = dataset.label2class[label]
                dst_dir = os.path.join(new_path, class_name)
                if not os.path.exists(dst_dir):
                    os.makedirs(dst_dir)
                os.rename(img_path, os.path.join(dst_dir, img_path.split('/')[-1]))
    print('The number of additional unlabeled images:', count)
    return count
def label_processing(dataset, label_pool, threshold=0.3):
    """
    Auto-remove labelled samples with high predictive uncertainty.

    Every file whose best score stays below ``threshold`` is moved into
    ``<dataset.path>_label_remove``.

    :param dataset: object exposing ``path``
    :param label_pool: {file_path: list of per-class scores}
    :param threshold: maximum-score cutoff below which a sample is removed
    :return count: number of files moved
    """
    count = 0
    dst_root = dataset.path + '_label_remove'
    if not os.path.exists(dst_root):
        os.makedirs(dst_root)
    for img_path, scores in label_pool.items():
        if max(scores) < threshold:
            count += 1
            os.rename(img_path, os.path.join(dst_root, img_path.split('/')[-1]))
    print('The number of removed labeled images:', count)
    return count
def final_processing(dataset):
    """
    Fold the refinement staging folders back into the dataset.

    Adopted unlabelled files (``<path>_unlabel_add/<class>/``) move into the
    matching class folder under ``dataset.path``; removed labelled files
    (``<path>_label_remove``) move into the unlabelled pool
    (``<path>_unlabel``). Both staging folders are deleted afterwards.

    :param dataset: object exposing ``path``
    """
    root = dataset.path
    add_dir = root + '_unlabel_add'
    for class_name in os.listdir(add_dir):
        class_src = os.path.join(add_dir, class_name)
        for fname in os.listdir(class_src):
            shutil.move(os.path.join(class_src, fname), os.path.join(root, class_name))
    shutil.rmtree(add_dir)
    remove_dir = root + '_label_remove'
    for fname in os.listdir(remove_dir):
        shutil.move(os.path.join(remove_dir, fname), root + '_unlabel')
    shutil.rmtree(remove_dir)
def main(args):
    """
    One dataset-refinement pass over the VQA-RAD meta-training data.

    Loads the MAML model trained at step ``t_dst - 1``, copies the dataset
    folder forward to step ``t_dst``, auto-annotates confident unlabeled
    images, removes uncertain labeled ones, then merges the staged changes.

    :param args: parsed command-line arguments (see the argparser below)
    """
    # fixed seeds for reproducibility of episode sampling
    torch.manual_seed(222)
    torch.cuda.manual_seed_all(222)
    np.random.seed(222)
    # add other class (n_way + 1)
    args.n_way += 1
    print(args)
    # hidden layer dimension config
    # NOTE(review): image sizes other than 84/128 leave dim_hidden_linear
    # unbound and would raise NameError below — presumably only these two
    # sizes are supported; confirm against the training scripts.
    if args.imgsz == 84:
        dim_hidden_linear = 64 * 8 * 8
    elif args.imgsz == 128:
        dim_hidden_linear = 32 * 11 * 11
    # model config: 4 strided conv blocks followed by a linear classifier
    config = [
        ('conv2d', [64, 1, 3, 3, 2, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('conv2d', [64, 64, 3, 3, 2, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('conv2d', [64, 64, 3, 3, 2, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('conv2d', [64, 64, 2, 2, 1, 0]),
        ('relu', [True]),
        ('bn', [64]),
        ('flatten', []),
        ('linear', [args.n_way, dim_hidden_linear])
    ]
    # initial model
    device = torch.device('cuda')
    maml = Meta(args, config).to(device)
    model_dir = args.input + '/VQARAD_maml%d_miccai2021_newmethod_%dway_%dshot_t%d' % (args.imgsz, args.n_way, args.k_spt, args.t_dst - 1)
    model_path = os.path.join(model_dir, 'model_epoch%d.pth' % args.epoch)
    print('-------load model weight from:', model_path)
    maml.load_state_dict(torch.load(model_path))
    # count trainable parameters for logging
    tmp = filter(lambda x: x.requires_grad, maml.parameters())
    num = sum(map(lambda x: np.prod(x.shape), tmp))
    print(maml)
    print('Total trainable tensors:', num)
    # source and destination directory
    t_dst = args.t_dst
    src = os.path.join(args.data, 't%d'%(t_dst - 1))
    dst = os.path.join(args.data, 't%d'%(t_dst))
    # copy original dataset to the destination folder
    shutil.copytree(src, dst)
    ####################################################################
    ## PROCESS UNLABEL DATA
    ####################################################################
    # batchsz here means total episode number
    mini_test_unlabel = VQARAD_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
                                    k_query=args.k_qry,
                                    batchsz=600, resize=args.imgsz, unlabel=True, t=t_dst)
    db_test_unlabel = DataLoader(mini_test_unlabel, 1, shuffle=True, num_workers=1, pin_memory=True)
    start = time.time()
    # accumulate per-image predicted labels across all episodes
    unlabel_pool = {}
    for x_spt, y_spt, x_qry, y_spt_real, flatten_query_x in db_test_unlabel:
        x_spt, y_spt, x_qry, y_spt_real = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
                                          x_qry.squeeze(0).to(device), y_spt_real.squeeze(0).to(device)
        results = maml.unlabel_pooling(x_spt, y_spt, x_qry, y_spt_real, flatten_query_x)
        for i in results:
            if i not in unlabel_pool.keys():
                unlabel_pool[i] = []
            unlabel_pool[i] += results[i]
    end = time.time()
    print('***************** time:', end - start)
    unlabel_processing(mini_test_unlabel, unlabel_pool)
    ####################################################################
    ## PROCESS LABEL DATA
    ####################################################################
    # batchsz here means total episode number
    mini_test_label = VQARAD_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
                                  k_query=args.k_qry,
                                  batchsz=600, resize=args.imgsz, unlabel=False, t = t_dst)
    db_test_label = DataLoader(mini_test_label, 1, shuffle=True, num_workers=1, pin_memory=True)
    start = time.time()
    # accumulate per-image confidences of the true label across all episodes
    label_pool = {}
    for x_spt, y_spt, x_qry, y_qry, y_qry_real, flatten_query_x in db_test_label:
        x_spt, y_spt, x_qry, y_qry, y_qry_real = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
                                                 x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device), y_qry_real.squeeze(0).to(device)
        results = maml.label_pooling(x_spt, y_spt, x_qry, y_qry, y_qry_real, flatten_query_x)
        for i in results:
            if i not in label_pool.keys():
                label_pool[i] = []
            label_pool[i] += results[i]
    end = time.time()
    print('***************** time:', end - start)
    label_processing(mini_test_label, label_pool)
    ####################################################################
    ## CREATE NEW LABEL AND UNLABEL DATASET and REMOVE TEMP FOLDERS
    ####################################################################
    final_processing(mini_test_label)
if __name__ == '__main__':
    # Command-line interface; defaults reproduce the VQA-RAD refinement setting.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--epoch', type=int, help='epoch number', default=0)
    argparser.add_argument('--n_way', type=int, help='n way', default=3)
    argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=3)
    argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=3)
    argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
    argparser.add_argument('--imgc', type=int, help='imgc', default=1)
    argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=5)
    argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
    argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
    argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
    argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
    argparser.add_argument('--input', type=str, help='input directory for saving models', default='saved_models')
    argparser.add_argument('--data', type=str, help='data directory', default='data/vqarad_maml/')
    argparser.add_argument('--t_dst', type=int, help='t-th step', default=1)
    args = argparser.parse_args()
    main(args)
| 8,795 | 38.621622 | 138 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/mmq_maml/meta.py | import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch import optim
import numpy as np
from learner import Learner
from copy import deepcopy
class Meta(nn.Module):
    """
    Meta Learner.

    Wraps a ``Learner`` network and implements MAML-style meta-training
    (``forward``), finetuning-based evaluation (``finetunning``), and the
    pseudo-labeling utilities used for dataset refinement
    (``unlabel_pooling`` / ``label_pooling`` / ``feature_validation``).
    """
    def __init__(self, args, config):
        """
        :param args: parsed command-line arguments (learning rates, shot
                     counts, update-step counts, image size/channels)
        :param config: layer-by-layer network specification for ``Learner``
        """
        super(Meta, self).__init__()
        self.update_lr = args.update_lr
        self.meta_lr = args.meta_lr
        self.n_way = args.n_way
        self.k_spt = args.k_spt
        self.k_qry = args.k_qry
        self.task_num = args.task_num
        self.update_step = args.update_step
        self.update_step_test = args.update_step_test
        self.net = Learner(config, args.imgc, args.imgsz)
        self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
    def clip_grad_by_norm_(self, grad, max_norm):
        """
        in-place gradient clipping.
        :param grad: list of gradients
        :param max_norm: maximum norm allowable
        :return: average (pre-clipping) 2-norm over the gradient list
        """
        total_norm = 0
        counter = 0
        for g in grad:
            param_norm = g.data.norm(2)
            total_norm += param_norm.item() ** 2
            counter += 1
        total_norm = total_norm ** (1. / 2)
        clip_coef = max_norm / (total_norm + 1e-6)
        if clip_coef < 1:
            for g in grad:
                g.data.mul_(clip_coef)
        return total_norm/counter
    def forward(self, x_spt, y_spt, x_qry, y_qry):
        """
        One meta-training step over a batch of tasks.

        :param x_spt: [b, setsz, c_, h, w]
        :param y_spt: [b, setsz]
        :param x_qry: [b, querysz, c_, h, w]
        :param y_qry: [b, querysz]
        :return: query-set accuracy after each inner update step (numpy array
                 of length update_step + 1)
        """
        task_num, setsz, c_, h, w = x_spt.size()
        querysz = x_qry.size(1)
        losses_q = [0 for _ in range(self.update_step + 1)]  # losses_q[i] is the loss on step i
        corrects = [0 for _ in range(self.update_step + 1)]
        for i in range(task_num):
            # 1. run the i-th task and compute loss for k=0
            logits, _ = self.net(x_spt[i], vars=None, bn_training=True)
            loss = F.cross_entropy(logits, y_spt[i])
            grad = torch.autograd.grad(loss, self.net.parameters())
            fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.net.parameters())))
            # this is the loss and accuracy before first update
            with torch.no_grad():
                # [setsz, nway]
                logits_q, _ = self.net(x_qry[i], self.net.parameters(), bn_training=True)
                loss_q = F.cross_entropy(logits_q, y_qry[i])
                losses_q[0] += loss_q
                pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                correct = torch.eq(pred_q, y_qry[i]).sum().item()
                corrects[0] = corrects[0] + correct
            # this is the loss and accuracy after the first update
            with torch.no_grad():
                # [setsz, nway]
                logits_q, _ = self.net(x_qry[i], fast_weights, bn_training=True)
                loss_q = F.cross_entropy(logits_q, y_qry[i])
                losses_q[1] += loss_q
                # [setsz]
                pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                correct = torch.eq(pred_q, y_qry[i]).sum().item()
                corrects[1] = corrects[1] + correct
            for k in range(1, self.update_step):
                # 1. run the i-th task and compute loss for k=1~K-1
                logits, _ = self.net(x_spt[i], fast_weights, bn_training=True)
                loss = F.cross_entropy(logits, y_spt[i])
                # 2. compute grad on theta_pi
                grad = torch.autograd.grad(loss, fast_weights)
                # 3. theta_pi = theta_pi - train_lr * grad
                fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
                logits_q, _ = self.net(x_qry[i], fast_weights, bn_training=True)
                # loss_q will be overwritten and just keep the loss_q on last update step.
                loss_q = F.cross_entropy(logits_q, y_qry[i])
                losses_q[k + 1] += loss_q
                with torch.no_grad():
                    pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                    correct = torch.eq(pred_q, y_qry[i]).sum().item()  # convert to numpy
                    corrects[k + 1] = corrects[k + 1] + correct
        # end of all tasks
        # sum over all losses on query set across all tasks
        loss_q = losses_q[-1] / task_num
        # optimize theta parameters
        self.meta_optim.zero_grad()
        loss_q.backward()
        # print('meta update')
        # for p in self.net.parameters()[:5]:
        # print(torch.norm(p).item())
        self.meta_optim.step()
        accs = np.array(corrects) / (querysz * task_num)
        return accs
    def finetunning(self, x_spt, y_spt, x_qry, y_qry):
        """
        Evaluate on a single task by finetuning a copy of the meta-model.

        :param x_spt: [setsz, c_, h, w]
        :param y_spt: [setsz]
        :param x_qry: [querysz, c_, h, w]
        :param y_qry: [querysz]
        :return: query-set accuracy after each finetuning step
        """
        assert len(x_spt.shape) == 4
        querysz = x_qry.size(0)
        corrects = [0 for _ in range(self.update_step_test + 1)]
        # in order to not ruin the state of running_mean/variance and bn_weight/bias
        # we finetunning on the copied model instead of self.net
        net = deepcopy(self.net)
        # 1. run the i-th task and compute loss for k=0
        logits, _ = net(x_spt)
        loss = F.cross_entropy(logits, y_spt)
        grad = torch.autograd.grad(loss, net.parameters())
        fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
        # this is the loss and accuracy before first update
        with torch.no_grad():
            # [setsz, nway]
            logits_q, _ = net(x_qry, net.parameters(), bn_training=True)
            # [setsz]
            pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
            # scalar
            correct = torch.eq(pred_q, y_qry).sum().item()
            corrects[0] = corrects[0] + correct
        # this is the loss and accuracy after the first update
        with torch.no_grad():
            # [setsz, nway]
            logits_q, _ = net(x_qry, fast_weights, bn_training=True)
            # [setsz]
            pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
            # scalar
            correct = torch.eq(pred_q, y_qry).sum().item()
            corrects[1] = corrects[1] + correct
        for k in range(1, self.update_step_test):
            # 1. run the i-th task and compute loss for k=1~K-1
            logits, _ = net(x_spt, fast_weights, bn_training=True)
            loss = F.cross_entropy(logits, y_spt)
            # 2. compute grad on theta_pi
            grad = torch.autograd.grad(loss, fast_weights)
            # 3. theta_pi = theta_pi - train_lr * grad
            fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
            logits_q, _ = net(x_qry, fast_weights, bn_training=True)
            # loss_q will be overwritten and just keep the loss_q on last update step.
            loss_q = F.cross_entropy(logits_q, y_qry)
            with torch.no_grad():
                pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                correct = torch.eq(pred_q, y_qry).sum().item()  # convert to numpy
                corrects[k + 1] = corrects[k + 1] + correct
        del net
        accs = np.array(corrects) / querysz
        return accs
    def unlabel_pooling(self, x_spt, y_spt, x_qry, y_spt_real, flatten_query_x):
        """
        Predict labels for unlabeled query images with a finetuned copy.

        :param x_spt: [setsz, c_, h, w]
        :param y_spt: [setsz]
        :param x_qry: [querysz, c_, h, w]
        :param y_spt_real: real labels of the support set (used to map the
                           episode-local prediction index back to a label)
        :param flatten_query_x: paths of the query images
        :return: dict mapping query-image path -> list of predicted labels
        """
        assert len(x_spt.shape) == 4
        querysz = x_qry.size(0)
        net = deepcopy(self.net)
        # 1. run the i-th task and compute loss for k=0
        logits, _ = net(x_spt)
        loss = F.cross_entropy(logits, y_spt)
        grad = torch.autograd.grad(loss, net.parameters())
        fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
        for k in range(1, self.update_step_test):
            # 1. run the i-th task and compute loss for k=1~K-1
            logits, _ = net(x_spt, fast_weights, bn_training=True)
            loss = F.cross_entropy(logits, y_spt)
            # 2. compute grad on theta_pi
            grad = torch.autograd.grad(loss, fast_weights)
            # 3. theta_pi = theta_pi - train_lr * grad
            fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
        with torch.no_grad():
            logits_q, _ = net(x_qry, fast_weights, bn_training=True)
            logits_q_softmax = F.softmax(logits_q, dim=1)
            pred_q = logits_q_softmax.argmax(dim=1)
        del net
        # TODO - IGNORE
        results = dict()
        for i, path in enumerate(flatten_query_x):
            # results[path[0]] = [y_spt_real[pred_q[i]].item(), logits_q_softmax[i][pred_q[i]].item()]
            if path[0] not in results.keys():
                results[path[0]] = []
            results[path[0]] += [y_spt_real[pred_q[i]].item()]
        return results
    def label_pooling(self, x_spt, y_spt, x_qry, y_qry, y_qry_real, flatten_query_x):
        """
        Collect confidences of the true label for labeled query images.

        :param x_spt: [setsz, c_, h, w]
        :param y_spt: [setsz]
        :param x_qry: [querysz, c_, h, w]
        :param y_qry: [querysz]
        :param y_qry_real: real labels of the query set (20 marks "other")
        :param flatten_query_x: paths of the query images
        :return: dict mapping query-image path -> list of true-label confidences
        """
        assert len(x_spt.shape) == 4
        querysz = x_qry.size(0)
        net = deepcopy(self.net)
        # 1. run the i-th task and compute loss for k=0
        logits, _ = net(x_spt)
        loss = F.cross_entropy(logits, y_spt)
        grad = torch.autograd.grad(loss, net.parameters())
        fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
        for k in range(1, self.update_step_test):
            # 1. run the i-th task and compute loss for k=1~K-1
            logits, _ = net(x_spt, fast_weights, bn_training=True)
            loss = F.cross_entropy(logits, y_spt)
            # 2. compute grad on theta_pi
            grad = torch.autograd.grad(loss, fast_weights)
            # 3. theta_pi = theta_pi - train_lr * grad
            fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
        with torch.no_grad():
            logits_q, _ = net(x_qry, fast_weights, bn_training=True)
            logits_q_softmax = F.softmax(logits_q, dim=1)
            pred_q = logits_q_softmax.argmax(dim=1)
        del net
        results = dict()
        # TODO-IGNORE
        for i, path in enumerate(flatten_query_x):
            if y_qry_real[i] == 20:
                continue
            if path[0] not in results.keys():
                results[path[0]] = []
            results[path[0]].append(logits_q_softmax[i][y_qry[i]].item())
        return results
    def feature_validation(self, x_spt, y_spt, x_qry, y_qry):
        """
        Finetune a copy and return true-label confidences plus query features.

        :param x_spt: [setsz, c_, h, w]
        :param y_spt: [setsz]
        :param x_qry: [querysz, c_, h, w]
        :param y_qry: [querysz]
        :return: (list of true-label confidences, query feature tensor)
        """
        assert len(x_spt.shape) == 4
        querysz = x_qry.size(0)
        net = deepcopy(self.net)
        # 1. run the i-th task and compute loss for k=0
        logits, _ = net(x_spt)
        loss = F.cross_entropy(logits, y_spt)
        grad = torch.autograd.grad(loss, net.parameters())
        fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
        for k in range(1, self.update_step_test):
            # 1. run the i-th task and compute loss for k=1~K-1
            logits, _ = net(x_spt, fast_weights, bn_training=True)
            loss = F.cross_entropy(logits, y_spt)
            # 2. compute grad on theta_pi
            grad = torch.autograd.grad(loss, fast_weights)
            # 3. theta_pi = theta_pi - train_lr * grad
            fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
        with torch.no_grad():
            logits_q, feature = net(x_qry, fast_weights, bn_training=True)
            logits_q_softmax = F.softmax(logits_q, dim=1)
            # pred_q = logits_q_softmax.argmax(dim=1)
        del net
        # TODO-IGNORE
        results = []
        for i in range(len(x_qry)):
            results.append(logits_q_softmax[i][y_qry[i]].item())
        return results, feature
def main():
    """Placeholder entry point; this module is meant to be imported, not run."""
    return None
if __name__ == '__main__':
    # Running this module directly is a no-op; Meta is used via the training scripts.
    main()
| 12,796 | 34.350829 | 110 | py |
MICCAI21_MMQ | MICCAI21_MMQ-main/mmq_maml/pathVQA_maml_half.py | import torch, os
import numpy as np
from pathVQA_maml import PathVQA_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
import shutil
def mean_confidence_interval(accs, confidence=0.95):
    """
    Return the mean of *accs* and the half-width of its confidence interval.

    :param accs: 1-D numpy array of accuracies (one per test episode)
    :param confidence: two-sided confidence level of the interval
    :return: (mean, half-width), i.e. the interval is mean +/- half-width
    """
    n = accs.shape[0]
    m, se = np.mean(accs), scipy.stats.sem(accs)
    # Use the public ppf API; t._ppf is a private SciPy helper that skips
    # argument validation and may change or disappear between releases.
    h = se * scipy.stats.t.ppf((1 + confidence) / 2, n - 1)
    return m, h
def unlabel_processing(dataset, unlabel_pool, threshold_fre = 27, threshold_max = 9):
    """
    Auto annotation for samples collection.

    An unlabeled image is promoted into the ``_unlabel_add`` staging folder
    when it gathered more than ``threshold_fre`` predictions and a single
    label dominates with more than ``threshold_max`` votes.  Ties are
    rejected, as is the "other" class (label 20).

    :param dataset: dataset object exposing ``path`` and ``label2class``
    :param unlabel_pool: dict mapping image path -> list of predicted labels
    :param threshold_fre: minimum number of collected predictions
    :param threshold_max: minimum vote count of the winning label
    :return count: number of promoted images
    """
    promoted = 0
    staging_root = dataset.path + '_unlabel_add'
    for img_path, votes in unlabel_pool.items():
        if len(votes) <= threshold_fre:
            continue
        # tally votes per label
        tally = {}
        for vote in votes:
            tally[vote] = tally.get(vote, 0) + 1
        best_label = max(tally, key=tally.get)
        best_votes = tally[best_label]
        unique_winner = list(tally.values()).count(best_votes) == 1
        if unique_winner and best_votes > threshold_max and int(best_label) != 20:
            promoted += 1
            class_dir = os.path.join(staging_root, dataset.label2class[int(best_label)])
            if not os.path.exists(class_dir):
                os.makedirs(class_dir)
            os.rename(img_path, os.path.join(class_dir, img_path.split('/')[-1]))
    print('The number of additional unlabeled images:', promoted)
    return promoted
def label_processing(dataset, label_pool, threshold=0.3):
    """
    Auto removal of labeled samples with high uncertainty.

    A labeled image whose best confidence over all episodes stays below
    ``threshold`` is parked in the ``_label_remove`` staging folder.

    :param dataset: dataset object exposing ``path``
    :param label_pool: dict mapping image path -> list of confidences
    :param threshold: confidence cut-off
    :return count: number of removed images
    """
    staging_dir = dataset.path + '_label_remove'
    if not os.path.exists(staging_dir):
        os.makedirs(staging_dir)
    removed = 0
    for img_path, confidences in label_pool.items():
        if max(confidences) >= threshold:
            continue
        removed += 1
        os.rename(img_path, os.path.join(staging_dir, img_path.split('/')[-1]))
    print('The number of removed labeled images:', removed)
    return removed
def final_processing(dataset):
    """
    Refine meta-data and unlabel data.

    Merges the ``_unlabel_add`` staging classes back into the labeled set,
    returns the ``_label_remove`` files to the unlabeled pool
    (``<path>_unlabel``), then deletes both staging folders.

    :param dataset: dataset object exposing ``path``
    """
    root = dataset.path
    # merge auto-annotated images into their class folders
    src = root + '_unlabel_add'
    list_dirs = os.listdir(src)
    for dir in list_dirs:
        files = os.listdir(os.path.join(src, dir))
        for file in files:
            shutil.move(os.path.join(src, dir, file), os.path.join(root, dir))
    shutil.rmtree(src)
    # send uncertain labeled images back to the unlabeled pool
    src = root + '_label_remove'
    list_files = os.listdir(src)
    for i in list_files:
        shutil.move(os.path.join(src, i), root + '_unlabel')
    shutil.rmtree(src)
def main(args):
    """
    One dataset-refinement pass over the PathVQA meta-training data.

    Loads the MAML model trained at step ``t_dst - 1``, copies the dataset
    folder forward to step ``t_dst``, auto-annotates confident unlabeled
    images, removes uncertain labeled ones, then merges the staged changes.

    :param args: parsed command-line arguments (see the argparser below)
    """
    # fixed seeds for reproducibility of episode sampling
    torch.manual_seed(222)
    torch.cuda.manual_seed_all(222)
    np.random.seed(222)
    # add other class (n_way + 1)
    args.n_way += 1
    print(args)
    # hidden layer dimension config
    # NOTE(review): image sizes other than 84/128 leave dim_hidden_linear
    # unbound and would raise NameError below — presumably only these two
    # sizes are supported; confirm against the training scripts.
    if args.imgsz == 84:
        dim_hidden_linear = 32 * 5 * 5
    elif args.imgsz == 128:
        dim_hidden_linear = 32 * 11 * 11
    # model config: 4 conv+pool blocks followed by a linear classifier
    config = [
        ('conv2d', [32, 3, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 2, 0]),
        ('conv2d', [32, 32, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 2, 0]),
        ('conv2d', [32, 32, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 2, 0]),
        ('conv2d', [32, 32, 3, 3, 1, 0]),
        ('relu', [True]),
        ('bn', [32]),
        ('max_pool2d', [2, 1, 0]),
        ('flatten', []),
        ('linear', [args.n_way, dim_hidden_linear])
    ]
    # initial MAML model
    device = torch.device('cuda')
    maml = Meta(args, config).to(device)
    model_dir = args.input + '/maml%d_miccai2021_optimization_newmethod_%dway_%dshot_t%d' % (args.imgsz, args.n_way, args.k_spt, args.t_dst - 1)
    model_path = os.path.join(model_dir, 'model_epoch%d.pth' % args.epoch)
    print('-------load model weight from:', model_path)
    maml.load_state_dict(torch.load(model_path))
    # count trainable parameters for logging
    tmp = filter(lambda x: x.requires_grad, maml.parameters())
    num = sum(map(lambda x: np.prod(x.shape), tmp))
    print(maml)
    print('Total trainable tensors:', num)
    # source and destination directory
    t_dst = args.t_dst
    src = os.path.join(args.data, 't%d'%(t_dst - 1))
    dst = os.path.join(args.data, 't%d'%(t_dst))
    # Copy original dataset to the destination folder
    shutil.copytree(src, dst)
    ####################################################################
    ## PROCESS UNLABEL DATA
    ####################################################################
    # batchsz here means total episode number
    mini_test_unlabel = PathVQA_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
                                     k_query=args.k_qry,
                                     batchsz=600, resize=args.imgsz, unlabel=True, t=t_dst)
    db_test_unlabel = DataLoader(mini_test_unlabel, 1, shuffle=True, num_workers=1, pin_memory=True)
    start = time.time()
    # accumulate per-image predicted labels across all episodes
    unlabel_pool = {}
    for x_spt, y_spt, x_qry, y_spt_real, flatten_query_x in db_test_unlabel:
        x_spt, y_spt, x_qry, y_spt_real = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
                                          x_qry.squeeze(0).to(device), y_spt_real.squeeze(0).to(device)
        results = maml.unlabel_pooling(x_spt, y_spt, x_qry, y_spt_real, flatten_query_x)
        for i in results:
            if i not in unlabel_pool.keys():
                unlabel_pool[i] = []
            unlabel_pool[i] += results[i]
    end = time.time()
    print('***************** time:', end - start)
    unlabel_processing(mini_test_unlabel, unlabel_pool)
    ####################################################################
    ## PROCESS LABEL DATA
    ####################################################################
    # batchsz here means total episode number
    mini_test_label = PathVQA_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
                                   k_query=args.k_qry,
                                   batchsz=600, resize=args.imgsz, unlabel=False, t = t_dst)
    db_test_label = DataLoader(mini_test_label, 1, shuffle=True, num_workers=1, pin_memory=True)
    start = time.time()
    # accumulate per-image confidences of the true label across all episodes
    label_pool = {}
    for x_spt, y_spt, x_qry, y_qry, y_qry_real, flatten_query_x in db_test_label:
        x_spt, y_spt, x_qry, y_qry, y_qry_real = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
                                                 x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device), y_qry_real.squeeze(0).to(device)
        results = maml.label_pooling(x_spt, y_spt, x_qry, y_qry, y_qry_real, flatten_query_x)
        for i in results:
            if i not in label_pool.keys():
                label_pool[i] = []
            label_pool[i] += results[i]
    end = time.time()
    print('***************** time:', end - start)
    label_processing(mini_test_label, label_pool)
    ####################################################################
    ## CREATE NEW LABEL AND UNLABEL DATASET and REMOVE TEMP FOLDERS
    ####################################################################
    final_processing(mini_test_label)
if __name__ == '__main__':
    # Command-line interface; defaults reproduce the PathVQA refinement setting.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--epoch', type=int, help='epoch number', default=0)
    argparser.add_argument('--n_way', type=int, help='n way', default=5)
    argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=5)
    argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=15)
    argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
    argparser.add_argument('--imgc', type=int, help='imgc', default=3)
    argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=4)
    argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
    argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
    argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
    argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
    argparser.add_argument('--input', type=str, help='input directory for saving models', default='saved_models')
    argparser.add_argument('--data', type=str, help='data directory', default='data/pathvqa_maml/')
    argparser.add_argument('--t_dst', type=int, help='t-th step', default=1)
    args = argparser.parse_args()
    main(args)
| 8,907 | 38.591111 | 144 | py |
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/test.py | """General-purpose test script for image-to-image translation.
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from '--checkpoints_dir' and save the results to '--results_dir'.
It first creates model and dataset given the option. It will hard-code some parameters.
It then runs inference for '--num_test' images and save results to an HTML file.
Example (You need to train models first or download pre-trained models from our website):
Test a CycleGAN model (both sides):
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Test a CycleGAN model (one side only):
python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
The option '--model test' is used for generating CycleGAN results only for one side.
This option will automatically set '--dataset_mode single', which only loads the images from one set.
On the contrary, using '--model cycle_gan' requires loading and generating results in both directions,
which is sometimes unnecessary. The results will be saved at ./results/.
Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
Test a pix2pix model:
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/test_options.py for more test options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
try:
import wandb
except ImportError:
print('Warning: wandb package cannot be found. The option "--use_wandb" will result in error.')
if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to a HTML file.
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    # initialize logger
    if opt.use_wandb:
        wandb_run = wandb.init(project=opt.wandb_project_name, name=opt.name, config=opt) if not wandb.run else wandb.run
        wandb_run._label(repo='CycleGAN-and-pix2pix')
    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch))  # define the website directory
    if opt.load_iter > 0:  # load_iter is 0 by default
        web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
    print('creating web directory', web_dir)
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
    # test with eval mode. This only affects layers like batchnorm and dropout.
    # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
    # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    if opt.eval:
        model.eval()
    # inference loop over the test set; results accumulate in the HTML page
    for i, data in enumerate(dataset):
        if i >= opt.num_test:  # only apply our model to opt.num_test images.
            break
        model.set_input(data)  # unpack data from data loader
        model.test()           # run inference
        visuals = model.get_current_visuals()  # get image results
        img_path = model.get_image_paths()     # get image paths
        if i % 5 == 0:  # save images to an HTML file
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize, use_wandb=opt.use_wandb)
    webpage.save()  # save the HTML
| 4,545 | 55.123457 | 130 | py |
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/train.py | """General-purpose training script for image-to-image translation.
This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
It first creates model, dataset, and visualizer given the option.
It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models.
The script supports continue/resume training. Use '--continue_train' to resume your previous training.
Example:
Train a CycleGAN model:
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Train a pix2pix model:
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/train_options.py for more training options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import time
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
if __name__ == '__main__':
    opt = TrainOptions().parse()   # get training options
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    dataset_size = len(dataset)    # get the number of images in the dataset.
    print('The number of training images = %d' % dataset_size)
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    visualizer = Visualizer(opt)   # create a visualizer that display/save images and plots
    total_iters = 0                # the total number of training iterations
    for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
        epoch_start_time = time.time()  # timer for entire epoch
        iter_data_time = time.time()    # timer for data loading per iteration
        epoch_iter = 0                  # the number of training iterations in current epoch, reset to 0 every epoch
        visualizer.reset()              # reset the visualizer: make sure it saves the results to HTML at least once every epoch
        model.update_learning_rate()    # update learning rates in the beginning of every epoch.
        for i, data in enumerate(dataset):  # inner loop within one epoch
            iter_start_time = time.time()  # timer for computation per iteration
            if total_iters % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            total_iters += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)         # unpack data from dataset and apply preprocessing
            model.optimize_parameters()   # calculate loss functions, get gradients, update network weights
            if total_iters % opt.display_freq == 0:   # display images on visdom and save images to a HTML file
                save_result = total_iters % opt.update_html_freq == 0
                model.compute_visuals()
                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
            if total_iters % opt.print_freq == 0:    # print training losses and save logging information to the disk
                losses = model.get_current_losses()
                t_comp = (time.time() - iter_start_time) / opt.batch_size
                visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
                if opt.display_id > 0:
                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
            if total_iters % opt.save_latest_freq == 0:   # cache our latest model every <save_latest_freq> iterations
                print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
                save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
                model.save_networks(save_suffix)
            iter_data_time = time.time()
        if epoch % opt.save_epoch_freq == 0:              # cache our model every <save_epoch_freq> epochs
            print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
            model.save_networks('latest')
            model.save_networks(epoch)
        print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
| 4,933 | 62.25641 | 186 | py |
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/options/base_options.py | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
    """This class defines options used during both training and test time.
    It also implements several helper functions such as parsing, printing, and saving the options.
    It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
    """
    def __init__(self):
        """Reset the class; indicates the class hasn't been initialized"""
        self.initialized = False
    def initialize(self, parser):
        """Define the common options that are used in both training and test."""
        # basic parameters
        parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        # model parameters
        parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
        parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
        parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
        parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
        parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        # dataset parameters
        parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
        parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
        parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
        parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
        parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
        # additional parameters
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
        # wandb parameters
        parser.add_argument('--use_wandb', action='store_true', help='if specified, then init wandb logging')
        parser.add_argument('--wandb_project_name', type=str, default='CycleGAN-and-pix2pix', help='specify wandb project name')
        self.initialized = True
        return parser
    def gather_options(self):
        """Initialize our parser with basic options(only once).
        Add additional model-specific and dataset-specific options.
        These options are defined in the <modify_commandline_options> function
        in model and dataset classes.
        """
        if not self.initialized:  # check if it has been initialized
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        # NOTE(review): 'parser' is only bound inside the branch above, so this
        # method assumes it runs at most once per options object — confirm callers.
        # get the basic options
        opt, _ = parser.parse_known_args()
        # modify model-related parser options
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        # self.isTrain is expected to be set by the Train/Test options subclass
        parser = model_option_setter(parser, self.isTrain)
        opt, _ = parser.parse_known_args()  # parse again with new defaults
        # modify dataset-related parser options
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)
        # save and return the parser
        self.parser = parser
        return parser.parse_args()
    def print_options(self, opt):
        """Print and save options
        It will print both current options and default values(if different).
        It will save options into a text file / [checkpoints_dir] / opt.txt
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)
        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')
    def parse(self):
        """Parse our options, create checkpoints directory suffix, and set up gpu device."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain   # train or test
        # process opt.suffix
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix
        self.print_options(opt)
        # set gpu ids
        # convert the comma-separated string (e.g. '0,1') into a list of ints;
        # negative entries (CPU) are dropped
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                opt.gpu_ids.append(id)
        if len(opt.gpu_ids) > 0:
            torch.cuda.set_device(opt.gpu_ids[0])
        self.opt = opt
        return self.opt
| 8,327 | 58.485714 | 235 | py |
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/models/base_model.py | import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
    """This class is an abstract base class (ABC) for models.
    To create a subclass, you need to implement the following five functions:
        -- <__init__>:                      initialize the class; first call BaseModel.__init__(self, opt).
        -- <set_input>:                     unpack data from dataset and apply preprocessing.
        -- <forward>:                       produce intermediate results.
        -- <optimize_parameters>:           calculate losses, gradients, and update network weights.
        -- <modify_commandline_options>:    (optionally) add model-specific options and set default options.
    """
    def __init__(self, opt):
        """Initialize the BaseModel class.
        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        When creating your custom class, you need to implement your own initialization.
        In this function, you should first call <BaseModel.__init__(self, opt)>
        Then, you need to define four lists:
            -- self.loss_names (str list):          specify the training losses that you want to plot and save.
            -- self.model_names (str list):         define networks used in our training.
            -- self.visual_names (str list):        specify the images that you want to display and save.
            -- self.optimizers (optimizer list):    define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
        """
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')  # get device name: CPU or GPU
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
        if opt.preprocess != 'scale_width':  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
            torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.optimizers = []
        self.image_paths = []
        self.metric = 0  # used for learning rate policy 'plateau'
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new model-specific options, and rewrite default values for existing options.
        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
        Returns:
            the modified parser.
        """
        return parser
    @abstractmethod
    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.
        Parameters:
            input (dict): includes the data itself and its metadata information.
        """
        pass
    @abstractmethod
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        pass
    @abstractmethod
    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        pass
    def setup(self, opt):
        """Load and print networks; create schedulers
        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        if self.isTrain:
            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
        if not self.isTrain or opt.continue_train:
            load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
            self.load_networks(load_suffix)
        self.print_networks(opt.verbose)
    def eval(self):
        """Make models eval mode during test time"""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                net.eval()
    def test(self):
        """Forward function used in test time.
        This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
        It also calls <compute_visuals> to produce additional visualization results
        """
        with torch.no_grad():
            self.forward()
            self.compute_visuals()
    def compute_visuals(self):
        """Calculate additional output images for visdom and HTML visualization"""
        pass
    def get_image_paths(self):
        """ Return image paths that are used to load current data"""
        return self.image_paths
    def update_learning_rate(self):
        """Update learning rates for all the networks; called at the end of every epoch"""
        old_lr = self.optimizers[0].param_groups[0]['lr']
        for scheduler in self.schedulers:
            if self.opt.lr_policy == 'plateau':
                # plateau scheduling needs the monitored metric to decide decay
                scheduler.step(self.metric)
            else:
                scheduler.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print('learning rate %.7f -> %.7f' % (old_lr, lr))
    def get_current_visuals(self):
        """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
        visual_ret = OrderedDict()
        for name in self.visual_names:
            if isinstance(name, str):
                visual_ret[name] = getattr(self, name)
        return visual_ret
    def get_current_losses(self):
        """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
        errors_ret = OrderedDict()
        for name in self.loss_names:
            if isinstance(name, str):
                errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number
        return errors_ret
    def save_networks(self, epoch):
        """Save all the networks to the disk.
        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = '%s_net_%s.pth' % (epoch, name)
                save_path = os.path.join(self.save_dir, save_filename)
                net = getattr(self, 'net' + name)
                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
                    # save CPU-side weights for portability, then move back to GPU
                    torch.save(net.module.cpu().state_dict(), save_path)
                    net.cuda(self.gpu_ids[0])
                else:
                    torch.save(net.cpu().state_dict(), save_path)
    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
        """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)

        Recursively walks *keys* (a dotted state-dict key split into parts) and
        removes stale InstanceNorm buffers that newer PyTorch no longer stores.
        """
        key = keys[i]
        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
            if module.__class__.__name__.startswith('InstanceNorm') and \
                    (key == 'running_mean' or key == 'running_var'):
                if getattr(module, key) is None:
                    state_dict.pop('.'.join(keys))
            if module.__class__.__name__.startswith('InstanceNorm') and \
               (key == 'num_batches_tracked'):
                state_dict.pop('.'.join(keys))
        else:
            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
    def load_networks(self, epoch):
        """Load all the networks from the disk.
        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                load_filename = '%s_net_%s.pth' % (epoch, name)
                load_path = os.path.join(self.save_dir, load_filename)
                net = getattr(self, 'net' + name)
                if isinstance(net, torch.nn.DataParallel):
                    net = net.module
                print('loading the model from %s' % load_path)
                # if you are using PyTorch newer than 0.4 (e.g., built from
                # GitHub source), you can remove str() on self.device
                state_dict = torch.load(load_path, map_location=str(self.device))
                if hasattr(state_dict, '_metadata'):
                    del state_dict._metadata
                # patch InstanceNorm checkpoints prior to 0.4
                for key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop
                    self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
                net.load_state_dict(state_dict)
    def print_networks(self, verbose):
        """Print the total number of parameters in the network and (if verbose) network architecture
        Parameters:
            verbose (bool) -- if verbose: print the network architecture
        """
        print('---------- Networks initialized -------------')
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                if verbose:
                    print(net)
                print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
        print('-----------------------------------------------')
    def set_requires_grad(self, nets, requires_grad=False):
        """Set requires_grad=False for all the networks to avoid unnecessary computations
        Parameters:
            nets (network list)   -- a list of networks
            requires_grad (bool)  -- whether the networks require gradients or not
        """
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad
| 10,407 | 44.056277 | 260 | py |
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/models/colorization_model.py | from .pix2pix_model import Pix2PixModel
import torch
from skimage import color # used for lab2rgb
import numpy as np
class ColorizationModel(Pix2PixModel):
    """Pix2pix variant for automatic image colorization (black & white image -> colorful images).

    Training requires the '-dataset_model colorization' dataset, which supplies the
    L channel of a Lab image as input and the ab channels as the target; that
    dataset also forces '--input_nc 1' and '--output_nc 2' by default.
    """
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Rewrite default option values for the colorization task.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser, with 'colorization' set as the default dataset mode.

        See the original pix2pix paper (https://arxiv.org/pdf/1611.07004.pdf) and colorization results (Figure 9 in the paper)
        """
        Pix2PixModel.modify_commandline_options(parser, is_train)
        parser.set_defaults(dataset_mode='colorization')
        return parser

    def __init__(self, opt):
        """Build the underlying pix2pix model and choose the visuals to display.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

        Displays the grayscale input 'real_A' plus RGB conversions of the
        ground-truth ('real_B_rgb') and predicted ('fake_B_rgb') Lab images;
        the conversions are produced lazily in <compute_visuals>.
        """
        Pix2PixModel.__init__(self, opt)
        self.visual_names = ['real_A', 'real_B_rgb', 'fake_B_rgb']

    def lab2rgb(self, L, AB):
        """Convert an Lab tensor image to a RGB numpy output

        Parameters:
            L  (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
            AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)

        Returns:
            rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
        """
        lightness = (L + 1.0) * 50.0   # rescale [-1, 1] -> [0, 100]
        chroma = AB * 110.0            # rescale [-1, 1] -> [-110, 110]
        lab = torch.cat([lightness, chroma], dim=1)
        lab = np.transpose(lab[0].data.cpu().float().numpy().astype(np.float64), (1, 2, 0))
        return color.lab2rgb(lab) * 255

    def compute_visuals(self):
        """Calculate additional output images for visdom and HTML visualization"""
        self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
        self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
| 3,013 | 42.681159 | 141 | py |
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/models/pix2pix_model.py | import torch
from .base_model import BaseModel
from . import networks
class Pix2PixModel(BaseModel):
    """ This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
    The model training requires '--dataset_mode aligned' dataset.
    By default, it uses a '--netG unet256' U-Net generator,
    a '--netD basic' discriminator (PatchGAN),
    and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).
    pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
    """
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.
        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
        Returns:
            the modified parser.
        For pix2pix, we do not use image buffer
        The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
        By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
        """
        # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
        parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
        if is_train:
            parser.set_defaults(pool_size=0, gan_mode='vanilla')
            parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
        return parser
    def __init__(self, opt):
        """Initialize the pix2pix class.
        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']
        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                      not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
            self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.
        Parameters:
            input (dict): include the data itself and its metadata information.
        The option 'direction' can be used to swap images in domain A and domain B.
        """
        AtoB = self.opt.direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.image_paths = input['A_paths' if AtoB else 'B_paths']
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.fake_B = self.netG(self.real_A)  # G(A)
    def backward_D(self):
        """Calculate GAN loss for the discriminator"""
        # Fake; stop backprop to the generator by detaching fake_B
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)  # we use conditional GANs; we need to feed both input and output to the discriminator
        pred_fake = self.netD(fake_AB.detach())
        self.loss_D_fake = self.criterionGAN(pred_fake, False)
        # Real
        real_AB = torch.cat((self.real_A, self.real_B), 1)
        pred_real = self.netD(real_AB)
        self.loss_D_real = self.criterionGAN(pred_real, True)
        # combine loss and calculate gradients
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
        self.loss_D.backward()
    def backward_G(self):
        """Calculate GAN and L1 loss for the generator"""
        # First, G(A) should fake the discriminator
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)
        pred_fake = self.netD(fake_AB)
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)
        # Second, G(A) = B
        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
        # combine loss and calculate gradients
        self.loss_G = self.loss_G_GAN + self.loss_G_L1
        self.loss_G.backward()
    def optimize_parameters(self):
        """Run one training iteration: forward pass, then update D, then update G."""
        self.forward()                   # compute fake images: G(A)
        # update D
        self.set_requires_grad(self.netD, True)  # enable backprop for D
        self.optimizer_D.zero_grad()     # set D's gradients to zero
        self.backward_D()                # calculate gradients for D
        self.optimizer_D.step()          # update D's weights
        # update G
        self.set_requires_grad(self.netD, False)  # D requires no gradients when optimizing G
        self.optimizer_G.zero_grad()        # set G's gradients to zero
        self.backward_G()                   # calculate gradients for G
        self.optimizer_G.step()             # update G's weights
| 6,519 | 49.9375 | 162 | py |
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/models/networks.py | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
    """A no-op module: forwards its input unchanged (stands in for a norm layer when norm_type == 'none')."""

    def forward(self, x):
        """Return *x* exactly as received."""
        return x
def get_norm_layer(norm_type='instance'):
    """Return a normalization layer

    Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | none

    BatchNorm uses learnable affine parameters and tracks running statistics
    (mean/stddev); InstanceNorm uses neither. For 'none' a factory producing
    Identity modules is returned so callers can still invoke it like a layer class.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'none':
        def norm_layer(x):
            return Identity()
        return norm_layer
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
    and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.

    Raises:
        NotImplementedError -- if opt.lr_policy is not one of the supported policies.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # multiplier stays at 1.0 through the first n_epochs, then ramps
            # linearly down to 0 over n_epochs_decay epochs
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
    else:
        # BUG FIX: the exception was previously *returned* instead of raised
        # (so callers silently received an exception object as the scheduler),
        # and the policy name was passed as a stray second argument instead of
        # being interpolated into the message.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
    work better for some applications. Feel free to try yourself.
    """
    # Map each supported scheme onto the torch.nn.init routine it stands for.
    schemes = {
        'normal': lambda w: init.normal_(w, 0.0, init_gain),
        'xavier': lambda w: init.xavier_normal_(w, gain=init_gain),
        'kaiming': lambda w: init.kaiming_normal_(w, a=0, mode='fan_in'),
        'orthogonal': lambda w: init.orthogonal_(w, gain=init_gain),
    }

    def _initialize(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in classname or 'Linear' in classname):
            if init_type not in schemes:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            schemes[init_type](m.weight.data)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in classname:
            # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(_initialize)  # recursively visits every submodule
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Return an initialized network.
    """
    # (gpu_ids defaults to a shared list, but it is never mutated here.)
    if gpu_ids:
        assert torch.cuda.is_available()
        # move to the first requested GPU, then wrap for multi-GPU execution
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type, init_gain=init_gain)
    return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create a generator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        output_nc (int)    -- the number of channels in output images
        ngf (int)          -- the number of filters in the last conv layer
        netG (str)         -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
        norm (str)         -- the name of normalization layers used in the network: batch | instance | none
        use_dropout (bool) -- if use dropout layers.
        init_type (str)    -- the name of our initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns a generator, already initialized by <init_net>; it uses RELU for non-linearity.

    Two architecture families are available:
        U-Net:  [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images).
                The original U-Net paper: https://arxiv.org/abs/1505.04597
        Resnet: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks);
                several Resnet blocks sit between a few downsampling/upsampling operations.
                We adapt Torch code from Justin Johnson's neural style transfer project
                (https://github.com/jcjohnson/fast-neural-style).
    """
    norm_layer = get_norm_layer(norm_type=norm)
    # Catalogue of supported architectures; construction is deferred via lambdas
    # so only the requested network is ever built.
    builders = {
        'resnet_9blocks': lambda: ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9),
        'resnet_6blocks': lambda: ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6),
        'unet_128': lambda: UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout),
        'unet_256': lambda: UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout),
    }
    if netG not in builders:
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    return init_net(builders[netG](), init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create a discriminator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        ndf (int)          -- the number of filters in the first conv layer
        netD (str)         -- the architecture's name: basic | n_layers | pixel
        n_layers_D (int)   -- the number of conv layers in the discriminator; effective when netD=='n_layers'
        norm (str)         -- the type of normalization layers used in the network.
        init_type (str)    -- the name of the initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns a discriminator, already initialized by <init_net>; it uses Leaky RELU for non-linearity.

    Three variants are available:
        [basic]    -- the 'PatchGAN' classifier from the original pix2pix paper. It classifies
                      whether 70x70 overlapping patches are real or fake; such a patch-level
                      discriminator has fewer parameters than a full-image one and works on
                      arbitrarily-sized images in a fully convolutional fashion.
        [n_layers] -- same family, but with a caller-chosen number of conv layers via
                      <n_layers_D> (default=3, as used in [basic]).
        [pixel]    -- a 1x1 PixelGAN that classifies whether each pixel is real or not;
                      it encourages greater color diversity but has no effect on spatial statistics.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    # Deferred construction: only the requested discriminator is built.
    builders = {
        'basic': lambda: NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer),
        'n_layers': lambda: NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer),
        'pixel': lambda: PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer),
    }
    if netD not in builders:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
    return init_net(builders[netD](), init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
    """Wrap the different GAN objectives behind a single callable.

    Given a discriminator prediction map of any size, this module builds a
    matching ground-truth label tensor on the fly, so callers never have to
    create target tensors of the right shape themselves.
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """Initialize the GANLoss class.

        Parameters:
            gan_mode (str)            -- the GAN objective: vanilla | lsgan | wgangp.
            target_real_label (float) -- label value used for real images.
            target_fake_label (float) -- label value used for fake images.

        Raises:
            NotImplementedError -- if ``gan_mode`` is not one of the three supported modes.

        Note: do not put a sigmoid on the discriminator output; lsgan needs
        none and vanilla handles it internally via BCEWithLogitsLoss.
        """
        super(GANLoss, self).__init__()
        # Buffers follow the module across devices but are not trained.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()
        elif gan_mode == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode == 'wgangp':
            self.loss = None  # wgangp scores directly via the prediction mean
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)

    def get_target_tensor(self, prediction, target_is_real):
        """Return a label tensor shaped like ``prediction``.

        Parameters:
            prediction (tensor)   -- typically a discriminator output map.
            target_is_real (bool) -- whether the ground truth is "real" or "fake".

        Returns:
            A tensor filled with the corresponding label, expanded to
            ``prediction``'s size.
        """
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(prediction)

    def __call__(self, prediction, target_is_real):
        """Compute the loss for a discriminator output and a ground-truth flag.

        Parameters:
            prediction (tensor)   -- typically a discriminator output map.
            target_is_real (bool) -- whether the ground truth is "real" or "fake".

        Returns:
            the calculated loss tensor.
        """
        if self.gan_mode == 'wgangp':
            # Wasserstein objective: push real scores up and fake scores down.
            loss = -prediction.mean() if target_is_real else prediction.mean()
        else:  # 'lsgan' or 'vanilla'
            target = self.get_target_tensor(prediction, target_is_real)
            loss = self.loss(prediction, target)
        return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
    """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028

    Arguments:
        netD (network)          -- discriminator network
        real_data (tensor)      -- real images
        fake_data (tensor)      -- generated images from the generator
        device (str)            -- GPU / CPU device the tensors live on
        type (str)              -- where to evaluate the penalty: real | fake | mixed
        constant (float)        -- the constant used in the formula ( ||gradient||_2 - constant)^2
        lambda_gp (float)       -- weight for this loss; <= 0 disables the penalty

    Returns:
        (penalty, gradients) where ``gradients`` is the flattened per-sample
        gradient of the discriminator output, or (0.0, None) when disabled.
    """
    if lambda_gp <= 0.0:
        return 0.0, None

    # Choose the points where the gradient norm is penalized.
    if type == 'real':
        interpolatesv = real_data
    elif type == 'fake':
        interpolatesv = fake_data
    elif type == 'mixed':
        # Per-sample random blend between the real and fake images.
        batch = real_data.shape[0]
        alpha = torch.rand(batch, 1, device=device)
        alpha = alpha.expand(batch, real_data.nelement() // batch).contiguous().view(*real_data.shape)
        interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
    else:
        raise NotImplementedError('{} not implemented'.format(type))

    interpolatesv.requires_grad_(True)
    disc_out = netD(interpolatesv)
    # create_graph=True so the penalty itself is differentiable for backprop.
    grads = torch.autograd.grad(outputs=disc_out, inputs=interpolatesv,
                                grad_outputs=torch.ones(disc_out.size()).to(device),
                                create_graph=True, retain_graph=True, only_inputs=True)
    flat = grads[0].view(real_data.size(0), -1)  # one row of gradients per sample
    # Small eps keeps norm() differentiable at exactly zero gradient.
    penalty = (((flat + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp
    return penalty, flat
class ResnetGenerator(nn.Module):
    """Resnet-based generator: a few downsampling convs, a residual bottleneck,
    then matching upsampling convs.

    We adapt Torch code and idea from Justin Johnson's neural style transfer
    project (https://github.com/jcjohnson/fast-neural-style).
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Construct a Resnet-based generator.

        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        # InstanceNorm has no learnable affine parameters by default,
        # so the convolutions keep their own bias in that case.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        layers = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
            norm_layer(ngf),
            nn.ReLU(True),
        ]

        n_downsampling = 2
        for step in range(n_downsampling):  # downsampling stages, doubling the channels
            in_ch = ngf * (2 ** step)
            layers += [
                nn.Conv2d(in_ch, in_ch * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                norm_layer(in_ch * 2),
                nn.ReLU(True),
            ]

        bottleneck_ch = ngf * (2 ** n_downsampling)
        for _ in range(n_blocks):  # residual bottleneck at the lowest resolution
            layers.append(ResnetBlock(bottleneck_ch, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias))

        for step in range(n_downsampling):  # upsampling stages, halving the channels
            in_ch = ngf * (2 ** (n_downsampling - step))
            layers += [
                nn.ConvTranspose2d(in_ch, int(in_ch / 2),
                                   kernel_size=3, stride=2,
                                   padding=1, output_padding=1,
                                   bias=use_bias),
                norm_layer(int(in_ch / 2)),
                nn.ReLU(True),
            ]

        layers += [
            nn.ReflectionPad2d(3),
            nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
            nn.Tanh(),  # outputs in [-1, 1]
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Standard forward"""
        return self.model(input)
class ResnetBlock(nn.Module):
    """Define a Resnet block: a two-conv block with a skip connection.

    Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Initialize the Resnet block.

        A resnet block is a conv block with a skip connection. We construct
        the conv block in <build_conv_block> and implement the skip
        connection in <forward>.
        """
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    @staticmethod
    def _padding(padding_type):
        """Resolve a padding mode into (explicit padding layers, conv padding).

        Parameters:
            padding_type (str) -- reflect | replicate | zero

        Returns:
            ([pad layers], p) where the list is prepended before the conv and
            ``p`` is the padding argument passed to the conv itself.

        Raises:
            NotImplementedError -- for any other padding mode.
        """
        if padding_type == 'reflect':
            return [nn.ReflectionPad2d(1)], 0
        if padding_type == 'replicate':
            return [nn.ReplicationPad2d(1)], 0
        if padding_type == 'zero':
            return [], 1
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Construct a convolutional block.

        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers.
            use_bias (bool)     -- if the conv layer uses bias or not

        Returns a conv block (conv + norm + ReLU, optional dropout, conv + norm).
        """
        # The padding dispatch was previously duplicated inline for both
        # convs; it is factored into _padding() to keep the two in sync.
        pad_layers, p = self._padding(padding_type)
        conv_block = pad_layers + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        pad_layers, p = self._padding(padding_type)
        # Second conv has no ReLU: the activation happens after the skip add.
        conv_block += pad_layers + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        """Forward function (with skip connections)"""
        return x + self.conv_block(x)  # add skip connection
class UnetGenerator(nn.Module):
    """Create a Unet-based generator"""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet generator.

        Parameters:
            input_nc (int)  -- the number of channels in input images
            output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example,
                               if |num_downs| == 7, a 128x128 image becomes 1x1
                               at the bottleneck
            ngf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer

        The U-Net is assembled recursively, from the innermost block outwards.
        """
        super(UnetGenerator, self).__init__()
        # bottleneck: ngf*8 -> ngf*8, no submodule inside
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        # intermediate levels keep ngf*8 filters (dropout optional)
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=block, norm_layer=norm_layer, use_dropout=use_dropout)
        # gradually reduce the filter count from ngf*8 back down to ngf
        for outer in (ngf * 4, ngf * 2, ngf):
            block = UnetSkipConnectionBlock(outer, outer * 2, input_nc=None, submodule=block, norm_layer=norm_layer)
        # outermost level maps input_nc -> output_nc
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        """Standard forward"""
        return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
    """Defines the Unet submodule with skip connection.

        X -------------------identity----------------------
        |-- downsampling -- |submodule| -- upsampling --|
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int)                      -- the number of filters in the outer conv layer
            inner_nc (int)                      -- the number of filters in the inner conv layer
            input_nc (int)                      -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool)                    -- if this module is the outermost module
            innermost (bool)                    -- if this module is the innermost module
            norm_layer                          -- normalization layer
            use_dropout (bool)                  -- if use dropout layers.
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no affine params by default, so convs need bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc

        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)

        if outermost:
            # Tanh output, no norm; the submodule's skip-concat doubles channels.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            model = [downconv] + [submodule] + [uprelu, upconv, nn.Tanh()]
        elif innermost:
            # No submodule, so the up-conv sees only inner_nc channels.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            model = [downrelu, downconv] + [uprelu, upconv, upnorm]
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            model = [downrelu, downconv, downnorm] + [submodule] + [uprelu, upconv, upnorm]
            if use_dropout:
                model = model + [nn.Dropout(0.5)]
        self.model = nn.Sequential(*model)

    def forward(self, x):
        # The outermost level returns the plain output; every inner level
        # concatenates its input onto the output (the U-Net skip connection).
        if self.outermost:
            return self.model(x)
        return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator"""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Construct a PatchGAN discriminator.

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_layer      -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        # BatchNorm has affine parameters, so the convs can skip their bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        kw, padw = 4, 1
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
        prev_mult = 1
        for n in range(1, n_layers):  # strided convs, doubling filters (capped at 8x ndf)
            mult = min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * mult),
                nn.LeakyReLU(0.2, True),
            ]
            prev_mult = mult
        # One more conv at stride 1 before the prediction head.
        mult = min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * mult),
            nn.LeakyReLU(0.2, True),
        ]
        # Output a 1-channel prediction map (one logit per receptive patch).
        layers += [nn.Conv2d(ndf * mult, 1, kernel_size=kw, stride=1, padding=padw)]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
class PixelDiscriminator(nn.Module):
    """Defines a 1x1 PatchGAN discriminator (pixelGAN)"""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        """Construct a 1x1 PatchGAN discriminator.

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer
        """
        super(PixelDiscriminator, self).__init__()
        # BatchNorm has affine parameters, so the convs can skip their bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        # Every conv is 1x1, so each output pixel depends on a single
        # input pixel: the classifier judges colors, not spatial structure.
        self.net = nn.Sequential(
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        )

    def forward(self, input):
        """Standard forward."""
        return self.net(input)
| 28,408 | 45.04376 | 167 | py |
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/models/template_model.py | """Model class template
This module provides a template for users to implement custom models.
You can specify '--model template' to use this model.
The class name should be consistent with both the filename and its model option.
The filename should be <model>_model.py
The class name should be <Model>Model
It implements a simple image-to-image translation baseline based on regression loss.
Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
min_<netG> ||netG(data_A) - data_B||_1
You need to implement the following functions:
<modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
<__init__>: Initialize this model class.
<set_input>: Unpack input data and perform data pre-processing.
<forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
<optimize_parameters>: Update network weights; it will be called in every training iteration.
"""
import torch
from .base_model import BaseModel
from . import networks
class TemplateModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new model-specific options and rewrite default values for existing options.
Parameters:
parser -- the option parser
is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
if is_train:
parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model.
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
# specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
self.loss_names = ['loss_G']
# specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
self.visual_names = ['data_A', 'data_B', 'output']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
# you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
self.model_names = ['G']
# define networks; you can use opt.isTrain to specify different behaviors for training and test.
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
if self.isTrain: # only defined during training time
# define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
# We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
self.criterionLoss = torch.nn.L1Loss()
# define and initialize optimizers. You can define one optimizer for each network.
# If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers = [self.optimizer]
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
"""
AtoB = self.opt.direction == 'AtoB' # use <direction> to swap data_A and data_B
self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A
self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B
self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths
def forward(self):
"""Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
self.output = self.netG(self.data_A) # generate output image given the input data_A
def backward(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# caculate the intermediate results if necessary; here self.output has been computed during function <forward>
# calculate loss given the input and intermediate results
self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G
def optimize_parameters(self):
"""Update network weights; it will be called in every training iteration."""
self.forward() # first call forward to calculate intermediate results
self.optimizer.zero_grad() # clear network G's existing gradients
self.backward() # calculate gradients for network G
self.optimizer.step() # update gradients for network G
| 5,951 | 58.52 | 177 | py |
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/models/cycle_gan_model.py | import torch
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
class CycleGANModel(BaseModel):
    """
    This class implements the CycleGAN model, for learning image-to-image translation without paired data.

    The model training requires '--dataset_mode unaligned' dataset.
    By default, it uses a '--netG resnet_9blocks' ResNet generator,
    a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
    and a least-square GANs objective ('--gan_mode lsgan').

    CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.

        For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
        A (source domain), B (target domain).
        Generators: G_A: A -> B; G_B: B -> A.
        Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
        Forward cycle loss:  lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
        Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
        Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
        Dropout is not used in the original CycleGAN paper.
        """
        parser.set_defaults(no_dropout=True)  # default CycleGAN did not use dropout
        if is_train:
            parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
            parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
            parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
        return parser

    def __init__(self, opt):
        """Initialize the CycleGAN class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        # (names here are un-prefixed; the corresponding attributes are self.loss_<name>)
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        visual_names_A = ['real_A', 'fake_B', 'rec_A']
        visual_names_B = ['real_B', 'fake_A', 'rec_B']
        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_B=G_B(A) and idt_A=G_A(B)
            visual_names_A.append('idt_B')
            visual_names_B.append('idt_A')
        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']
        # define networks (both Generators and discriminators)
        # The naming is different from those used in the paper.
        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:  # define discriminators
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            if opt.lambda_identity > 0.0:  # identity loss only works when input and output images have the same number of channels
                assert(opt.input_nc == opt.output_nc)
            self.fake_A_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
            self.fake_B_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # define GAN loss.
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            # Both generators share one optimizer; both discriminators share another.
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): include the data itself and its metadata information.

        The option 'direction' can be used to swap domain A and domain B.
        """
        AtoB = self.opt.direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.image_paths = input['A_paths' if AtoB else 'B_paths']

    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.fake_B = self.netG_A(self.real_A)  # G_A(A)
        self.rec_A = self.netG_B(self.fake_B)   # G_B(G_A(A))
        self.fake_A = self.netG_B(self.real_B)  # G_B(B)
        self.rec_B = self.netG_A(self.fake_A)   # G_A(G_B(B))

    def backward_D_basic(self, netD, real, fake):
        """Calculate GAN loss for the discriminator.

        Parameters:
            netD (network)      -- the discriminator D
            real (tensor array) -- real images
            fake (tensor array) -- images generated by a generator

        Return the discriminator loss.
        We also call loss_D.backward() to calculate the gradients.
        """
        # Real
        pred_real = netD(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        # Fake (detach so no gradients flow back into the generator)
        pred_fake = netD(fake.detach())
        loss_D_fake = self.criterionGAN(pred_fake, False)
        # Combined loss and calculate gradients (0.5 slows D relative to G)
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        loss_D.backward()
        return loss_D

    def backward_D_A(self):
        """Calculate GAN loss for discriminator D_A"""
        # query the image pool so D_A also sees older generated samples
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)

    def backward_D_B(self):
        """Calculate GAN loss for discriminator D_B"""
        fake_A = self.fake_A_pool.query(self.fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)

    def backward_G(self):
        """Calculate the loss for generators G_A and G_B"""
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed: ||G_A(B) - B||
            self.idt_A = self.netG_A(self.real_B)
            self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
            # G_B should be identity if real_A is fed: ||G_B(A) - A||
            self.idt_B = self.netG_B(self.real_A)
            self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0
        # GAN loss D_A(G_A(A))
        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
        # GAN loss D_B(G_B(B))
        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
        # Forward cycle loss || G_B(G_A(A)) - A||
        self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
        # Backward cycle loss || G_A(G_B(B)) - B||
        self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
        # combined loss and calculate gradients
        self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
        self.loss_G.backward()

    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        # forward
        self.forward()      # compute fake images and reconstruction images.
        # G_A and G_B
        self.set_requires_grad([self.netD_A, self.netD_B], False)  # Ds require no gradients when optimizing Gs
        self.optimizer_G.zero_grad()  # set G_A and G_B's gradients to zero
        self.backward_G()             # calculate gradients for G_A and G_B
        self.optimizer_G.step()       # update G_A and G_B's weights
        # D_A and D_B
        self.set_requires_grad([self.netD_A, self.netD_B], True)
        self.optimizer_D.zero_grad()   # set D_A and D_B's gradients to zero
        self.backward_D_A()      # calculate gradients for D_A
        self.backward_D_B()      # calculate gradients for D_B
        self.optimizer_D.step()  # update D_A and D_B's weights
| 10,557 | 53.14359 | 362 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.