| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/kernels.py | import gpytorch
import torch
import torch.nn as nn
class NNKernel(nn.Module):
    def __init__(self, input_dim: int, output_dim: int, num_layers: int, hidden_dim: int, flatten: bool = False, **kwargs):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.flatten = flatten
self.model = self.create_model()
def create_model(self):
if self.num_layers == 0:
modules = [nn.Linear(self.input_dim, self.output_dim)]
else:
assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
modules = [nn.Linear(self.input_dim, self.hidden_dim), nn.ReLU()]
if self.flatten:
modules = [nn.Flatten()] + modules
for i in range(self.num_layers - 1):
modules.append(nn.Linear(self.hidden_dim, self.hidden_dim))
modules.append(nn.ReLU())
modules.append(nn.Linear(self.hidden_dim, self.output_dim))
model = nn.Sequential(*modules)
return model
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
r"""
Computes the covariance between x1 and x2.
        This method should be implemented by all Kernel subclasses.
Args:
:attr:`x1` (Tensor `n x d` or `b x n x d`):
First set of data
:attr:`x2` (Tensor `m x d` or `b x m x d`):
Second set of data
:attr:`diag` (bool):
Should the Kernel compute the whole kernel, or just the diag?
            :attr:`last_dim_is_batch` (bool, optional):
If this is true, it treats the last dimension of the data as another batch dimension.
(Useful for additive structure over the dimensions). Default: False
Returns:
:class:`Tensor` or :class:`gpytorch.lazy.LazyTensor`.
The exact size depends on the kernel's evaluation mode:
* `full_covar`: `n x m` or `b x n x m`
* `full_covar` with `last_dim_is_batch=True`: `k x n x m` or `b x k x n x m`
* `diag`: `n` or `b x n`
* `diag` with `last_dim_is_batch=True`: `k x n` or `b x k x n`
"""
if last_dim_is_batch:
raise NotImplementedError()
else:
z1 = self.model(x1)
z2 = self.model(x2)
out = torch.matmul(z1, z2.T)
if diag:
return torch.diag(out)
else:
return out
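# Illustrative sketch (not part of the original repo): how NNKernel is called.
# Both inputs are embedded by the shared MLP; the kernel matrix is the Gram
# matrix of those embeddings, and diag=True keeps only its diagonal.
def _demo_nn_kernel():
    kernel = NNKernel(input_dim=16, output_dim=8, num_layers=2, hidden_dim=32)
    x1 = torch.randn(5, 16)  # n x d
    x2 = torch.randn(7, 16)  # m x d
    full = kernel(x1, x2)  # n x m
    diag = kernel(x1, x1, diag=True)  # n
    assert full.shape == (5, 7) and diag.shape == (5,)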
class CosineNNKernel(nn.Module):
    def __init__(self, input_dim: int, output_dim: int, num_layers: int, hidden_dim: int, flatten: bool = False, **kwargs):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.flatten = flatten
self.model = self.create_model()
def create_model(self):
if self.num_layers == 0:
modules = [nn.Linear(self.input_dim, self.output_dim)]
else:
assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
modules = [nn.Linear(self.input_dim, self.hidden_dim), nn.ReLU()]
if self.flatten:
modules = [nn.Flatten()] + modules
for i in range(self.num_layers - 1):
modules.append(nn.Linear(self.hidden_dim, self.hidden_dim))
modules.append(nn.ReLU())
modules.append(nn.Linear(self.hidden_dim, self.output_dim))
model = nn.Sequential(*modules)
return model
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
if last_dim_is_batch:
raise NotImplementedError()
else:
z1 = self.model(x1)
z2 = self.model(x2)
normalized_input_a = torch.nn.functional.normalize(z1)
normalized_input_b = torch.nn.functional.normalize(z2)
out = torch.mm(normalized_input_a, normalized_input_b.T)
            out += 1  # shift cosine similarity from [-1, 1] to [0, 2] so the kernel is non-negative
if diag:
return torch.diag(out)
else:
return out
class ScalarProductKernel(nn.Module):
    def forward(self, x1, x2):
        # x1: n x d, x2: m x d -> n x m Gram matrix (x2 transposed, matching CosineDistanceKernel below)
        return torch.matmul(x1, x2.T)
class CosineDistanceKernel(nn.Module):
def forward(self, x1, x2):
normalized_input_a = torch.nn.functional.normalize(x1)
normalized_input_b = torch.nn.functional.normalize(x2)
res = torch.mm(normalized_input_a, normalized_input_b.T)
        res = 1.0 - res  # cosine distance in [0, 2]
return res
class PositiveLinear(nn.Module):
def __init__(self, in_features, out_features):
super(PositiveLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
def forward(self, input):
w = nn.functional.softplus(self.weight)
return nn.functional.linear(input, w)
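# Illustrative sketch (not part of the original repo): PositiveLinear squashes
# its raw weights through softplus on every forward pass, so the effective
# weight matrix stays strictly positive no matter where training moves the
# raw parameters.
def _demo_positive_linear():
    layer = PositiveLinear(in_features=4, out_features=3)
    with torch.no_grad():
        layer.weight.copy_(torch.randn(3, 4) - 5.0)  # strongly negative raw weights
    assert (nn.functional.softplus(layer.weight) > 0).all()
    out = layer(torch.randn(2, 4))
    assert out.shape == (2, 3)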
class NNKernelNoInner(gpytorch.kernels.Kernel):
def __init__(self, input_dim, num_layers, hidden_dim, flatten=False, **kwargs):
super(NNKernelNoInner, self).__init__(**kwargs)
self.input_dim = input_dim*2
self.output_dim = 1
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.flatten = flatten
self.model = self.create_model()
def create_model(self):
assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
modules = [PositiveLinear(self.input_dim, self.hidden_dim), nn.Sigmoid()]
if self.flatten:
modules = [nn.Flatten()] + modules
for i in range(self.num_layers - 1):
modules.append(PositiveLinear(self.hidden_dim, self.hidden_dim))
modules.append(nn.Sigmoid())
modules.append(PositiveLinear(self.hidden_dim, self.output_dim))
model = nn.Sequential(*modules)
return model
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
r"""
Computes the covariance between x1 and x2.
        This method should be implemented by all Kernel subclasses.
Args:
:attr:`x1` (Tensor `n x d` or `b x n x d`):
First set of data
:attr:`x2` (Tensor `m x d` or `b x m x d`):
Second set of data
:attr:`diag` (bool):
Should the Kernel compute the whole kernel, or just the diag?
            :attr:`last_dim_is_batch` (bool, optional):
If this is true, it treats the last dimension of the data as another batch dimension.
(Useful for additive structure over the dimensions). Default: False
Returns:
:class:`Tensor` or :class:`gpytorch.lazy.LazyTensor`.
The exact size depends on the kernel's evaluation mode:
* `full_covar`: `n x m` or `b x n x m`
* `full_covar` with `last_dim_is_batch=True`: `k x n x m` or `b x k x n x m`
* `diag`: `n` or `b x n`
* `diag` with `last_dim_is_batch=True`: `k x n` or `b x k x n`
"""
if last_dim_is_batch:
raise NotImplementedError()
else:
n = x1.shape[0]
m = x2.shape[0]
            out = torch.zeros((n, m), device=x1.device)  # x1.device also works on CPU, unlike get_device()
for i in range(n):
for j in range(i+1):
out[i, j] = self.model(torch.cat((x1[i], x2[j]))).view(-1)
if i != j:
out[j, i] = out[i, j]
#npout = out.cpu().detach().numpy()
#print(np.linalg.eigvals(npout))
#assert np.all(np.linalg.eigvals(npout) +1e-2 >= 0), "not positive"
if diag:
return torch.diag(out)
else:
return out
class MultiNNKernel(gpytorch.kernels.Kernel):
def __init__(self, num_tasks, kernels, **kwargs):
super(MultiNNKernel, self).__init__(**kwargs)
assert isinstance(kernels, list), "kernels must be a list of kernels"
self.num_tasks = num_tasks
self.kernels = nn.ModuleList(kernels)
def num_outputs_per_input(self, x1, x2):
"""
Given `n` data points `x1` and `m` datapoints `x2`, this multitask
kernel returns an `(n*num_tasks) x (m*num_tasks)` covariance matrix.
"""
return self.num_tasks
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
r"""
Computes the covariance between x1 and x2.
        This method should be implemented by all Kernel subclasses.
Args:
:attr:`x1` (Tensor `n x d` or `b x n x d`):
First set of data
:attr:`x2` (Tensor `m x d` or `b x m x d`):
Second set of data
:attr:`diag` (bool):
Should the Kernel compute the whole kernel, or just the diag?
            :attr:`last_dim_is_batch` (bool, optional):
If this is true, it treats the last dimension of the data as another batch dimension.
(Useful for additive structure over the dimensions). Default: False
Returns:
:class:`Tensor` or :class:`gpytorch.lazy.LazyTensor`.
The exact size depends on the kernel's evaluation mode:
* `full_covar`: `n x m` or `b x n x m`
* `full_covar` with `last_dim_is_batch=True`: `k x n x m` or `b x k x n x m`
* `diag`: `n` or `b x n`
* `diag` with `last_dim_is_batch=True`: `k x n` or `b x k x n`
"""
if last_dim_is_batch:
raise NotImplementedError()
else:
n = x1.shape[0]
m = x2.shape[0]
            out = torch.zeros((n * self.num_tasks, m * self.num_tasks), device=x1.device)
for i in range(self.num_tasks):
for j in range(self.num_tasks):
z1 = self.kernels[i].model(x1)
z2 = self.kernels[j].model(x2)
out[i:n*self.num_tasks:self.num_tasks, j:m*self.num_tasks:self.num_tasks] = torch.matmul(z1, z2.T)
if diag:
return torch.diag(out)
else:
return out
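# Illustrative sketch (not part of the original repo): the strided assignment
# in MultiNNKernel.forward interleaves tasks, i.e. row r of the big covariance
# belongs to data point r // num_tasks and task r % num_tasks. A toy version:
def _demo_interleaved_blocks():
    n, m, num_tasks = 3, 4, 2
    out = torch.zeros(n * num_tasks, m * num_tasks)
    for i in range(num_tasks):
        for j in range(num_tasks):
            out[i:n * num_tasks:num_tasks, j:m * num_tasks:num_tasks] = float(10 * i + j)
    assert out[1, 0].item() == 10.0  # task pair (1, 0)
    assert out[0, 1].item() == 1.0   # task pair (0, 1)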
def init_kernel_function(kernel_input_dim, params):
if params.hn_use_scalar_product:
return ScalarProductKernel()
elif params.hn_use_cosine_distance:
return CosineDistanceKernel()
else:
# if (not self.use_scalar_product) and (not self.use_cosine_distance):
# kernel_output_dim = self.feat_dim + self.n_way if self.attention_embedding else self.feat_dim
kernel_output_dim = params.hn_kernel_out_size
kernel_layers_no = params.hn_kernel_layers_no
kernel_hidden_dim = params.hn_kernel_hidden_dim
if params.hn_use_cosine_nn_kernel:
return CosineNNKernel(kernel_input_dim, kernel_output_dim, kernel_layers_no, kernel_hidden_dim)
else:
return NNKernel(kernel_input_dim, kernel_output_dim, kernel_layers_no, kernel_hidden_dim)
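# Illustrative sketch: `params` is an argparse namespace in the original code;
# the SimpleNamespace below is a hypothetical stand-in, built only to show the
# dispatch (flag names are taken from the function above).
def _demo_init_kernel_function():
    from types import SimpleNamespace
    params = SimpleNamespace(
        hn_use_scalar_product=False,
        hn_use_cosine_distance=False,
        hn_use_cosine_nn_kernel=False,
        hn_kernel_out_size=8,
        hn_kernel_layers_no=2,
        hn_kernel_hidden_dim=32,
    )
    kernel = init_kernel_function(kernel_input_dim=16, params=params)
    assert isinstance(kernel, NNKernel)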
| 11,422 | 37.591216 | 122 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/maml.py | # This code is modified from https://github.com/dragen1860/MAML-Pytorch and https://github.com/katerakelly/pytorch-maml
import torch
import backbone
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from methods.meta_template import MetaTemplate
from time import time
class MAML(MetaTemplate):
def __init__(self, model_func, n_way, n_support, n_query, params=None, approx = False):
super(MAML, self).__init__(model_func, n_way, n_support, change_way = False)
self.loss_fn = nn.CrossEntropyLoss()
self.classifier = backbone.Linear_fw(self.feat_dim, n_way)
self.classifier.bias.data.fill_(0)
self.maml_adapt_classifier = params.maml_adapt_classifier
self.n_task = 4
self.task_update_num = 5
self.train_lr = 0.01
self.approx = approx #first order approx.
def forward(self,x):
out = self.feature.forward(x)
scores = self.classifier.forward(out)
return scores
def set_forward(self,x, is_feature = False):
        assert is_feature == False, 'MAML does not support fixed features'
x = x.cuda()
x_var = Variable(x)
x_a_i = x_var[:,:self.n_support,:,:,:].contiguous().view( self.n_way* self.n_support, *x.size()[2:]) #support data
x_b_i = x_var[:,self.n_support:,:,:,:].contiguous().view( self.n_way* self.n_query, *x.size()[2:]) #query data
y_a_i = Variable( torch.from_numpy( np.repeat(range( self.n_way ), self.n_support ) )).cuda() #label for support data
if self.maml_adapt_classifier:
fast_parameters = list(self.classifier.parameters())
for weight in self.classifier.parameters():
weight.fast = None
else:
            fast_parameters = list(self.parameters())  # the first gradient below is computed w.r.t. the original weights
for weight in self.parameters():
weight.fast = None
self.zero_grad()
for task_step in (list(range(self.task_update_num))):
scores = self.forward(x_a_i)
set_loss = self.loss_fn( scores, y_a_i)
grad = torch.autograd.grad(set_loss, fast_parameters, create_graph=True) #build full graph support gradient of gradient
if self.approx:
grad = [ g.detach() for g in grad ] #do not calculate gradient of gradient if using first order approximation
fast_parameters = []
parameters = self.classifier.parameters() if self.maml_adapt_classifier else self.parameters()
for k, weight in enumerate(parameters):
#for usage of weight.fast, please see Linear_fw, Conv_fw in backbone.py
if weight.fast is None:
weight.fast = weight - self.train_lr * grad[k] #create weight.fast
else:
                    weight.fast = weight.fast - self.train_lr * grad[k]  # '-' creates a new weight.fast tensor rather than updating in place
                fast_parameters.append(weight.fast)  # the next inner step differentiates w.r.t. the newest fast weights, while the graph keeps links to the old ones
scores = self.forward(x_b_i)
return scores
    def set_forward_adaptation(self, x, is_feature=False):  # override parent method
        raise ValueError('MAML performs further adaptation simply by increasing task_update_num')
def set_forward_loss(self, x):
scores = self.set_forward(x, is_feature = False)
query_data_labels = Variable( torch.from_numpy( np.repeat(range( self.n_way ), self.n_query ) )).cuda()
loss = self.loss_fn(scores, query_data_labels)
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy().flatten()
y_labels = query_data_labels.cpu().numpy()
top1_correct = np.sum(topk_ind == y_labels)
task_accuracy = (top1_correct / len(query_data_labels)) * 100
return loss, task_accuracy
    def train_loop(self, epoch, train_loader, optimizer):  # override parent method
print_freq = 10
avg_loss=0
task_count = 0
loss_all = []
acc_all = []
optimizer.zero_grad()
#train
for i, (x,_) in enumerate(train_loader):
self.n_query = x.size(1) - self.n_support
assert self.n_way == x.size(0), "MAML do not support way change"
loss, task_accuracy = self.set_forward_loss(x)
avg_loss = avg_loss+loss.item()#.data[0]
loss_all.append(loss)
acc_all.append(task_accuracy)
task_count += 1
if task_count == self.n_task: #MAML update several tasks at one time
loss_q = torch.stack(loss_all).sum(0)
loss_q.backward()
optimizer.step()
task_count = 0
loss_all = []
optimizer.zero_grad()
if i % print_freq==0:
print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1)))
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
metrics = {"accuracy/train": acc_mean}
return metrics
    def test_loop(self, test_loader, return_std=False, return_time: bool = False):  # override parent method
correct = 0
count = 0
acc_all = []
eval_time = 0
iter_num = len(test_loader)
for i, (x,_) in enumerate(test_loader):
self.n_query = x.size(1) - self.n_support
assert self.n_way == x.size(0), "MAML do not support way change"
s = time()
correct_this, count_this = self.correct(x)
t = time()
eval_time += (t -s)
acc_all.append(correct_this/ count_this *100 )
num_tasks = len(acc_all)
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
acc_std = np.std(acc_all)
print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
print("Num tasks", num_tasks)
ret = [acc_mean]
if return_std:
ret.append(acc_std)
if return_time:
ret.append(eval_time)
ret.append({})
return ret
def get_logits(self, x):
self.n_query = x.size(1) - self.n_support
logits = self.set_forward(x)
return logits
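# Illustrative sketch (plain tensors, not the repo's Linear_fw machinery):
# MAML's inner loop in set_forward keeps the computation graph through
# `fast = w - lr * grad`, so the outer loss backpropagates through the
# adaptation step; with approx=True the gradient is detached first and only
# first-order information survives.
def _demo_fast_weight_step(approx: bool = False):
    w = torch.randn(3, requires_grad=True)
    inner_loss = (w ** 2).sum()
    grad, = torch.autograd.grad(inner_loss, [w], create_graph=not approx)
    if approx:
        grad = grad.detach()  # first-order MAML: drop gradient-of-gradient terms
    fast = w - 0.01 * grad  # the "weight.fast" analogue
    outer_loss = fast.sum()
    outer_loss.backward()  # differentiates through the update unless approx=True
    return w.grad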
| 6,570 | 39.312883 | 176 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/meta_template.py | from collections import defaultdict
from typing import Tuple
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import utils
from abc import abstractmethod
class MetaTemplate(nn.Module):
def __init__(self, model_func, n_way, n_support, change_way = True):
super(MetaTemplate, self).__init__()
self.n_way = n_way
self.n_support = n_support
self.n_query = -1 #(change depends on input)
self.feature = model_func()
self.feat_dim = self.feature.final_feat_dim
self.change_way = change_way #some methods allow different_way classification during training and test
@abstractmethod
def set_forward(self,x,is_feature):
pass
@abstractmethod
def set_forward_loss(self, x):
pass
def forward(self,x):
out = self.feature.forward(x)
return out
def parse_feature(self,x,is_feature) -> Tuple[torch.Tensor, torch.Tensor]:
x = Variable(x.cuda())
if is_feature:
z_all = x
else:
x = x.contiguous().view( self.n_way * (self.n_support + self.n_query), *x.size()[2:])
z_all = self.feature.forward(x)
z_all = z_all.view( self.n_way, self.n_support + self.n_query, -1)
z_support = z_all[:, :self.n_support]
z_query = z_all[:, self.n_support:]
return z_support, z_query
def correct(self, x):
scores = self.set_forward(x)
y_query = np.repeat(range( self.n_way ), self.n_query )
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy()
top1_correct = np.sum(topk_ind[:,0] == y_query)
return float(top1_correct), len(y_query)
def train_loop(self, epoch, train_loader, optimizer ):
print_freq = 10
avg_loss=0
for i, (x,_) in enumerate(train_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way:
self.n_way = x.size(0)
optimizer.zero_grad()
loss = self.set_forward_loss( x )
loss.backward()
optimizer.step()
avg_loss = avg_loss+loss.item()
if i % print_freq==0:
#print(optimizer.state_dict()['param_groups'][0]['lr'])
print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1)))
def test_loop(self, test_loader, record = None, return_std: bool = False):
correct =0
count = 0
acc_all = []
acc_at = defaultdict(list)
iter_num = len(test_loader)
for i, (x,_) in enumerate(test_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way:
self.n_way = x.size(0)
y_query = np.repeat(range( self.n_way ), self.n_query )
try:
scores, acc_at_metrics = self.set_forward_with_adaptation(x)
for (k,v) in acc_at_metrics.items():
acc_at[k].append(v)
except Exception as e:
scores = self.set_forward(x)
scores = scores.reshape((self.n_way * self.n_query, self.n_way))
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy()
top1_correct = np.sum(topk_ind[:,0] == y_query)
correct_this = float(top1_correct)
count_this = len(y_query)
acc_all.append(correct_this/ count_this*100 )
metrics = {
k: np.mean(v) if len(v) > 0 else 0
for (k,v) in acc_at.items()
}
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
acc_std = np.std(acc_all)
print(metrics)
print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
if return_std:
return acc_mean, acc_std, metrics
else:
return acc_mean, metrics
    def set_forward_adaptation(self, x, is_feature=True):  # further adaptation; the default is to fix the features and train a new softmax classifier
assert is_feature == True, 'Feature is fixed in further adaptation'
z_support, z_query = self.parse_feature(x,is_feature)
z_support = z_support.contiguous().view(self.n_way* self.n_support, -1 )
z_query = z_query.contiguous().view(self.n_way* self.n_query, -1 )
y_support = torch.from_numpy(np.repeat(range( self.n_way ), self.n_support ))
y_support = Variable(y_support.cuda())
linear_clf = nn.Linear(self.feat_dim, self.n_way)
linear_clf = linear_clf.cuda()
set_optimizer = torch.optim.SGD(linear_clf.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
loss_function = nn.CrossEntropyLoss()
loss_function = loss_function.cuda()
batch_size = 4
support_size = self.n_way* self.n_support
for epoch in range(100):
rand_id = np.random.permutation(support_size)
for i in range(0, support_size , batch_size):
set_optimizer.zero_grad()
selected_id = torch.from_numpy( rand_id[i: min(i+batch_size, support_size) ]).cuda()
z_batch = z_support[selected_id]
y_batch = y_support[selected_id]
scores = linear_clf(z_batch)
loss = loss_function(scores,y_batch)
loss.backward()
set_optimizer.step()
scores = linear_clf(z_query)
return scores
| 5,764 | 36.679739 | 140 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/DKT.py | ## Original packages
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
## Our packages
import gpytorch
from time import gmtime, strftime
import random
from configs import kernel_type
from models import gp_kernels
#Check if tensorboardx is installed
try:
from tensorboardX import SummaryWriter
IS_TBX_INSTALLED = True
except ImportError:
IS_TBX_INSTALLED = False
print('[WARNING] install tensorboardX to record simulation logs.')
## Training CMD
#ATTENTION: to test each method use exactly the same command but replace 'train.py' with 'test.py'
# Omniglot->EMNIST without data augmentation
#python3 train.py --dataset="cross_char" --method="DKT" --train_n_way=5 --test_n_way=5 --n_shot=1
#python3 train.py --dataset="cross_char" --method="DKT" --train_n_way=5 --test_n_way=5 --n_shot=5
# CUB + data augmentation
#python3 train.py --dataset="CUB" --method="DKT" --train_n_way=5 --test_n_way=5 --n_shot=1 --train_aug
#python3 train.py --dataset="CUB" --method="DKT" --train_n_way=5 --test_n_way=5 --n_shot=5 --train_aug
class DKT(MetaTemplate):
def __init__(self, model_func, n_way, n_support):
super(DKT, self).__init__(model_func, n_way, n_support)
## GP parameters
self.leghtscale_list = None
self.noise_list = None
self.outputscale_list = None
self.iteration = 0
self.writer=None
self.feature_extractor = self.feature
self.get_model_likelihood_mll() #Init model, likelihood, and mll
if(kernel_type=="cossim"):
self.normalize=True
elif(kernel_type=="bncossim"):
self.normalize=True
latent_size = np.prod(self.feature_extractor.final_feat_dim)
self.feature_extractor.trunk.add_module("bn_out", nn.BatchNorm1d(latent_size))
else:
self.normalize=False
def init_summary(self):
if(IS_TBX_INSTALLED):
time_string = strftime("%d%m%Y_%H%M%S", gmtime())
writer_path = "./log/" + time_string
self.writer = SummaryWriter(log_dir=writer_path)
def get_model_likelihood_mll(self, train_x_list=None, train_y_list=None):
if(train_x_list is None): train_x_list=[torch.ones(100, 64).cuda()]*self.n_way
if(train_y_list is None): train_y_list=[torch.ones(100).cuda()]*self.n_way
model_list = list()
likelihood_list = list()
for train_x, train_y in zip(train_x_list, train_y_list):
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPLayer(train_x=train_x, train_y=train_y, likelihood=likelihood, kernel=kernel_type)
model_list.append(model)
likelihood_list.append(model.likelihood)
self.model = gpytorch.models.IndependentModelList(*model_list).cuda()
self.likelihood = gpytorch.likelihoods.LikelihoodList(*likelihood_list).cuda()
self.mll = gpytorch.mlls.SumMarginalLogLikelihood(self.likelihood, self.model).cuda()
return self.model, self.likelihood, self.mll
def set_forward(self, x, is_feature=False):
pass
def set_forward_loss(self, x):
pass
def _reset_likelihood(self, debug=False):
for param in self.likelihood.parameters():
param.data.normal_(0.0, 0.01)
def _print_weights(self):
for k, v in self.feature_extractor.state_dict().items():
print("Layer {}".format(k))
print(v)
def _reset_variational(self):
mean_init = torch.zeros(128) #num_inducing_points
covar_init = torch.eye(128, 128) #num_inducing_points
mean_init = mean_init.repeat(64, 1) #batch_shape
covar_init = covar_init.repeat(64, 1, 1) #batch_shape
for idx, param in enumerate(self.gp_layer.variational_parameters()):
if(idx==0): param.data.copy_(mean_init) #"variational_mean"
elif(idx==1): param.data.copy_(covar_init) #"chol_variational_covar"
else: raise ValueError('[ERROR] DKT the variational_parameters at index>1 should not exist!')
def _reset_parameters(self):
if(self.leghtscale_list is None):
self.leghtscale_list = list()
self.noise_list = list()
self.outputscale_list = list()
for idx, single_model in enumerate(self.model.models):
self.leghtscale_list.append(single_model.covar_module.base_kernel.lengthscale.clone().detach())
self.noise_list.append(single_model.likelihood.noise.clone().detach())
self.outputscale_list.append(single_model.covar_module.outputscale.clone().detach())
else:
for idx, single_model in enumerate(self.model.models):
single_model.covar_module.base_kernel.lengthscale=self.leghtscale_list[idx].clone().detach()#.requires_grad_(True)
single_model.likelihood.noise=self.noise_list[idx].clone().detach()
single_model.covar_module.outputscale=self.outputscale_list[idx].clone().detach()
def train_loop(self, epoch, train_loader, optimizer, print_freq=10):
optimizer = torch.optim.Adam([{'params': self.model.parameters(), 'lr': 1e-4},
{'params': self.feature_extractor.parameters(), 'lr': 1e-3}])
for i, (x,_) in enumerate(train_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way: self.n_way = x.size(0)
x_all = x.contiguous().view(self.n_way * (self.n_support + self.n_query), *x.size()[2:]).cuda()
y_all = Variable(torch.from_numpy(np.repeat(range(self.n_way), self.n_query+self.n_support)).cuda())
x_support = x[:,:self.n_support,:,:,:].contiguous().view(self.n_way * (self.n_support), *x.size()[2:]).cuda()
y_support = np.repeat(range(self.n_way), self.n_support)
x_query = x[:,self.n_support:,:,:,:].contiguous().view(self.n_way * (self.n_query), *x.size()[2:]).cuda()
y_query = np.repeat(range(self.n_way), self.n_query)
x_train = x_all
y_train = y_all
target_list = list()
samples_per_model = int(len(y_train) / self.n_way) #25 / 5 = 5
for way in range(self.n_way):
target = torch.ones(len(y_train), dtype=torch.float32) * -1.0
start_index = way * samples_per_model
stop_index = start_index+samples_per_model
target[start_index:stop_index] = 1.0
target_list.append(target.cuda())
self.model.train()
self.likelihood.train()
self.feature_extractor.train()
z_train = self.feature_extractor.forward(x_train)
if(self.normalize): z_train = F.normalize(z_train, p=2, dim=1)
train_list = [z_train]*self.n_way
lenghtscale = 0.0
noise = 0.0
outputscale = 0.0
for idx, single_model in enumerate(self.model.models):
single_model.set_train_data(inputs=z_train, targets=target_list[idx], strict=False)
if(single_model.covar_module.base_kernel.lengthscale is not None):
lenghtscale+=single_model.covar_module.base_kernel.lengthscale.mean().cpu().detach().numpy().squeeze()
noise+=single_model.likelihood.noise.cpu().detach().numpy().squeeze()
if(single_model.covar_module.outputscale is not None):
outputscale+=single_model.covar_module.outputscale.cpu().detach().numpy().squeeze()
if(single_model.covar_module.base_kernel.lengthscale is not None): lenghtscale /= float(len(self.model.models))
noise /= float(len(self.model.models))
if(single_model.covar_module.outputscale is not None): outputscale /= float(len(self.model.models))
## Optimize
optimizer.zero_grad()
output = self.model(*self.model.train_inputs)
loss = -self.mll(output, self.model.train_targets)
loss.backward()
optimizer.step()
self.iteration = i+(epoch*len(train_loader))
if(self.writer is not None): self.writer.add_scalar('loss', loss, self.iteration)
#Eval on the query (validation set)
with torch.no_grad():
self.model.eval()
self.likelihood.eval()
self.feature_extractor.eval()
z_support = self.feature_extractor.forward(x_support).detach()
if(self.normalize): z_support = F.normalize(z_support, p=2, dim=1)
z_support_list = [z_support]*len(y_support)
predictions = self.likelihood(*self.model(*z_support_list)) #return 20 MultiGaussian Distributions
predictions_list = list()
for gaussian in predictions:
predictions_list.append(torch.sigmoid(gaussian.mean).cpu().detach().numpy())
y_pred = np.vstack(predictions_list).argmax(axis=0) #[model, classes]
accuracy_support = (np.sum(y_pred==y_support) / float(len(y_support))) * 100.0
if(self.writer is not None): self.writer.add_scalar('GP_support_accuracy', accuracy_support, self.iteration)
z_query = self.feature_extractor.forward(x_query).detach()
if(self.normalize): z_query = F.normalize(z_query, p=2, dim=1)
z_query_list = [z_query]*len(y_query)
predictions = self.likelihood(*self.model(*z_query_list)) #return 20 MultiGaussian Distributions
predictions_list = list()
for gaussian in predictions:
predictions_list.append(torch.sigmoid(gaussian.mean).cpu().detach().numpy())
y_pred = np.vstack(predictions_list).argmax(axis=0) #[model, classes]
accuracy_query = (np.sum(y_pred==y_query) / float(len(y_query))) * 100.0
if(self.writer is not None): self.writer.add_scalar('GP_query_accuracy', accuracy_query, self.iteration)
if i % print_freq==0:
if(self.writer is not None): self.writer.add_histogram('z_support', z_support, self.iteration)
            print('Epoch [{:d}] [{:d}/{:d}] | Outscale {:f} | Lengthscale {:f} | Noise {:f} | Loss {:f} | Supp. {:f} | Query {:f}'.format(epoch, i, len(train_loader), outputscale, lenghtscale, noise, loss.item(), accuracy_support, accuracy_query))
def correct(self, x, N=0, laplace=False):
##Dividing input x in query and support set
x_support = x[:,:self.n_support,:,:,:].contiguous().view(self.n_way * (self.n_support), *x.size()[2:]).cuda()
y_support = torch.from_numpy(np.repeat(range(self.n_way), self.n_support)).cuda()
x_query = x[:,self.n_support:,:,:,:].contiguous().view(self.n_way * (self.n_query), *x.size()[2:]).cuda()
y_query = np.repeat(range(self.n_way), self.n_query)
## Laplace approximation of the posterior
if(laplace):
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, Matern
from sklearn.gaussian_process.kernels import ConstantKernel as C
kernel = 1.0 * RBF(length_scale=0.1 , length_scale_bounds=(0.1, 10.0))
gp = GaussianProcessClassifier(kernel=kernel, optimizer=None)
z_support = self.feature_extractor.forward(x_support).detach()
if(self.normalize): z_support = F.normalize(z_support, p=2, dim=1)
gp.fit(z_support.cpu().detach().numpy(), y_support.cpu().detach().numpy())
z_query = self.feature_extractor.forward(x_query).detach()
if(self.normalize): z_query = F.normalize(z_query, p=2, dim=1)
y_pred = gp.predict(z_query.cpu().detach().numpy())
accuracy = (np.sum(y_pred==y_query) / float(len(y_query))) * 100.0
top1_correct = np.sum(y_pred==y_query)
count_this = len(y_query)
return float(top1_correct), count_this, 0.0
x_train = x_support
y_train = y_support
target_list = list()
samples_per_model = int(len(y_train) / self.n_way)
for way in range(self.n_way):
target = torch.ones(len(y_train), dtype=torch.float32) * -1.0
start_index = way * samples_per_model
stop_index = start_index+samples_per_model
target[start_index:stop_index] = 1.0
target_list.append(target.cuda())
z_train = self.feature_extractor.forward(x_train).detach() #[340, 64]
if(self.normalize): z_train = F.normalize(z_train, p=2, dim=1)
train_list = [z_train]*self.n_way
for idx, single_model in enumerate(self.model.models):
single_model.set_train_data(inputs=z_train, targets=target_list[idx], strict=False)
optimizer = torch.optim.Adam([{'params': self.model.parameters()}], lr=1e-3)
self.model.train()
self.likelihood.train()
self.feature_extractor.eval()
avg_loss=0.0
for i in range(0, N):
## Optimize
optimizer.zero_grad()
output = self.model(*self.model.train_inputs)
loss = -self.mll(output, self.model.train_targets)
loss.backward()
optimizer.step()
avg_loss = avg_loss+loss.item()
with torch.no_grad(), gpytorch.settings.num_likelihood_samples(32):
self.model.eval()
self.likelihood.eval()
self.feature_extractor.eval()
z_query = self.feature_extractor.forward(x_query).detach()
if(self.normalize): z_query = F.normalize(z_query, p=2, dim=1)
z_query_list = [z_query]*len(y_query)
predictions = self.likelihood(*self.model(*z_query_list)) #return n_way MultiGaussians
predictions_list = list()
for gaussian in predictions:
predictions_list.append(torch.sigmoid(gaussian.mean).cpu().detach().numpy())
y_pred = np.vstack(predictions_list).argmax(axis=0) #[model, classes]
top1_correct = np.sum(y_pred == y_query)
count_this = len(y_query)
return float(top1_correct), count_this, avg_loss/float(N+1e-10)
def test_loop(self, test_loader, record=None, return_std=False):
print_freq = 10
correct =0
count = 0
acc_all = []
iter_num = len(test_loader)
for i, (x,_) in enumerate(test_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way:
self.n_way = x.size(0)
correct_this, count_this, loss_value = self.correct(x)
acc_all.append(correct_this/ count_this*100)
if(i % 100==0):
acc_mean = np.mean(np.asarray(acc_all))
print('Test | Batch {:d}/{:d} | Loss {:f} | Acc {:f}'.format(i, len(test_loader), loss_value, acc_mean))
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
acc_std = np.std(acc_all)
print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
if(self.writer is not None): self.writer.add_scalar('test_accuracy', acc_mean, self.iteration)
if(return_std): return acc_mean, acc_std
else: return acc_mean
def get_logits(self, x):
self.n_query = x.size(1) - self.n_support
##Dividing input x in query and support set
x_support = x[:,:self.n_support,:,:,:].contiguous().view(self.n_way * (self.n_support), *x.size()[2:]).cuda()
y_support = torch.from_numpy(np.repeat(range(self.n_way), self.n_support)).cuda()
x_query = x[:,self.n_support:,:,:,:].contiguous().view(self.n_way * (self.n_query), *x.size()[2:]).cuda()
y_query = np.repeat(range(self.n_way), self.n_query)
# Init to dummy values
x_train = x_support
y_train = y_support
target_list = list()
samples_per_model = int(len(y_train) / self.n_way)
for way in range(self.n_way):
target = torch.ones(len(y_train), dtype=torch.float32) * -1.0
start_index = way * samples_per_model
stop_index = start_index+samples_per_model
target[start_index:stop_index] = 1.0
target_list.append(target.cuda())
z_train = self.feature_extractor.forward(x_train).detach() #[340, 64]
if(self.normalize): z_train = F.normalize(z_train, p=2, dim=1)
train_list = [z_train]*self.n_way
for idx, single_model in enumerate(self.model.models):
single_model.set_train_data(inputs=z_train, targets=target_list[idx], strict=False)
with torch.no_grad(), gpytorch.settings.num_likelihood_samples(32):
self.model.eval()
self.likelihood.eval()
self.feature_extractor.eval()
z_query = self.feature_extractor.forward(x_query).detach()
if(self.normalize): z_query = F.normalize(z_query, p=2, dim=1)
z_query_list = [z_query]*len(y_query)
predictions = self.likelihood(*self.model(*z_query_list)) #return n_way MultiGaussians
predictions_list = list()
for gaussian in predictions:
predictions_list.append(gaussian.mean) #.cpu().detach().numpy())
y_pred = torch.stack(predictions_list, 1)
return y_pred
class ExactGPLayer(gpytorch.models.ExactGP):
'''
Parameters learned by the model:
likelihood.noise_covar.raw_noise
covar_module.raw_outputscale
covar_module.base_kernel.raw_lengthscale
'''
def __init__(self, train_x, train_y, likelihood, kernel='linear'):
#Set the likelihood noise and enable/disable learning
likelihood.noise_covar.raw_noise.requires_grad = False
likelihood.noise_covar.noise = torch.tensor(0.1)
super().__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
## Linear kernel
if(kernel=='linear'):
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.LinearKernel())
## RBF kernel
elif(kernel=='rbf' or kernel=='RBF'):
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
## Matern kernel
elif(kernel=='matern'):
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel())
## Polynomial (p=1)
elif(kernel=='poli1'):
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PolynomialKernel(power=1))
## Polynomial (p=2)
elif(kernel=='poli2'):
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PolynomialKernel(power=2))
elif(kernel=='cossim' or kernel=='bncossim'):
## Cosine distance and BatchNorm Cosine distance
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.LinearKernel())
self.covar_module.base_kernel.variance = 1.0
self.covar_module.base_kernel.raw_variance.requires_grad = False
elif(kernel=='nn'):
self.input_dim = 1600
self.output_dim = 1600
self.num_layers = 4
self.hidden_dim = 64
kernel = gp_kernels.NNKernel(input_dim=self.input_dim,
output_dim=self.output_dim,
num_layers=self.num_layers,
hidden_dim=self.hidden_dim)
self.covar_module = gpytorch.kernels.ScaleKernel(kernel)
else:
raise ValueError("[ERROR] the kernel '" + str(kernel) + "' is not supported!")
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
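# Illustrative sketch (not part of the original repo): standalone usage of
# ExactGPLayer on dummy features. After .eval(), calling the likelihood on the
# GP posterior gives the predictive distribution whose .mean is what
# DKT.correct and DKT.get_logits above consume.
def _demo_exact_gp_layer():
    train_x = torch.randn(25, 64)
    train_y = torch.ones(25)
    likelihood = gpytorch.likelihoods.GaussianLikelihood()
    gp = ExactGPLayer(train_x=train_x, train_y=train_y, likelihood=likelihood, kernel='rbf')
    gp.eval()
    likelihood.eval()
    with torch.no_grad():
        pred = likelihood(gp(torch.randn(5, 64)))
    assert pred.mean.shape == (5,)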
| 20,017 | 50.328205 | 251 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/protonet.py | # This code is modified from https://github.com/jakesnell/prototypical-networks
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
class ProtoNet(MetaTemplate):
def __init__(self, model_func, n_way, n_support, n_query=None):
super(ProtoNet, self).__init__( model_func, n_way, n_support)
self.loss_fn = nn.CrossEntropyLoss()
def set_forward(self,x,is_feature = False):
z_support, z_query = self.parse_feature(x,is_feature)
z_support = z_support.contiguous()
z_proto = z_support.view(self.n_way, self.n_support, -1 ).mean(1) #the shape of z is [n_data, n_dim]
z_query = z_query.contiguous().view(self.n_way* self.n_query, -1 )
dists = euclidean_dist(z_query, z_proto)
scores = -dists
return scores
def set_forward_loss(self, x):
y_query = torch.from_numpy(np.repeat(range( self.n_way ), self.n_query ))
y_query = Variable(y_query.cuda())
scores = self.set_forward(x)
return self.loss_fn(scores, y_query )
def euclidean_dist( x, y):
# x: N x D
# y: M x D
n = x.size(0)
m = y.size(0)
d = x.size(1)
assert d == y.size(1)
x = x.unsqueeze(1).expand(n, m, d)
y = y.unsqueeze(0).expand(n, m, d)
return torch.pow(x - y, 2).sum(2)
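# Illustrative sketch (not part of the original repo): the expand/broadcast in
# euclidean_dist computes all pairwise squared distances without a Python loop.
def _demo_euclidean_dist():
    x = torch.randn(6, 4)  # e.g. n_way * n_query query embeddings
    y = torch.randn(3, 4)  # e.g. n_way class prototypes
    d = euclidean_dist(x, y)
    assert d.shape == (6, 3)
    # matches the naive computation for a single pair
    assert torch.allclose(d[0, 0], ((x[0] - y[0]) ** 2).sum())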
| 1,434 | 27.7 | 112 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/baselinetrain.py | import backbone
import utils
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
class BaselineTrain(nn.Module):
def __init__(self, model_func, num_class, loss_type = 'softmax'):
super(BaselineTrain, self).__init__()
self.feature = model_func()
if loss_type == 'softmax':
self.classifier = nn.Linear(self.feature.final_feat_dim, num_class)
self.classifier.bias.data.fill_(0)
elif loss_type == 'dist': #Baseline ++
self.classifier = backbone.distLinear(self.feature.final_feat_dim, num_class)
self.loss_type = loss_type #'softmax' #'dist'
self.num_class = num_class
self.loss_fn = nn.CrossEntropyLoss()
def forward(self,x):
x = Variable(x.cuda())
out = self.feature.forward(x)
scores = self.classifier.forward(out)
return scores
def forward_loss(self, x, y):
scores = self.forward(x)
y = Variable(y.cuda())
return self.loss_fn(scores, y )
def train_loop(self, epoch, train_loader, optimizer):
print_freq = 10
avg_loss=0
for i, (x,y) in enumerate(train_loader):
optimizer.zero_grad()
loss = self.forward_loss(x, y)
loss.backward()
optimizer.step()
avg_loss = avg_loss+loss.data.item()
if i % print_freq==0:
#print(optimizer.state_dict()['param_groups'][0]['lr'])
print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1) ))
def test_loop(self, val_loader):
return -1 #no validation, just save model during iteration
| 1,780 | 32.603774 | 124 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/baselinefinetune.py | import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
class BaselineFinetune(MetaTemplate):
def __init__(self, model_func, n_way, n_support, loss_type = "softmax"):
super(BaselineFinetune, self).__init__( model_func, n_way, n_support)
self.loss_type = loss_type
def set_forward(self,x,is_feature = True):
        return self.set_forward_adaptation(x, is_feature)  # Baseline always does adaptation
def set_forward_adaptation(self,x,is_feature = True):
        assert is_feature == True, 'Baseline only supports testing with features'
z_support, z_query = self.parse_feature(x,is_feature)
z_support = z_support.contiguous().view(self.n_way* self.n_support, -1 )
z_query = z_query.contiguous().view(self.n_way* self.n_query, -1 )
y_support = torch.from_numpy(np.repeat(range( self.n_way ), self.n_support ))
y_support = Variable(y_support.cuda())
if self.loss_type == 'softmax':
linear_clf = nn.Linear(self.feat_dim, self.n_way)
elif self.loss_type == 'dist':
linear_clf = backbone.distLinear(self.feat_dim, self.n_way)
linear_clf = linear_clf.cuda()
set_optimizer = torch.optim.SGD(linear_clf.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
loss_function = nn.CrossEntropyLoss()
loss_function = loss_function.cuda()
batch_size = 4
support_size = self.n_way* self.n_support
for epoch in range(100):
rand_id = np.random.permutation(support_size)
for i in range(0, support_size , batch_size):
set_optimizer.zero_grad()
selected_id = torch.from_numpy( rand_id[i: min(i+batch_size, support_size) ]).cuda()
z_batch = z_support[selected_id]
y_batch = y_support[selected_id]
scores = linear_clf(z_batch)
loss = loss_function(scores,y_batch)
loss.backward()
set_optimizer.step()
scores = linear_clf(z_query)
return scores
def set_forward_loss(self,x):
        raise ValueError('Baseline predicts on pretrained features and does not support finetuning the backbone')
| 2,381 | 39.372881 | 124 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/kernel_convolutions.py | import torch
import torch.nn as nn
class KernelConv(nn.Module):
def __init__(self, n_shot, hn_kernel_convolution_output_dim):
super(KernelConv, self).__init__()
if n_shot == 5:
self.conv = nn.Sequential(
nn.Conv2d(1, 2, kernel_size=(5, 5)),
nn.ReLU(inplace=True),
nn.Conv2d(2, 3, kernel_size=(3, 3)),
nn.ReLU(inplace=True),
nn.Conv2d(3, 5, kernel_size=(2, 2)),
nn.ReLU(inplace=True)
)
self.fc = nn.Linear(5 * 18 * 18, hn_kernel_convolution_output_dim)
else:
self.conv = nn.Sequential(
nn.Conv2d(1, 2, kernel_size=(2, 2)),
nn.ReLU(inplace=True),
nn.Conv2d(2, 3, kernel_size=(2, 2)),
nn.ReLU(inplace=True),
nn.Conv2d(3, 5, kernel_size=(2, 2)),
nn.ReLU(inplace=True)
)
self.fc = nn.Linear(5 * 2 * 2, hn_kernel_convolution_output_dim)
def forward(self, x):
x = self.conv(x)
x = torch.flatten(x, start_dim=1, end_dim=-1)
out = self.fc(x)
        return out
| 1,174 | 34.606061 | 78 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/transformer.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def scaled_dot_product(q, k, v, mask=None):
d_k = q.size()[-1]
attn_logits = torch.matmul(q, k.transpose(-2, -1))
attn_logits = attn_logits / math.sqrt(d_k)
if mask is not None:
attn_logits = attn_logits.masked_fill(mask == 0, -9e15)
attention = F.softmax(attn_logits, dim=-1)
values = torch.matmul(attention, v)
return values, attention
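# Illustrative sketch (not part of the original repo): q, k and v carry a head
# dimension, and each attention row is a proper probability distribution.
def _demo_scaled_dot_product():
    q = k = v = torch.randn(2, 4, 10, 16)  # [batch, heads, seq_len, head_dim]
    values, attention = scaled_dot_product(q, k, v)
    assert values.shape == (2, 4, 10, 16)
    assert attention.shape == (2, 4, 10, 10)
    assert torch.allclose(attention.sum(dim=-1), torch.ones(2, 4, 10))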
class MultiheadAttention(nn.Module):
def __init__(self, input_dim, embed_dim, num_heads):
super().__init__()
assert embed_dim % num_heads == 0, "Embedding dimension must be 0 modulo number of heads."
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
# Stack all weight matrices 1...h together for efficiency
# Note that in many implementations you see "bias=False" which is optional
self.qkv_proj = nn.Linear(input_dim, 3*embed_dim)
self.o_proj = nn.Linear(embed_dim, embed_dim)
self._reset_parameters()
def _reset_parameters(self):
# Original Transformer initialization, see PyTorch documentation
nn.init.xavier_uniform_(self.qkv_proj.weight)
self.qkv_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(self.o_proj.weight)
self.o_proj.bias.data.fill_(0)
def forward(self, x, mask=None, return_attention=False):
batch_size, seq_length, embed_dim = x.size()
qkv = self.qkv_proj(x)
# Separate Q, K, V from linear output
qkv = qkv.reshape(batch_size, seq_length, self.num_heads, 3*self.head_dim)
qkv = qkv.permute(0, 2, 1, 3) # [Batch, Head, SeqLen, Dims]
q, k, v = qkv.chunk(3, dim=-1)
# Determine value outputs
values, attention = scaled_dot_product(q, k, v, mask=mask)
values = values.permute(0, 2, 1, 3) # [Batch, SeqLen, Head, Dims]
values = values.reshape(batch_size, seq_length, embed_dim)
o = self.o_proj(values)
if return_attention:
return o, attention
else:
return o
class EncoderBlock(nn.Module):
def __init__(self, input_dim, num_heads, dim_feedforward, dropout=0.0):
"""
Inputs:
input_dim - Dimensionality of the input
num_heads - Number of heads to use in the attention block
dim_feedforward - Dimensionality of the hidden layer in the MLP
dropout - Dropout probability to use in the dropout layers
"""
super().__init__()
# Attention layer
self.self_attn = MultiheadAttention(input_dim, input_dim, num_heads)
# Two-layer MLP
self.linear_net = nn.Sequential(
nn.Linear(input_dim, dim_feedforward),
nn.Dropout(dropout),
nn.ReLU(inplace=True),
nn.Linear(dim_feedforward, input_dim)
)
# Layers to apply in between the main layers
self.norm1 = nn.LayerNorm(input_dim)
self.norm2 = nn.LayerNorm(input_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x, mask=None):
# Attention part
attn_out = self.self_attn(x, mask=mask)
x = x + self.dropout(attn_out)
x = self.norm1(x)
# MLP part
linear_out = self.linear_net(x)
x = x + self.dropout(linear_out)
x = self.norm2(x)
return x
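# Illustrative sketch (not part of the original repo): an encoder block is
# shape-preserving, which is what lets TransformerEncoder below stack them.
def _demo_encoder_block():
    block = EncoderBlock(input_dim=64, num_heads=4, dim_feedforward=128, dropout=0.0)
    x = torch.randn(2, 10, 64)  # [batch, seq_len, input_dim]
    assert block(x).shape == x.shape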
class TransformerEncoder(nn.Module):
def __init__(self, num_layers, **block_args):
super().__init__()
self.layers = nn.ModuleList([EncoderBlock(**block_args) for _ in range(num_layers)])
def forward(self, x, mask=None):
for l in self.layers:
x = l(x, mask=mask)
return x
def get_attention_maps(self, x, mask=None):
attention_maps = []
for l in self.layers:
_, attn_map = l.self_attn(x, mask=mask, return_attention=True)
attention_maps.append(attn_map)
            x = l(x, mask=mask)  # propagate the mask so deeper layers attend consistently
        return attention_maps
| 4,046 | 32.172131 | 98 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/feature_transfer_regression.py | import numpy as np
import gpytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import backbone
from torch.autograd import Variable
from data.qmul_loader import get_batch, train_people, test_people
class Regressor(nn.Module):
def __init__(self):
super(Regressor, self).__init__()
self.layer4 = nn.Linear(2916, 1)
    def return_clones(self):
        layer4_w = self.layer4.weight.data.clone().detach()
        layer4_b = self.layer4.bias.data.clone().detach()
        return [layer4_w, layer4_b]  # without the return, this method would silently discard the clones
    def assign_clones(self, weights_list):
        self.layer4.weight.data.copy_(weights_list[0])
        self.layer4.bias.data.copy_(weights_list[1])  # fixed: the original copied weights_list[1] into the weights again
def forward(self, x):
out = self.layer4(x)
return out
class FeatureTransfer(nn.Module):
def __init__(self, backbone):
super(FeatureTransfer, self).__init__()
self.feature_extractor = backbone
self.model = Regressor()
self.criterion = nn.MSELoss()
def train_loop(self, epoch, optimizer):
batch, batch_labels = get_batch(train_people)
batch, batch_labels = batch.cuda(), batch_labels.cuda()
for inputs, labels in zip(batch, batch_labels):
optimizer.zero_grad()
output = self.model(self.feature_extractor(inputs))
loss = self.criterion(output, labels)
loss.backward()
optimizer.step()
if(epoch%10==0):
print('[%d] - Loss: %.3f' % (
epoch, loss.item()
))
def test_loop(self, n_support, optimizer): # we need optimizer to take one gradient step
inputs, targets = get_batch(test_people)
support_ind = list(np.random.choice(list(range(19)), replace=False, size=n_support))
query_ind = [i for i in range(19) if i not in support_ind]
x_all = inputs.cuda()
y_all = targets.cuda()
x_support = inputs[:,support_ind,:,:,:].cuda()
y_support = targets[:,support_ind].cuda()
x_query = inputs[:,query_ind,:,:,:].cuda()
y_query = targets[:,query_ind].cuda()
# choose a random test person
        n = np.random.randint(0, len(test_people))  # high is exclusive, so every test person can be drawn
optimizer.zero_grad()
z_support = self.feature_extractor(x_support[n]).detach()
output_support = self.model(z_support).squeeze()
loss = self.criterion(output_support, y_support[n])
loss.backward()
optimizer.step()
self.feature_extractor.eval()
self.model.eval()
z_all = self.feature_extractor(x_all[n]).detach()
output_all = self.model(z_all).squeeze()
return self.criterion(output_all, y_all[n])
def save_checkpoint(self, checkpoint):
torch.save({'feature_extractor': self.feature_extractor.state_dict(), 'model':self.model.state_dict()}, checkpoint)
def load_checkpoint(self, checkpoint):
ckpt = torch.load(checkpoint)
self.feature_extractor.load_state_dict(ckpt['feature_extractor'])
self.model.load_state_dict(ckpt['model'])
| 3,110 | 33.955056 | 123 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/DKT_regression.py | ## Original packages
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import math
import torch.nn.functional as F
## Our packages
import gpytorch
from time import gmtime, strftime
import random
from statistics import mean
from data.qmul_loader import get_batch, train_people, test_people
from configs import kernel_type
class DKT(nn.Module):
def __init__(self, backbone):
super(DKT, self).__init__()
## GP parameters
self.feature_extractor = backbone
self.get_model_likelihood_mll() #Init model, likelihood, and mll
def get_model_likelihood_mll(self, train_x=None, train_y=None):
if(train_x is None): train_x=torch.ones(19, 2916).cuda()
if(train_y is None): train_y=torch.ones(19).cuda()
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPLayer(train_x=train_x, train_y=train_y, likelihood=likelihood, kernel=kernel_type)
self.model = model.cuda()
self.likelihood = likelihood.cuda()
self.mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self.model).cuda()
self.mse = nn.MSELoss()
return self.model, self.likelihood, self.mll
def set_forward(self, x, is_feature=False):
pass
def set_forward_loss(self, x):
pass
def train_loop(self, epoch, optimizer):
batch, batch_labels = get_batch(train_people)
batch, batch_labels = batch.cuda(), batch_labels.cuda()
for inputs, labels in zip(batch, batch_labels):
optimizer.zero_grad()
z = self.feature_extractor(inputs)
self.model.set_train_data(inputs=z, targets=labels)
predictions = self.model(z)
loss = -self.mll(predictions, self.model.train_targets)
loss.backward()
optimizer.step()
mse = self.mse(predictions.mean, labels)
if (epoch%10==0):
print('[%d] - Loss: %.3f MSE: %.3f noise: %.3f' % (
epoch, loss.item(), mse.item(),
self.model.likelihood.noise.item()
))
def test_loop(self, n_support, optimizer=None): # no optimizer needed for GP
inputs, targets = get_batch(test_people)
support_ind = list(np.random.choice(list(range(19)), replace=False, size=n_support))
query_ind = [i for i in range(19) if i not in support_ind]
x_all = inputs.cuda()
y_all = targets.cuda()
x_support = inputs[:,support_ind,:,:,:].cuda()
y_support = targets[:,support_ind].cuda()
x_query = inputs[:,query_ind,:,:,:]
y_query = targets[:,query_ind].cuda()
# choose a random test person
        n = np.random.randint(0, len(test_people))  # high is exclusive, so every test person can be drawn
z_support = self.feature_extractor(x_support[n]).detach()
self.model.set_train_data(inputs=z_support, targets=y_support[n], strict=False)
self.model.eval()
self.feature_extractor.eval()
self.likelihood.eval()
with torch.no_grad():
z_query = self.feature_extractor(x_all[n]).detach()
pred = self.likelihood(self.model(z_query))
lower, upper = pred.confidence_region() #2 standard deviations above and below the mean
mse = self.mse(pred.mean, y_all[n])
return mse
def save_checkpoint(self, checkpoint):
# save state
gp_state_dict = self.model.state_dict()
likelihood_state_dict = self.likelihood.state_dict()
nn_state_dict = self.feature_extractor.state_dict()
torch.save({'gp': gp_state_dict, 'likelihood': likelihood_state_dict, 'net':nn_state_dict}, checkpoint)
def load_checkpoint(self, checkpoint):
ckpt = torch.load(checkpoint)
self.model.load_state_dict(ckpt['gp'])
self.likelihood.load_state_dict(ckpt['likelihood'])
self.feature_extractor.load_state_dict(ckpt['net'])
class ExactGPLayer(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood, kernel='linear'):
super(ExactGPLayer, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
## RBF kernel
if(kernel=='rbf' or kernel=='RBF'):
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
## Spectral kernel
elif(kernel=='spectral'):
self.covar_module = gpytorch.kernels.SpectralMixtureKernel(num_mixtures=4, ard_num_dims=2916)
else:
raise ValueError("[ERROR] the kernel '" + str(kernel) + "' is not supported for regression, use 'rbf' or 'spectral'.")
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
| 4,900 | 36.7 | 130 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/matchingnet.py | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
import utils
import copy
class MatchingNet(MetaTemplate):
def __init__(self, model_func, n_way, n_support, n_query=None):
super(MatchingNet, self).__init__( model_func, n_way, n_support)
self.loss_fn = nn.NLLLoss()
self.FCE = FullyContextualEmbedding(self.feat_dim)
self.G_encoder = nn.LSTM(self.feat_dim, self.feat_dim, 1, batch_first=True, bidirectional=True)
self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)  # explicit dim avoids the deprecated implicit-dim behaviour
def encode_training_set(self, S, G_encoder = None):
if G_encoder is None:
G_encoder = self.G_encoder
out_G = G_encoder(S.unsqueeze(0))[0]
out_G = out_G.squeeze(0)
G = S + out_G[:,:S.size(1)] + out_G[:,S.size(1):]
G_norm = torch.norm(G,p=2, dim =1).unsqueeze(1).expand_as(G)
G_normalized = G.div(G_norm+ 0.00001)
return G, G_normalized
def get_logprobs(self, f, G, G_normalized, Y_S, FCE = None):
if FCE is None:
FCE = self.FCE
F = FCE(f, G)
F_norm = torch.norm(F,p=2, dim =1).unsqueeze(1).expand_as(F)
F_normalized = F.div(F_norm+ 0.00001)
        #scores = F.mm(G_normalized.transpose(0,1))  # the implementation of Ross et al., but inconsistent with the original paper: large-norm features would dominate
        scores = self.relu(F_normalized.mm(G_normalized.transpose(0, 1))) * 100  # the original paper uses cosine similarity; scaling by 100 sharpens the highest probability after softmax
softmax = self.softmax(scores)
logprobs =(softmax.mm(Y_S)+1e-6).log()
return logprobs
def set_forward(self, x, is_feature = False):
z_support, z_query = self.parse_feature(x,is_feature)
z_support = z_support.contiguous().view( self.n_way* self.n_support, -1 )
z_query = z_query.contiguous().view( self.n_way* self.n_query, -1 )
G, G_normalized = self.encode_training_set( z_support)
y_s = torch.from_numpy(np.repeat(range( self.n_way ), self.n_support ))
Y_S = Variable( utils.one_hot(y_s, self.n_way ) ).cuda()
f = z_query
logprobs = self.get_logprobs(f, G, G_normalized, Y_S)
return logprobs
def set_forward_loss(self, x):
y_query = torch.from_numpy(np.repeat(range( self.n_way ), self.n_query ))
y_query = Variable(y_query.cuda())
logprobs = self.set_forward(x)
return self.loss_fn(logprobs, y_query )
def cuda(self):
super(MatchingNet, self).cuda()
self.FCE = self.FCE.cuda()
return self
class FullyContextualEmbedding(nn.Module):
def __init__(self, feat_dim):
super(FullyContextualEmbedding, self).__init__()
self.lstmcell = nn.LSTMCell(feat_dim*2, feat_dim)
        self.softmax = nn.Softmax(dim=1)
self.c_0 = Variable(torch.zeros(1,feat_dim))
self.feat_dim = feat_dim
#self.K = K
def forward(self, f, G):
h = f
c = self.c_0.expand_as(f)
G_T = G.transpose(0,1)
        K = G.size(0)  # Tuna: to be confirmed
for k in range(K):
logit_a = h.mm(G_T)
a = self.softmax(logit_a)
r = a.mm(G)
x = torch.cat((f, r),1)
h, c = self.lstmcell(x, (h, c))
h = h + f
return h
def cuda(self):
super(FullyContextualEmbedding, self).cuda()
self.c_0 = self.c_0.cuda()
return self
| 3,749 | 35.764706 | 199 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/hypernet_kernel.py | from copy import deepcopy
from typing import Optional, Tuple
import torch
from torch import nn
from methods.hypernets import HyperNetPOC
from methods.hypernets.utils import set_from_param_dict, accuracy_from_scores
from methods.kernel_convolutions import KernelConv
from methods.kernels import init_kernel_function
from methods.transformer import TransformerEncoder
class HyperShot(HyperNetPOC):
def __init__(
self, model_func: nn.Module, n_way: int, n_support: int, n_query: int,
params: "ArgparseHNParams", target_net_architecture: Optional[nn.Module] = None
):
super().__init__(
model_func, n_way, n_support, n_query, params=params, target_net_architecture=target_net_architecture
)
# TODO - check!!!
# Use support embeddings - concatenate them with kernel features
self.hn_use_support_embeddings: bool = params.hn_use_support_embeddings
# Remove self relations by matrix K multiplication
self.hn_no_self_relations: bool = params.hn_no_self_relations
self.kernel_function = init_kernel_function(
kernel_input_dim=self.feat_dim + self.n_way if self.attention_embedding else self.feat_dim,
params=params
)
# embedding size
# TODO - add attention based input also
self.embedding_size = self.init_embedding_size(params)
# I will be adding the kernel vector to the stacked images embeddings
# TODO: add/check changes for attention-like input
self.hn_kernel_invariance: bool = params.hn_kernel_invariance
if self.hn_kernel_invariance:
self.hn_kernel_invariance_type: str = params.hn_kernel_invariance_type
self.hn_kernel_invariance_pooling: str = params.hn_kernel_invariance_pooling
if self.hn_kernel_invariance_type == 'attention':
self.init_kernel_transformer_architecture(params)
else:
self.init_kernel_convolution_architecture(params)
self.query_relations_size = self.n_way * self.n_support_size_context
self.target_net_architecture = target_net_architecture or self.build_target_net_architecture(params)
self.init_hypernet_modules()
def init_embedding_size(self, params) -> int:
if params.hn_use_support_embeddings:
support_embeddings_size = self.feat_dim * self.n_way * self.n_support_size_context
else:
support_embeddings_size = 0
if params.hn_kernel_invariance:
if params.hn_kernel_invariance_type == 'attention':
return support_embeddings_size + (self.n_way * self.n_support_size_context)
else:
return support_embeddings_size + params.hn_kernel_convolution_output_dim
else:
if params.hn_no_self_relations:
return support_embeddings_size + (
((self.n_way * self.n_support_size_context) ** 2) - (
self.n_way * self.n_support_size_context))
else:
return support_embeddings_size + ((self.n_way * self.n_support_size_context) ** 2)
@property
def n_support_size_context(self) -> int:
return 1 if self.sup_aggregation in ["mean", "min_pooling", "max_pooling"] else self.n_support
def build_target_net_architecture(self, params) -> nn.Module:
tn_hidden_size = params.hn_tn_hidden_size
layers = []
if params.hn_use_support_embeddings:
common_insize = ((self.n_way * self.n_support_size_context) + self.feat_dim)
else:
common_insize = (self.n_way * self.n_support_size_context)
for i in range(params.hn_tn_depth):
is_final = i == (params.hn_tn_depth - 1)
insize = common_insize if i == 0 else tn_hidden_size
outsize = self.n_way if is_final else tn_hidden_size
layers.append(nn.Linear(insize, outsize))
if not is_final:
layers.append(nn.ReLU())
res = nn.Sequential(*layers)
print(res)
return res
def maybe_aggregate_support_feature(self, support_feature: torch.Tensor) -> torch.Tensor:
"""
        Aggregate the per-class support embeddings (mean / min / max pooling) when n_support > 1.
"""
if self.n_support > 1:
if self.sup_aggregation == 'mean':
return torch.mean(support_feature, axis=1).reshape(self.n_way, 1, -1)
elif self.sup_aggregation == 'max_pooling':
pooled, _ = torch.max(support_feature, axis=1)
pooled = pooled.reshape(self.n_way, 1, -1)
return pooled
elif self.sup_aggregation == 'min_pooling':
pooled, _ = torch.min(support_feature, axis=1)
pooled = pooled.reshape(self.n_way, 1, -1)
return pooled
return support_feature
def parse_feature(self, x, is_feature) -> Tuple[torch.Tensor, torch.Tensor]:
support_feature, query_feature = super().parse_feature(x, is_feature)
support_feature = self.maybe_aggregate_support_feature(support_feature)
return support_feature, query_feature
def init_kernel_convolution_architecture(self, params):
# TODO - add convolution-based approach
self.kernel_2D_convolution: bool = True
self.kernel_conv: nn.Module = KernelConv(self.n_support, params.hn_kernel_convolution_output_dim)
def init_kernel_transformer_architecture(self, params):
kernel_transformer_input_dim: int = self.n_way * self.n_support_size_context
self.kernel_transformer_encoder: nn.Module = TransformerEncoder(
num_layers=params.kernel_transformer_layers_no,
input_dim=kernel_transformer_input_dim,
num_heads=params.kernel_transformer_heads_no,
dim_feedforward=params.kernel_transformer_feedforward_dim
)
def build_relations_features(self, support_feature: torch.Tensor,
feature_to_classify: torch.Tensor) -> torch.Tensor:
supp_way, n_support, supp_feat = support_feature.shape
n_examples, feat_dim = feature_to_classify.shape
support_features = support_feature.reshape(supp_way * n_support, supp_feat)
kernel_values_tensor = self.kernel_function.forward(support_features, feature_to_classify)
relations = kernel_values_tensor.T
return relations
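    # Shape sketch (illustrative note): support_features is [n_way * n_support, feat] and
    # feature_to_classify is [n_examples, feat]; the kernel returns [n_way * n_support, n_examples],
    # so after the transpose each example is represented by its vector of kernel
    # similarities to every support point.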
def build_kernel_features_embedding(self, support_feature: torch.Tensor) -> torch.Tensor:
"""
x_support: [n_way, n_support, hidden_size]
"""
supp_way, n_support, supp_feat = support_feature.shape
support_features = support_feature.reshape(supp_way * n_support, supp_feat)
support_features_copy = torch.clone(support_features)
kernel_values_tensor = self.kernel_function.forward(support_features, support_features_copy)
        # Remove self-relations: zero out the diagonal, then drop the zeroed entries
        # (note: this assumes no off-diagonal kernel value happens to be exactly 0.0)
        if self.hn_no_self_relations:
            zero_diagonal_matrix = torch.ones_like(kernel_values_tensor).cuda() - torch.eye(
                kernel_values_tensor.shape[0]).cuda()
            kernel_values_tensor = kernel_values_tensor * zero_diagonal_matrix
            return torch.flatten(kernel_values_tensor[kernel_values_tensor != 0.0])
if self.hn_kernel_invariance:
# TODO - check!!!
if self.hn_kernel_invariance_type == 'attention':
kernel_values_tensor = torch.unsqueeze(kernel_values_tensor.T, 0)
encoded = self.kernel_transformer_encoder.forward(kernel_values_tensor)
if self.hn_kernel_invariance_pooling == 'min':
invariant_kernel_values, _ = torch.min(encoded, 1)
elif self.hn_kernel_invariance_pooling == 'max':
invariant_kernel_values, _ = torch.max(encoded, 1)
else:
invariant_kernel_values = torch.mean(encoded, 1)
return invariant_kernel_values
else:
# TODO - add convolutional approach
kernel_values_tensor = torch.unsqueeze(torch.unsqueeze(kernel_values_tensor.T, 0), 0)
invariant_kernel_values = torch.flatten(self.kernel_conv.forward(kernel_values_tensor))
return invariant_kernel_values
return kernel_values_tensor
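    # Note on the invariance options above (illustrative): both the transformer path with
    # min/max/mean pooling and the convolutional path aim to make the hypernetwork input
    # insensitive to the ordering of support examples in the kernel matrix.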
def generate_target_net(self, support_feature: torch.Tensor) -> nn.Module:
"""
x_support: [n_way, n_support, hidden_size]
"""
embedding = self.build_kernel_features_embedding(support_feature)
embedding = embedding.reshape(1, self.embedding_size)
# TODO - check!!!
if self.hn_use_support_embeddings:
embedding = torch.cat((embedding, torch.flatten(support_feature)), 0)
root = self.hypernet_neck(embedding)
network_params = {
name.replace("-", "."): param_net(root).reshape(self.target_net_param_shapes[name])
for name, param_net in self.hypernet_heads.items()
}
tn = deepcopy(self.target_net_architecture)
set_from_param_dict(tn, network_params)
tn.support_feature = support_feature
return tn.cuda()
def set_forward(self, x: torch.Tensor, is_feature: bool = False, permutation_sanity_check: bool = False):
support_feature, query_feature = self.parse_feature(x, is_feature)
classifier = self.generate_target_net(support_feature)
query_feature = query_feature.reshape(
-1, query_feature.shape[-1]
)
relational_query_feature = self.build_relations_features(support_feature, query_feature)
# TODO - check!!!
if self.hn_use_support_embeddings:
relational_query_feature = torch.cat((relational_query_feature, query_feature), 1)
y_pred = classifier(relational_query_feature)
if permutation_sanity_check:
### random permutation test
perm = torch.randperm(len(query_feature))
rev_perm = torch.argsort(perm)
query_perm = query_feature[perm]
relation_perm = self.build_relations_features(support_feature, query_perm)
assert torch.equal(relation_perm[rev_perm], relational_query_feature)
y_pred_perm = classifier(relation_perm)
assert torch.equal(y_pred_perm[rev_perm], y_pred)
return y_pred
def set_forward_with_adaptation(self, x: torch.Tensor):
y_pred, metrics = super().set_forward_with_adaptation(x)
support_feature, query_feature = self.parse_feature(x, is_feature=False)
query_feature = query_feature.reshape(
-1, query_feature.shape[-1]
)
relational_query_feature = self.build_relations_features(support_feature, query_feature)
metrics["accuracy/val_relational"] = accuracy_from_scores(relational_query_feature, self.n_way, self.n_query)
return y_pred, metrics
def set_forward_loss(
self, x: torch.Tensor, detach_ft_hn: bool = False, detach_ft_tn: bool = False,
train_on_support: bool = True,
train_on_query: bool = True
):
nw, ne, c, h, w = x.shape
support_feature, query_feature = self.parse_feature(x, is_feature=False)
# TODO: add/check changes for attention-like input
if self.attention_embedding:
y_support = self.get_labels(support_feature)
y_query = self.get_labels(query_feature)
y_support_one_hot = torch.nn.functional.one_hot(y_support)
support_feature_with_classes_one_hot = torch.cat((support_feature, y_support_one_hot), 2)
y_query_zeros = torch.zeros((y_query.shape[0], y_query.shape[1], y_support_one_hot.shape[2]))
query_feature_with_zeros = torch.cat((query_feature, y_query_zeros), 2)
feature_to_hn = support_feature_with_classes_one_hot.detach() if detach_ft_hn else support_feature_with_classes_one_hot
query_feature_to_hn = query_feature_with_zeros
else:
feature_to_hn = support_feature.detach() if detach_ft_hn else support_feature
query_feature_to_hn = query_feature
classifier = self.generate_target_net(feature_to_hn)
feature_to_classify = []
y_to_classify_gt = []
if train_on_support:
feature_to_classify.append(
support_feature.reshape(
(self.n_way * self.n_support_size_context), support_feature.shape[-1]
)
)
y_support = self.get_labels(support_feature)
y_to_classify_gt.append(y_support.reshape(self.n_way * self.n_support_size_context))
if train_on_query:
feature_to_classify.append(
query_feature.reshape(
(self.n_way * (ne - self.n_support)), query_feature.shape[-1]
)
)
y_query = self.get_labels(query_feature)
y_to_classify_gt.append(y_query.reshape(self.n_way * (ne - self.n_support)))
feature_to_classify = torch.cat(feature_to_classify)
y_to_classify_gt = torch.cat(y_to_classify_gt)
relational_feature_to_classify = self.build_relations_features(support_feature, feature_to_classify)
if detach_ft_tn:
relational_feature_to_classify = relational_feature_to_classify.detach()
if self.hn_use_support_embeddings:
relational_feature_to_classify = torch.cat((relational_feature_to_classify, feature_to_classify), 1)
y_pred = classifier(relational_feature_to_classify)
return self.loss_fn(y_pred, y_to_classify_gt)
| 13,719 | 43.983607 | 131 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/hypermaml.py | from collections import defaultdict
from copy import deepcopy
from time import time
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import backbone
from methods.hypernets.utils import get_param_dict, accuracy_from_scores
from methods.maml import MAML
class HyperNet(nn.Module):
def __init__(self, hn_hidden_size, n_way, embedding_size, feat_dim, out_neurons, params):
super(HyperNet, self).__init__()
self.hn_head_len = params.hn_head_len
head = [nn.Linear(embedding_size, hn_hidden_size), nn.ReLU()]
if self.hn_head_len > 2:
for i in range(self.hn_head_len - 2):
head.append(nn.Linear(hn_hidden_size, hn_hidden_size))
head.append(nn.ReLU())
self.head = nn.Sequential(*head)
tail = [nn.Linear(hn_hidden_size, out_neurons)]
self.tail = nn.Sequential(*tail)
def forward(self, x):
out = self.head(x)
out = self.tail(out)
return out
class HyperMAML(MAML):
def __init__(self, model_func, n_way, n_support, n_query, params=None, approx=False):
super(HyperMAML, self).__init__(model_func, n_way, n_support, n_query, params=params)
self.loss_fn = nn.CrossEntropyLoss()
self.hn_tn_hidden_size = params.hn_tn_hidden_size
self.hn_tn_depth = params.hn_tn_depth
self._init_classifier()
self.enhance_embeddings = params.hm_enhance_embeddings
self.n_task = 4
self.task_update_num = 5
self.train_lr = 0.01
self.approx = approx # first order approx.
self.hn_sup_aggregation = params.hn_sup_aggregation
self.hn_hidden_size = params.hn_hidden_size
self.hm_lambda = params.hm_lambda
self.hm_save_delta_params = params.hm_save_delta_params
self.hm_use_class_batch_input = params.hm_use_class_batch_input
self.hn_adaptation_strategy = params.hn_adaptation_strategy
self.hm_support_set_loss = params.hm_support_set_loss
self.hm_maml_warmup = params.hm_maml_warmup
self.hm_maml_warmup_epochs = params.hm_maml_warmup_epochs
self.hm_maml_warmup_switch_epochs = params.hm_maml_warmup_switch_epochs
self.hm_maml_update_feature_net = params.hm_maml_update_feature_net
self.hm_update_operator = params.hm_update_operator
self.hm_load_feature_net = params.hm_load_feature_net
self.hm_feature_net_path = params.hm_feature_net_path
self.hm_detach_feature_net = params.hm_detach_feature_net
self.hm_detach_before_hyper_net = params.hm_detach_before_hyper_net
self.hm_set_forward_with_adaptation = params.hm_set_forward_with_adaptation
self.hn_val_lr = params.hn_val_lr
self.hn_val_epochs = params.hn_val_epochs
self.hn_val_optim = params.hn_val_optim
self.alpha = 0
self.hn_alpha_step = params.hn_alpha_step
if self.hn_adaptation_strategy == 'increasing_alpha' and self.hn_alpha_step < 0:
raise ValueError('hn_alpha_step is not positive!')
self.single_test = False
self.epoch = -1
self.start_epoch = -1
self.stop_epoch = -1
self.calculate_embedding_size()
self._init_hypernet_modules(params)
self._init_feature_net()
# print(self)
def _init_feature_net(self):
if self.hm_load_feature_net:
print(f'loading feature net model from location: {self.hm_feature_net_path}')
model_dict = torch.load(self.hm_feature_net_path)
self.feature.load_state_dict(model_dict['state'])
def _init_classifier(self):
assert self.hn_tn_hidden_size % self.n_way == 0, f"hn_tn_hidden_size {self.hn_tn_hidden_size} should be the multiple of n_way {self.n_way}"
layers = []
for i in range(self.hn_tn_depth):
in_dim = self.feat_dim if i == 0 else self.hn_tn_hidden_size
out_dim = self.n_way if i == (self.hn_tn_depth - 1) else self.hn_tn_hidden_size
linear = backbone.Linear_fw(in_dim, out_dim)
linear.bias.data.fill_(0)
layers.append(linear)
self.classifier = nn.Sequential(*layers)
def _init_hypernet_modules(self, params):
target_net_param_dict = get_param_dict(self.classifier)
target_net_param_dict = {
name.replace(".", "-"): p
            # replace dots with hyphens because torch doesn't like dots in module names
for name, p in target_net_param_dict.items()
}
self.target_net_param_shapes = {
name: p.shape
for (name, p)
in target_net_param_dict.items()
}
self.hypernet_heads = nn.ModuleDict()
for name, param in target_net_param_dict.items():
if self.hm_use_class_batch_input and name[-4:] == 'bias':
continue
bias_size = param.shape[0] // self.n_way
head_in = self.embedding_size
head_out = (param.numel() // self.n_way) + bias_size if self.hm_use_class_batch_input else param.numel()
self.hypernet_heads[name] = HyperNet(self.hn_hidden_size, self.n_way, head_in, self.feat_dim, head_out,
params)
def calculate_embedding_size(self):
n_classes_in_embedding = 1 if self.hm_use_class_batch_input else self.n_way
n_support_per_class = 1 if self.hn_sup_aggregation == 'mean' else self.n_support
        single_support_embedding_len = (self.feat_dim + self.n_way + 1) if self.enhance_embeddings else self.feat_dim
self.embedding_size = n_classes_in_embedding * n_support_per_class * single_support_embedding_len
def apply_embeddings_strategy(self, embeddings):
if self.hn_sup_aggregation == 'mean':
new_embeddings = torch.zeros(self.n_way, *embeddings.shape[1:])
for i in range(self.n_way):
lower = i * self.n_support
upper = (i + 1) * self.n_support
new_embeddings[i] = embeddings[lower:upper, :].mean(dim=0)
return new_embeddings.cuda()
return embeddings
def get_support_data_labels(self):
return torch.from_numpy(np.repeat(range(self.n_way), self.n_support)).cuda() # labels for support data
def get_hn_delta_params(self, support_embeddings):
if self.hm_detach_before_hyper_net:
support_embeddings = support_embeddings.detach()
if self.hm_use_class_batch_input:
delta_params_list = []
for name, param_net in self.hypernet_heads.items():
support_embeddings_resh = support_embeddings.reshape(
self.n_way, -1
)
delta_params = param_net(support_embeddings_resh)
bias_neurons_num = self.target_net_param_shapes[name][0] // self.n_way
if self.hn_adaptation_strategy == 'increasing_alpha' and self.alpha < 1:
delta_params = delta_params * self.alpha
weights_delta = delta_params[:, :-bias_neurons_num]
bias_delta = delta_params[:, -bias_neurons_num:].flatten()
delta_params_list.extend([weights_delta, bias_delta])
return delta_params_list
else:
delta_params_list = []
for name, param_net in self.hypernet_heads.items():
flattened_embeddings = support_embeddings.flatten()
delta = param_net(flattened_embeddings)
if name in self.target_net_param_shapes.keys():
delta = delta.reshape(self.target_net_param_shapes[name])
if self.hn_adaptation_strategy == 'increasing_alpha' and self.alpha < 1:
delta = self.alpha * delta
delta_params_list.append(delta)
return delta_params_list
def _update_weight(self, weight, update_value):
if self.hm_update_operator == 'minus':
if weight.fast is None:
weight.fast = weight - update_value
else:
weight.fast = weight.fast - update_value
elif self.hm_update_operator == 'plus':
if weight.fast is None:
weight.fast = weight + update_value
else:
weight.fast = weight.fast + update_value
elif self.hm_update_operator == 'multiply':
if weight.fast is None:
weight.fast = weight * update_value
else:
weight.fast = weight.fast * update_value
def _get_p_value(self):
if self.epoch < self.hm_maml_warmup_epochs:
return 1.0
elif self.hm_maml_warmup_epochs <= self.epoch < self.hm_maml_warmup_epochs + self.hm_maml_warmup_switch_epochs:
return (self.hm_maml_warmup_switch_epochs + self.hm_maml_warmup_epochs - self.epoch) / (
self.hm_maml_warmup_switch_epochs + 1)
return 0.0
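    # Worked example of the warmup schedule (hypothetical values): with
    # hm_maml_warmup_epochs=100 and hm_maml_warmup_switch_epochs=50,
    # p = 1.0 for epochs 0..99 (pure MAML updates), then decays roughly linearly:
    # epoch 100 -> 50/51 ~ 0.98, ..., epoch 149 -> 1/51 ~ 0.02,
    # and p = 0.0 from epoch 150 onward (pure hypernetwork updates).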
def _update_network_weights(self, delta_params_list, support_embeddings, support_data_labels, train_stage=False):
if self.hm_maml_warmup and not self.single_test:
p = self._get_p_value()
if p > 0.0:
fast_parameters = []
if self.hm_maml_update_feature_net:
fet_fast_parameters = list(self.feature.parameters())
for weight in self.feature.parameters():
weight.fast = None
self.feature.zero_grad()
fast_parameters = fast_parameters + fet_fast_parameters
clf_fast_parameters = list(self.classifier.parameters())
for weight in self.classifier.parameters():
weight.fast = None
self.classifier.zero_grad()
fast_parameters = fast_parameters + clf_fast_parameters
for task_step in range(self.task_update_num):
scores = self.classifier(support_embeddings)
set_loss = self.loss_fn(scores, support_data_labels)
grad = torch.autograd.grad(set_loss, fast_parameters, create_graph=True,
allow_unused=True) # build full graph support gradient of gradient
if self.approx:
grad = [g.detach() for g in
grad] # do not calculate gradient of gradient if using first order approximation
if self.hm_maml_update_feature_net:
                    # update weights of the feature network
for k, weight in enumerate(self.feature.parameters()):
update_value = self.train_lr * p * grad[k]
self._update_weight(weight, update_value)
classifier_offset = len(fet_fast_parameters) if self.hm_maml_update_feature_net else 0
if p == 1:
# update weights of classifier network by adding gradient
for k, weight in enumerate(self.classifier.parameters()):
update_value = (self.train_lr * grad[classifier_offset + k])
self._update_weight(weight, update_value)
elif 0.0 < p < 1.0:
# update weights of classifier network by adding gradient and output of hypernetwork
for k, weight in enumerate(self.classifier.parameters()):
update_value = ((self.train_lr * p * grad[classifier_offset + k]) + (
(1 - p) * delta_params_list[k]))
self._update_weight(weight, update_value)
else:
for k, weight in enumerate(self.classifier.parameters()):
update_value = delta_params_list[k]
self._update_weight(weight, update_value)
else:
for k, weight in enumerate(self.classifier.parameters()):
update_value = delta_params_list[k]
self._update_weight(weight, update_value)
def _get_list_of_delta_params(self, maml_warmup_used, support_embeddings, support_data_labels):
if not maml_warmup_used:
if self.enhance_embeddings:
with torch.no_grad():
logits = self.classifier.forward(support_embeddings).detach()
logits = F.softmax(logits, dim=1)
labels = support_data_labels.view(support_embeddings.shape[0], -1)
support_embeddings = torch.cat((support_embeddings, logits, labels), dim=1)
for weight in self.parameters():
weight.fast = None
self.zero_grad()
support_embeddings = self.apply_embeddings_strategy(support_embeddings)
delta_params = self.get_hn_delta_params(support_embeddings)
if self.hm_save_delta_params and len(self.delta_list) == 0:
self.delta_list = [{'delta_params': delta_params}]
return delta_params
else:
return [torch.zeros(*i).cuda() for (_, i) in self.target_net_param_shapes.items()]
def forward(self, x):
out = self.feature.forward(x)
if self.hm_detach_feature_net:
out = out.detach()
scores = self.classifier.forward(out)
return scores
def set_forward(self, x, is_feature=False, train_stage=False):
""" 1. Get delta params from hypernetwork with support data.
2. Update target- network weights.
3. Forward with query data.
4. Return scores"""
        assert is_feature == False, 'MAML does not support fixed features'
x = x.cuda()
x_var = Variable(x)
support_data = x_var[:, :self.n_support, :, :, :].contiguous().view(self.n_way * self.n_support,
*x.size()[2:]) # support data
query_data = x_var[:, self.n_support:, :, :, :].contiguous().view(self.n_way * self.n_query,
*x.size()[2:]) # query data
support_data_labels = self.get_support_data_labels()
support_embeddings = self.feature(support_data)
if self.hm_detach_feature_net:
support_embeddings = support_embeddings.detach()
maml_warmup_used = (
(not self.single_test) and self.hm_maml_warmup and (self.epoch < self.hm_maml_warmup_epochs))
delta_params_list = self._get_list_of_delta_params(maml_warmup_used, support_embeddings, support_data_labels)
self._update_network_weights(delta_params_list, support_embeddings, support_data_labels, train_stage)
if self.hm_set_forward_with_adaptation and not train_stage:
scores = self.forward(support_data)
return scores, None
else:
if self.hm_support_set_loss and train_stage and not maml_warmup_used:
query_data = torch.cat((support_data, query_data))
scores = self.forward(query_data)
# sum of delta params for regularization
if self.hm_lambda != 0:
total_delta_sum = sum([delta_params.pow(2.0).sum() for delta_params in delta_params_list])
return scores, total_delta_sum
else:
return scores, None
    def set_forward_adaptation(self, x, is_feature=False):  # overrides parent function
        raise ValueError('MAML performs further adaptation simply by increasing task_update_num')
def set_forward_loss(self, x):
scores, total_delta_sum = self.set_forward(x, is_feature=False, train_stage=True)
query_data_labels = Variable(torch.from_numpy(np.repeat(range(self.n_way), self.n_query))).cuda()
if self.hm_support_set_loss:
support_data_labels = torch.from_numpy(np.repeat(range(self.n_way), self.n_support)).cuda()
query_data_labels = torch.cat((support_data_labels, query_data_labels))
loss = self.loss_fn(scores, query_data_labels)
if self.hm_lambda != 0:
loss = loss + self.hm_lambda * total_delta_sum
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy().flatten()
y_labels = query_data_labels.cpu().numpy()
top1_correct = np.sum(topk_ind == y_labels)
task_accuracy = (top1_correct / len(query_data_labels)) * 100
return loss, task_accuracy
def set_forward_loss_with_adaptation(self, x):
scores, _ = self.set_forward(x, is_feature=False, train_stage=False)
support_data_labels = Variable(torch.from_numpy(np.repeat(range(self.n_way), self.n_support))).cuda()
loss = self.loss_fn(scores, support_data_labels)
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy().flatten()
y_labels = support_data_labels.cpu().numpy()
top1_correct = np.sum(topk_ind == y_labels)
task_accuracy = (top1_correct / len(support_data_labels)) * 100
return loss, task_accuracy
    def train_loop(self, epoch, train_loader, optimizer):  # overrides parent function
print_freq = 10
avg_loss = 0
task_count = 0
loss_all = []
acc_all = []
optimizer.zero_grad()
self.delta_list = []
# train
for i, (x, _) in enumerate(train_loader):
self.n_query = x.size(1) - self.n_support
            assert self.n_way == x.size(0), "MAML does not support way change"
loss, task_accuracy = self.set_forward_loss(x)
avg_loss = avg_loss + loss.item() # .data[0]
loss_all.append(loss)
acc_all.append(task_accuracy)
task_count += 1
if task_count == self.n_task: # MAML update several tasks at one time
loss_q = torch.stack(loss_all).sum(0)
loss_q.backward()
optimizer.step()
task_count = 0
loss_all = []
optimizer.zero_grad()
if i % print_freq == 0:
print('Epoch {:d}/{:d} | Batch {:d}/{:d} | Loss {:f}'.format(self.epoch, self.stop_epoch, i,
len(train_loader),
avg_loss / float(i + 1)))
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
metrics = {"accuracy/train": acc_mean}
if self.hn_adaptation_strategy == 'increasing_alpha':
metrics['alpha'] = self.alpha
if self.hm_save_delta_params and len(self.delta_list) > 0:
delta_params = {"epoch": self.epoch, "delta_list": self.delta_list}
metrics['delta_params'] = delta_params
if self.alpha < 1:
self.alpha += self.hn_alpha_step
return metrics
    def test_loop(self, test_loader, return_std=False, return_time: bool = False):  # overrides parent function
acc_all = []
self.delta_list = []
acc_at = defaultdict(list)
iter_num = len(test_loader)
eval_time = 0
if self.hm_set_forward_with_adaptation:
for i, (x, _) in enumerate(test_loader):
self.n_query = x.size(1) - self.n_support
                assert self.n_way == x.size(0), "MAML does not support way change"
s = time()
acc_task, acc_at_metrics = self.set_forward_with_adaptation(x)
t = time()
for (k, v) in acc_at_metrics.items():
acc_at[k].append(v)
acc_all.append(acc_task)
eval_time += (t - s)
else:
for i, (x, _) in enumerate(test_loader):
self.n_query = x.size(1) - self.n_support
                assert self.n_way == x.size(0), f"MAML does not support way change, {self.n_way=}, {x.size(0)=}"
s = time()
correct_this, count_this = self.correct(x)
t = time()
acc_all.append(correct_this / count_this * 100)
eval_time += (t - s)
metrics = {
k: np.mean(v) if len(v) > 0 else 0
for (k, v) in acc_at.items()
}
num_tasks = len(acc_all)
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
acc_std = np.std(acc_all)
print('%d Test Acc = %4.2f%% +- %4.2f%%' % (iter_num, acc_mean, 1.96 * acc_std / np.sqrt(iter_num)))
print("Num tasks", num_tasks)
ret = [acc_mean]
if return_std:
ret.append(acc_std)
if return_time:
ret.append(eval_time)
ret.append(metrics)
return ret
def set_forward_with_adaptation(self, x: torch.Tensor):
self_copy = deepcopy(self)
# deepcopy does not copy "fast" parameters so it should be done manually
for param1, param2 in zip(self.parameters(), self_copy.parameters()):
if hasattr(param1, 'fast'):
if param1.fast is not None:
param2.fast = param1.fast.clone()
else:
param2.fast = None
metrics = {
"accuracy/val@-0": self_copy.query_accuracy(x)
}
val_opt_type = torch.optim.Adam if self.hn_val_optim == "adam" else torch.optim.SGD
val_opt = val_opt_type(self_copy.parameters(), lr=self.hn_val_lr)
if self.hn_val_epochs > 0:
for i in range(1, self.hn_val_epochs + 1):
self_copy.train()
val_opt.zero_grad()
loss, val_support_acc = self_copy.set_forward_loss_with_adaptation(x)
loss.backward()
val_opt.step()
self_copy.eval()
metrics[f"accuracy/val_support_acc@-{i}"] = val_support_acc
metrics[f"accuracy/val_loss@-{i}"] = loss.item()
metrics[f"accuracy/val@-{i}"] = self_copy.query_accuracy(x)
# free CUDA memory by deleting "fast" parameters
for param in self_copy.parameters():
param.fast = None
return metrics[f"accuracy/val@-{self.hn_val_epochs}"], metrics
def query_accuracy(self, x: torch.Tensor) -> float:
scores, _ = self.set_forward(x, train_stage=True)
return 100 * accuracy_from_scores(scores, n_way=self.n_way, n_query=self.n_query)
def get_logits(self, x):
self.n_query = x.size(1) - self.n_support
logits, _ = self.set_forward(x)
return logits
def correct(self, x):
scores, _ = self.set_forward(x)
y_query = np.repeat(range(self.n_way), self.n_query)
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy()
top1_correct = np.sum(topk_ind[:, 0] == y_query)
return float(top1_correct), len(y_query)
| 23,248 | 39.503484 | 147 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/utils.py | from typing import Dict
import numpy as np
import torch
from torch import nn
def get_param_dict(net: nn.Module) -> Dict[str, nn.Parameter]:
"""A dict of named parameters of an nn.Module"""
return {
n: p
for (n, p) in net.named_parameters()
}
def set_from_param_dict(module: nn.Module, param_dict: Dict[str, torch.Tensor]):
"""
Sets the values of `module` parameters with the values from `param_dict`.
Works just like:
nn.Module.load_state_dict()
with the exception that those parameters are not tunable by default, because
we set their values to bare tensors instead of nn.Parameter.
This means that a network with such params cannot be trained directly with an optimizer.
However, gradients may still flow through those tensors, so it's useful for the use-case of hypernetworks.
"""
for (sdk, v) in param_dict.items():
keys = sdk.split(".")
param_name = keys[-1]
m = module
for k in keys[:-1]:
            try:
                k = int(k)
                m = m[k]
            except (ValueError, TypeError):  # key is an attribute name, not a container index
                m = getattr(m, k)
param = getattr(m, param_name)
assert param.shape == v.shape, (sdk, param.shape, v.shape)
delattr(m, param_name)
setattr(m, param_name, v)
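# A minimal usage sketch (illustrative only; `_demo_set_from_param_dict` is not part of the original API):
def _demo_set_from_param_dict():
    net = nn.Sequential(nn.Linear(4, 3), nn.ReLU(), nn.Linear(3, 2))
    # build replacement tensors keyed by the module's own parameter names
    new_params = {name: torch.zeros_like(p) for name, p in net.named_parameters()}
    set_from_param_dict(net, new_params)
    # the weights are now bare (non-Parameter) tensors holding the new values
    assert (net[0].weight == 0).all() and not isinstance(net[0].weight, nn.Parameter)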
class SinActivation(nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.sin(x)
def accuracy_from_scores(scores: torch.Tensor, n_way: int, n_query: int) -> float:
"""Assumes that scores are for examples sorted by class!"""
s_nq, s_nw = scores.shape
assert (s_nq, s_nw) == (n_way * n_query, n_way), ((s_nq, s_nw), (n_query, n_way))
y_query = np.repeat(range(n_way), n_query)
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy()
top1_correct = np.sum(topk_ind[:, 0] == y_query)
correct_this = float(top1_correct)
count_this = len(y_query)
return correct_this / count_this
def kl_diag_gauss_with_standard_gauss(mean, logvar):
    """KL(N(mu, diag(sigma^2)) || N(0, I)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2),
    summed over all tensors in `mean` / `logvar`, flattened and concatenated."""
    mean_flat = torch.cat([t.view(-1) for t in mean])
    logvar_flat = torch.cat([t.view(-1) for t in logvar])
    var_flat = logvar_flat.exp()
    return -0.5 * torch.sum(1 + logvar_flat - mean_flat.pow(2) - var_flat)
def reparameterize(mu, logvar):
    """Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I), so gradients flow through mu and logvar."""
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
return eps * std + mu | 2,398 | 31.418919 | 110 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/hypernet_poc.py | from collections import defaultdict
from copy import deepcopy
from typing import Dict, Optional
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from methods.hypernets.utils import get_param_dict, set_from_param_dict, SinActivation, accuracy_from_scores
from methods.meta_template import MetaTemplate
from methods.transformer import TransformerEncoder
ALLOWED_AGGREGATIONS = ["concat", "mean", "max_pooling", "min_pooling"]
class HyperNetPOC(MetaTemplate):
def __init__(
self, model_func: nn.Module, n_way: int, n_support: int, n_query: int,
params: "ArgparseHNParams", target_net_architecture: Optional[nn.Module] = None
):
super().__init__(model_func, n_way, n_support)
self.feat_dim = self.feature.final_feat_dim = 64 if params.dataset == "cross_char" else 1600
self.n_query = n_query
self.taskset_size: int = params.hn_taskset_size
self.taskset_print_every: int = params.hn_taskset_print_every
self.hn_hidden_size: int = params.hn_hidden_size
self.attention_embedding: bool = params.hn_attention_embedding
self.sup_aggregation: str = params.hn_sup_aggregation
self.detach_ft_in_hn: int = params.hn_detach_ft_in_hn
self.detach_ft_in_tn: int = params.hn_detach_ft_in_tn
self.hn_neck_len: int = params.hn_neck_len
self.hn_head_len: int = params.hn_head_len
self.taskset_repeats_config: str = params.hn_taskset_repeats
self.hn_dropout: float = params.hn_dropout
self.hn_val_epochs: int = params.hn_val_epochs
self.hn_val_lr: float = params.hn_val_lr
self.hn_val_optim: float = params.hn_val_optim
self.embedding_size = self.init_embedding_size(params)
self.target_net_architecture = target_net_architecture or self.build_target_net_architecture(params)
self.loss_fn = nn.CrossEntropyLoss()
self.init_hypernet_modules()
if self.attention_embedding:
self.init_transformer_architecture(params)
print(self.target_net_architecture)
def init_embedding_size(self, params) -> int:
if self.attention_embedding:
return (self.feat_dim + self.n_way) * self.n_way * self.n_support
else:
assert self.sup_aggregation in ALLOWED_AGGREGATIONS
if self.sup_aggregation == "concat":
return self.feat_dim * self.n_way * self.n_support
elif self.sup_aggregation in ["mean", "max_pooling", "min_pooling"]:
return self.feat_dim * self.n_way
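    # Worked sizes (illustrative, for 5-way 5-shot with feat_dim=1600):
    # "concat" -> 1600 * 5 * 5 = 40000; "mean"/"max_pooling"/"min_pooling" -> 1600 * 5 = 8000;
    # with attention_embedding -> (1600 + 5) * 5 * 5 = 40125.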
def build_target_net_architecture(self, params) -> nn.Module:
tn_hidden_size = params.hn_tn_hidden_size
layers = []
for i in range(params.hn_tn_depth):
is_final = i == (params.hn_tn_depth - 1)
insize = self.feat_dim if i == 0 else tn_hidden_size
outsize = self.n_way if is_final else tn_hidden_size
layers.append(nn.Linear(insize, outsize))
if not is_final:
layers.append(nn.ReLU())
res = nn.Sequential(*layers)
print(res)
return res
def init_transformer_architecture(self, params):
transformer_input_dim: int = self.feat_dim + self.n_way
self.transformer_encoder: nn.Module = TransformerEncoder(
num_layers=params.hn_transformer_layers_no, input_dim=transformer_input_dim,
num_heads=params.hn_transformer_heads_no, dim_feedforward=params.hn_transformer_feedforward_dim)
def init_hypernet_modules(self):
target_net_param_dict = get_param_dict(self.target_net_architecture)
target_net_param_dict = {
name.replace(".", "-"): p
            # replace dots with hyphens because torch doesn't like dots in module names
for name, p in target_net_param_dict.items()
}
self.target_net_param_shapes = {
name: p.shape
for (name, p)
in target_net_param_dict.items()
}
self.init_hypernet_neck()
self.hypernet_heads = nn.ModuleDict()
assert self.hn_head_len >= 1, "Head len must be >= 1!"
for name, param in target_net_param_dict.items():
head_in = self.embedding_size if self.hn_neck_len == 0 else self.hn_hidden_size
head_out = param.numel()
head_modules = []
for i in range(self.hn_head_len):
in_size = head_in if i == 0 else self.hn_hidden_size
is_final = (i == (self.hn_head_len - 1))
out_size = head_out if is_final else self.hn_hidden_size
head_modules.extend([nn.Dropout(self.hn_dropout), nn.Linear(in_size, out_size)])
if not is_final:
head_modules.append(nn.ReLU())
self.hypernet_heads[name] = nn.Sequential(*head_modules)
def init_hypernet_neck(self):
neck_modules = []
if self.hn_neck_len > 0:
neck_modules = [
nn.Linear(self.embedding_size, self.hn_hidden_size),
nn.ReLU()
]
for _ in range(self.hn_neck_len - 1):
neck_modules.extend(
[nn.Dropout(self.hn_dropout), nn.Linear(self.hn_hidden_size, self.hn_hidden_size), nn.ReLU()]
)
neck_modules = neck_modules[:-1] # remove the last ReLU
self.hypernet_neck = nn.Sequential(*neck_modules)
def taskset_repeats(self, epoch: int):
epoch_ceiling_to_n_repeats = {
int(kv.split(":")[0]): int(kv.split(":")[1])
for kv in self.taskset_repeats_config.split("-")
}
epoch_ceiling_to_n_repeats = {k: v for (k, v) in epoch_ceiling_to_n_repeats.items() if k > epoch}
if len(epoch_ceiling_to_n_repeats) == 0:
return 1
return epoch_ceiling_to_n_repeats[min(epoch_ceiling_to_n_repeats.keys())]
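    # Example (hypothetical config): hn_taskset_repeats="10:5-20:2" means: repeat each
    # taskset 5 times while epoch < 10, 2 times while epoch < 20, and once afterwards.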
def get_labels(self, x: torch.Tensor) -> torch.Tensor:
"""
x: [n_way, n_shot, hidden_size]
"""
ys = torch.tensor(list(range(x.shape[0]))).reshape(len(x), 1)
ys = ys.repeat(1, x.shape[1]).to(x.device)
return ys.cuda()
    def maybe_aggregate_support_feature(self, support_feature: torch.Tensor) -> torch.Tensor:
        way, n_support, feat = support_feature.shape
        if self.sup_aggregation == "concat":
            features = support_feature.reshape(way * n_support, feat)
        elif self.sup_aggregation == "sum":
            features = support_feature.sum(dim=1)
            way, feat = features.shape
            assert (way, feat) == (self.n_way, self.feat_dim)
        elif self.sup_aggregation == "mean":
            features = support_feature.mean(dim=1)
            way, feat = features.shape
            assert (way, feat) == (self.n_way, self.feat_dim)
        elif self.sup_aggregation == "max_pooling":
            # "max_pooling"/"min_pooling" are in ALLOWED_AGGREGATIONS but were previously
            # unhandled here (falling through to TypeError); these branches mirror
            # HyperShot's aggregation.
            features, _ = support_feature.max(dim=1)
        elif self.sup_aggregation == "min_pooling":
            features, _ = support_feature.min(dim=1)
        else:
            raise TypeError(self.sup_aggregation)
        return features
def build_embedding(self, support_feature: torch.Tensor) -> torch.Tensor:
way, n_support, feat = support_feature.shape
if self.attention_embedding:
features = support_feature.view(1, -1, *(support_feature.size()[2:]))
attention_features = torch.flatten(self.transformer_encoder.forward(features))
return attention_features
features = self.maybe_aggregate_support_feature(support_feature)
features = features.reshape(1, -1)
return features
def generate_network_params(self, support_feature: torch.Tensor) -> Dict[str, torch.Tensor]:
embedding = self.build_embedding(support_feature)
root = self.hypernet_neck(embedding)
network_params = {
name.replace("-", "."): param_net(root).reshape(self.target_net_param_shapes[name])
for name, param_net in self.hypernet_heads.items()
}
return network_params
def generate_target_net(self, support_feature: torch.Tensor) -> nn.Module:
"""
x_support: [n_way, n_support, hidden_size]
"""
network_params = self.generate_network_params(support_feature)
tn = deepcopy(self.target_net_architecture)
set_from_param_dict(tn, network_params)
return tn.cuda()
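    # Flow sketch (illustrative): support embedding -> hypernet neck -> one head per
    # target-net parameter tensor -> reshape to the stored shapes -> written into a fresh
    # copy of `target_net_architecture` via set_from_param_dict, yielding a task-specific
    # classifier whose weights remain differentiable w.r.t. the hypernetwork.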
def set_forward(self, x: torch.Tensor, is_feature: bool = False, permutation_sanity_check: bool = False):
support_feature, query_feature = self.parse_feature(x, is_feature)
if self.attention_embedding:
y_support = self.get_labels(support_feature)
y_support_one_hot = torch.nn.functional.one_hot(y_support)
support_feature_with_classes_one_hot = torch.cat((support_feature, y_support_one_hot), 2)
support_feature = support_feature_with_classes_one_hot
classifier = self.generate_target_net(support_feature)
query_feature = query_feature.reshape(
-1, query_feature.shape[-1]
)
y_pred = classifier(query_feature)
if permutation_sanity_check:
### random permutation test
perm = torch.randperm(len(query_feature))
rev_perm = torch.argsort(perm)
query_perm = query_feature[perm]
assert torch.equal(query_perm[rev_perm], query_feature)
y_pred_perm = classifier(query_perm)
assert torch.equal(y_pred_perm[rev_perm], y_pred)
return y_pred
def set_forward_with_adaptation(self, x: torch.Tensor):
self_copy = deepcopy(self)
metrics = {
"accuracy/val@-0": self_copy.query_accuracy(x)
}
val_opt_type = torch.optim.Adam if self.hn_val_optim == "adam" else torch.optim.SGD
val_opt = val_opt_type(self_copy.parameters(), lr=self.hn_val_lr)
if self.hn_val_epochs > 0:
for i in range(1, self.hn_val_epochs + 1):
self_copy.train()
val_opt.zero_grad()
loss = self_copy.set_forward_loss(x, train_on_query=False)
loss.backward()
val_opt.step()
self_copy.eval()
metrics[f"accuracy/val@-{i}"] = self_copy.query_accuracy(x)
return self_copy.set_forward(x, permutation_sanity_check=True), metrics
def query_accuracy(self, x: torch.Tensor) -> float:
scores = self.set_forward(x)
return accuracy_from_scores(scores, n_way=self.n_way, n_query=self.n_query)
def set_forward_loss(
self, x: torch.Tensor, detach_ft_hn: bool = False, detach_ft_tn: bool = False,
train_on_support: bool = True,
train_on_query: bool = True
):
n_way, n_examples, c, h, w = x.shape
support_feature, query_feature = self.parse_feature(x, is_feature=False)
if self.attention_embedding:
y_support = self.get_labels(support_feature)
y_query = self.get_labels(query_feature)
y_support_one_hot = torch.nn.functional.one_hot(y_support)
support_feature_with_classes_one_hot = torch.cat((support_feature, y_support_one_hot), 2)
feature_to_hn = support_feature_with_classes_one_hot.detach() if detach_ft_hn else support_feature_with_classes_one_hot
else:
feature_to_hn = support_feature.detach() if detach_ft_hn else support_feature
classifier = self.generate_target_net(feature_to_hn)
feature_to_classify = []
y_to_classify_gt = []
if train_on_support:
feature_to_classify.append(
support_feature.reshape(
(self.n_way * self.n_support), support_feature.shape[-1]
)
)
y_support = self.get_labels(support_feature)
y_to_classify_gt.append(y_support.reshape(self.n_way * self.n_support))
if train_on_query:
feature_to_classify.append(
query_feature.reshape(
(self.n_way * (n_examples - self.n_support)), query_feature.shape[-1]
)
)
y_query = self.get_labels(query_feature)
y_to_classify_gt.append(y_query.reshape(self.n_way * (n_examples - self.n_support)))
feature_to_classify = torch.cat(feature_to_classify)
y_to_classify_gt = torch.cat(y_to_classify_gt)
if detach_ft_tn:
feature_to_classify = feature_to_classify.detach()
y_pred = classifier(feature_to_classify)
return self.loss_fn(y_pred, y_to_classify_gt)
def train_loop(self, epoch: int, train_loader: DataLoader, optimizer: torch.optim.Optimizer):
taskset_id = 0
taskset = []
n_train = len(train_loader)
accuracies = []
losses = []
metrics = defaultdict(list)
ts_repeats = self.taskset_repeats(epoch)
for i, (x, _) in enumerate(train_loader):
taskset.append(x)
# TODO 3: perhaps the idea of tasksets is redundant and it's better to update weights at every task
if i % self.taskset_size == (self.taskset_size - 1) or i == (n_train - 1):
loss_sum = torch.tensor(0).cuda()
for tr in range(ts_repeats):
loss_sum = torch.tensor(0).cuda()
for task in taskset:
if self.change_way:
self.n_way = task.size(0)
self.n_query = task.size(1) - self.n_support
loss = self.set_forward_loss(task)
loss_sum = loss_sum + loss
optimizer.zero_grad()
loss_sum.backward()
if tr == 0:
for k, p in get_param_dict(self).items():
metrics[f"grad_norm/{k}"] = p.grad.abs().mean().item() if p.grad is not None else 0
optimizer.step()
losses.append(loss_sum.item())
accuracies.extend([
self.query_accuracy(task) for task in taskset
])
acc_mean = np.mean(accuracies) * 100
acc_std = np.std(accuracies) * 100
if taskset_id % self.taskset_print_every == 0:
print(
f"Epoch {epoch} | Taskset {taskset_id} | TS {len(taskset)} | TS epochs {ts_repeats} | Loss {loss_sum.item()} | Train acc {acc_mean:.2f} +- {acc_std:.2f} %")
taskset_id += 1
taskset = []
metrics["loss/train"] = np.mean(losses)
metrics["accuracy/train"] = np.mean(accuracies) * 100
return metrics
class PPAMixin(HyperNetPOC):
def build_target_net_architecture(self, params) -> nn.Module:
assert params.hn_tn_depth == 1, "In PPA the target network must be a single linear layer, please use `--hn_tn_depth=1`"
return super().build_target_net_architecture(params)
def init_hypernet_modules(self):
target_net_param_dict = get_param_dict(self.target_net_architecture)
target_net_param_dict = {
name.replace(".", "-"): p
            # replace dots with hyphens because torch doesn't like dots in module names
for name, p in target_net_param_dict.items()
}
self.target_net_param_shapes = {
name: p.shape
for (name, p)
in target_net_param_dict.items()
}
self.init_hypernet_neck()
self.hypernet_heads = nn.ModuleDict()
assert self.hn_head_len >= 1, "Head len must be >= 1!"
# assert False, self.target_net_param_shapes
for name, param in target_net_param_dict.items():
head_in = self.embedding_size if self.hn_neck_len == 0 else self.hn_hidden_size
head_modules = []
assert param.numel() % self.n_way == 0, f"Each param in PPA should be divisible by {self.n_way=}, but {name} is of {param.shape=} -> {param.numel()=}"
head_out = param.numel() // self.n_way
for i in range(self.hn_head_len):
in_size = head_in if i == 0 else self.hn_hidden_size
is_final = (i == (self.hn_head_len - 1))
out_size = head_out if is_final else self.hn_hidden_size
head_modules.extend([nn.Dropout(self.hn_dropout), nn.Linear(in_size, out_size)])
if not is_final:
head_modules.append(nn.ReLU())
self.hypernet_heads[name] = nn.Sequential(*head_modules)
class HypernetPPA(PPAMixin, HyperNetPOC):
"""Based loosely on https://arxiv.org/abs/1706.03466"""
def taskset_repeats(self, epoch: int):
return 1
def init_embedding_size(self, params) -> int:
if self.attention_embedding:
raise NotImplementedError()
else:
assert self.sup_aggregation in ALLOWED_AGGREGATIONS
if self.sup_aggregation == "concat":
return self.feat_dim * self.n_support
elif self.sup_aggregation in ["mean", "max_pooling", "min_pooling"]:
return self.feat_dim
def build_embedding(self, support_feature: torch.Tensor) -> torch.Tensor:
way, n_support, feat = support_feature.shape
if self.attention_embedding:
features = support_feature.view(1, -1, *(support_feature.size()[2:]))
attention_features = torch.flatten(self.transformer_encoder.forward(features))
return attention_features
features = self.maybe_aggregate_support_feature(support_feature)
return features
def generate_network_params(self, support_feature: torch.Tensor) -> Dict[str, torch.Tensor]:
embedding = self.build_embedding(support_feature)
assert embedding.shape[0] == self.n_way
root = self.hypernet_neck(embedding)
network_params = {
name.replace("-", "."): param_net(root).reshape(self.target_net_param_shapes[name])
for name, param_net in self.hypernet_heads.items()
}
return network_params
| 18,022 | 41.607565 | 180 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/hypernets/bayeshmaml.py | from copy import deepcopy
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import backbone
from methods.hypernets.utils import get_param_dict, kl_diag_gauss_with_standard_gauss, \
reparameterize
from methods.hypernets.hypermaml import HyperMAML
class BHyperNet(nn.Module):
"""bayesian hypernetwork for target network params"""
def __init__(self, hn_hidden_size, n_way, embedding_size, feat_dim, out_neurons, params):
super(BHyperNet, self).__init__()
self.hn_head_len = params.hn_head_len
head = [nn.Linear(embedding_size, hn_hidden_size), nn.ReLU()]
if self.hn_head_len > 2:
for i in range(self.hn_head_len - 2):
head.append(nn.Linear(hn_hidden_size, hn_hidden_size))
head.append(nn.ReLU())
self.head = nn.Sequential(*head)
# tails to equate weights with distributions
tail_mean = [nn.Linear(hn_hidden_size, out_neurons)]
tail_logvar = [nn.Linear(hn_hidden_size, out_neurons)]
self.tail_mean = nn.Sequential(*tail_mean)
self.tail_logvar = nn.Sequential(*tail_logvar)
def forward(self, x):
out = self.head(x)
out_mean = self.tail_mean(out)
out_logvar = self.tail_logvar(out)
return out_mean, out_logvar
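    # The two tails above parameterize a diagonal Gaussian over each target-network
    # weight tensor: `tail_mean` predicts mu and `tail_logvar` predicts log(sigma^2);
    # `reparameterize` later draws concrete weights from this distribution.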
class BayesHMAML(HyperMAML):
def __init__(self, model_func, n_way, n_support, n_query, params=None, approx=False):
super(BayesHMAML, self).__init__(model_func, n_way, n_support, n_query, approx=approx, params=params)
# loss function component
self.loss_kld = kl_diag_gauss_with_standard_gauss # Kullback–Leibler divergence
self.kl_scale = params.kl_scale
self.kl_step = None # increase step for share of kld in loss
self.kl_stop_val = params.kl_stop_val
# num of weight set draws for softvoting
self.weight_set_num_train = params.hm_weight_set_num_train # train phase
self.weight_set_num_test = params.hm_weight_set_num_test if params.hm_weight_set_num_test != 0 else None # test phase
def _init_classifier(self):
assert self.hn_tn_hidden_size % self.n_way == 0, f"hn_tn_hidden_size {self.hn_tn_hidden_size} should be the multiple of n_way {self.n_way}"
layers = []
for i in range(self.hn_tn_depth):
in_dim = self.feat_dim if i == 0 else self.hn_tn_hidden_size
out_dim = self.n_way if i == (self.hn_tn_depth - 1) else self.hn_tn_hidden_size
linear = backbone.BLinear_fw(in_dim, out_dim)
linear.bias.data.fill_(0)
layers.append(linear)
self.classifier = nn.Sequential(*layers)
def _init_hypernet_modules(self, params):
target_net_param_dict = get_param_dict(self.classifier)
target_net_param_dict = {
name.replace(".", "-"): p
            # replace dots with hyphens because torch doesn't like dots in module names
for name, p in target_net_param_dict.items()
}
self.target_net_param_shapes = {
name: p.shape
for (name, p)
in target_net_param_dict.items()
}
self.hypernet_heads = nn.ModuleDict()
for name, param in target_net_param_dict.items():
if self.hm_use_class_batch_input and name[-4:] == 'bias':
                # skipped: when using this strategy, the bias delta is emitted jointly by the matching weight head (see head_out below)
continue
bias_size = param.shape[0] // self.n_way
head_in = self.embedding_size
head_out = (param.numel() // self.n_way) + bias_size if self.hm_use_class_batch_input else param.numel()
# make hypernetwork for target network param
self.hypernet_heads[name] = BHyperNet(self.hn_hidden_size, self.n_way, head_in, self.feat_dim, head_out,
params)
def get_hn_delta_params(self, support_embeddings):
if self.hm_detach_before_hyper_net:
support_embeddings = support_embeddings.detach()
if self.hm_use_class_batch_input:
delta_params_list = []
for name, param_net in self.hypernet_heads.items():
support_embeddings_resh = support_embeddings.reshape(
self.n_way, -1
)
delta_params_mean, params_logvar = param_net(support_embeddings_resh)
bias_neurons_num = self.target_net_param_shapes[name][0] // self.n_way
if self.hn_adaptation_strategy == 'increasing_alpha' and self.alpha < 1:
delta_params_mean = delta_params_mean * self.alpha
params_logvar = params_logvar * self.alpha
weights_delta_mean = delta_params_mean[:, :-bias_neurons_num].contiguous().view(
*self.target_net_param_shapes[name])
bias_delta_mean = delta_params_mean[:, -bias_neurons_num:].flatten()
weights_logvar = params_logvar[:, :-bias_neurons_num].contiguous().view(
*self.target_net_param_shapes[name])
bias_logvar = params_logvar[:, -bias_neurons_num:].flatten()
delta_params_list.append([weights_delta_mean, weights_logvar])
delta_params_list.append([bias_delta_mean, bias_logvar])
return delta_params_list
else:
delta_params_list = []
for name, param_net in self.hypernet_heads.items():
flattened_embeddings = support_embeddings.flatten()
delta_mean, logvar = param_net(flattened_embeddings)
if name in self.target_net_param_shapes.keys():
delta_mean = delta_mean.reshape(self.target_net_param_shapes[name])
logvar = logvar.reshape(self.target_net_param_shapes[name])
if self.hn_adaptation_strategy == 'increasing_alpha' and self.alpha < 1:
delta_mean = self.alpha * delta_mean
logvar = self.alpha * logvar
delta_params_list.append([delta_mean, logvar])
return delta_params_list
def _update_weight(self, weight, update_mean, logvar, train_stage=False):
""" get distribution associated with weight. Sample weights for target network. """
if update_mean is None and logvar is None:
return
# if weight.mu is None:
if not hasattr(weight, 'mu') or weight.mu is None:
weight.mu = None
weight.mu = weight - update_mean
else:
weight.mu = weight.mu - update_mean
if logvar is None: # used in maml warmup
weight.fast = []
weight.fast.append(weight.mu)
else:
weight.logvar = logvar
weight.fast = []
if train_stage:
for _ in range(self.weight_set_num_train): # sample fast parameters for training
weight.fast.append(reparameterize(weight.mu, weight.logvar))
else:
if self.weight_set_num_test is not None:
for _ in range(self.weight_set_num_test): # sample fast parameters for testing
weight.fast.append(reparameterize(weight.mu, weight.logvar))
else:
weight.fast.append(weight.mu) # return expected value
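    # Note (illustrative): `weight.fast` holds a list of sampled weight sets; training
    # takes `weight_set_num_train` draws, testing takes `weight_set_num_test` draws
    # (or just the expected value), and the resulting predictions are soft-voted downstream.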
    def _scale_step(self):
        """Calculate the multiplicative (geometric) update step for the KLD weight."""
        if self.kl_step is None:
            # kl_step is chosen so that kl_scale grows geometrically from its initial value
            # to kl_stop_val over stop_epoch epochs: kl_step = (kl_stop_val / kl_scale) ** (1 / stop_epoch)
            self.kl_step = np.power(1 / self.kl_scale * self.kl_stop_val, 1 / self.stop_epoch)
        self.kl_scale = self.kl_scale * self.kl_step
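    # Worked example (hypothetical values): kl_scale=1e-6, kl_stop_val=1e-2, stop_epoch=200
    # give kl_step = (1e-2 / 1e-6) ** (1 / 200) = 10 ** 0.02 ~ 1.047, so after 200 epochs
    # the KLD weight has grown geometrically from 1e-6 to roughly 1e-2.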
def _get_p_value(self):
if self.epoch < self.hm_maml_warmup_epochs:
return 1.0
elif self.hm_maml_warmup_epochs <= self.epoch < self.hm_maml_warmup_epochs + self.hm_maml_warmup_switch_epochs:
return (self.hm_maml_warmup_switch_epochs + self.hm_maml_warmup_epochs - self.epoch) / (
self.hm_maml_warmup_switch_epochs + 1)
return 0.0
def _update_network_weights(self, delta_params_list, support_embeddings, support_data_labels, train_stage=False):
if self.hm_maml_warmup and not self.single_test:
p = self._get_p_value()
# warmup coef p decreases 1 -> 0
if p > 0.0:
fast_parameters = []
clf_fast_parameters = list(self.classifier.parameters())
for weight in self.classifier.parameters():
weight.fast = None
weight.mu = None
# weight.logvar = None
self.classifier.zero_grad()
fast_parameters = fast_parameters + clf_fast_parameters
for task_step in range(self.task_update_num):
scores = self.classifier(support_embeddings)
set_loss = self.loss_fn(scores, support_data_labels)
reduction = self.kl_scale
for weight in self.classifier.parameters():
if weight.logvar is not None:
if weight.mu is not None:
# set_loss = set_loss + self.kl_w * reduction * self.loss_kld(weight.mu, weight.logvar)
set_loss = set_loss + reduction * self.loss_kld(weight.mu, weight.logvar)
else:
# set_loss = set_loss + self.kl_w * reduction * self.loss_kld(weight, weight.logvar)
set_loss = set_loss + reduction * self.loss_kld(weight, weight.logvar)
grad = torch.autograd.grad(set_loss, fast_parameters, create_graph=True,
allow_unused=True) # build full graph support gradient of gradient
if self.approx:
grad = [g.detach() for g in
grad] # do not calculate gradient of gradient if using first order approximation
if p == 1:
# update weights of classifier network by adding gradient
for k, weight in enumerate(self.classifier.parameters()):
update_value = (self.train_lr * grad[k])
update_mean, logvar = delta_params_list[k]
self._update_weight(weight, update_value, logvar, train_stage)
elif 0.0 < p < 1.0:
# update weights of classifier network by adding gradient and output of hypernetwork
for k, weight in enumerate(self.classifier.parameters()):
update_value = self.train_lr * p * grad[k]
update_mean, logvar = delta_params_list[k]
update_mean = (1 - p) * update_mean + update_value
self._update_weight(weight, update_mean, logvar, train_stage)
else:
for k, weight in enumerate(self.classifier.parameters()):
update_mean, logvar = delta_params_list[k]
self._update_weight(weight, update_mean, logvar, train_stage)
else:
for k, weight in enumerate(self.classifier.parameters()):
update_mean, logvar = delta_params_list[k]
self._update_weight(weight, update_mean, logvar, train_stage)
def _get_list_of_delta_params(self, maml_warmup_used, support_embeddings, support_data_labels):
# if not maml_warmup_used:
if self.enhance_embeddings:
with torch.no_grad():
logits = self.classifier.forward(support_embeddings).detach()
logits = F.softmax(logits, dim=1)
labels = support_data_labels.view(support_embeddings.shape[0], -1)
support_embeddings = torch.cat((support_embeddings, logits, labels), dim=1)
for weight in self.parameters():
weight.fast = None
for weight in self.classifier.parameters():
weight.mu = None
# weight.logvar = None
self.zero_grad()
support_embeddings = self.apply_embeddings_strategy(support_embeddings)
delta_params = self.get_hn_delta_params(support_embeddings)
if self.hm_save_delta_params and len(self.delta_list) == 0:
self.delta_list = [{'delta_params': delta_params}]
return delta_params
def set_forward_loss(self, x):
"""Adapt and forward using x. Return scores and total losses"""
scores, total_delta_sum = self.set_forward(x, is_feature=False, train_stage=True)
# calc_sigma = calc_sigma and (self.epoch == self.stop_epoch - 1 or self.epoch % 100 == 0)
# sigma, mu = self._mu_sigma(calc_sigma)
query_data_labels = Variable(torch.from_numpy(np.repeat(range(self.n_way), self.n_query))).cuda()
if self.hm_support_set_loss:
support_data_labels = torch.from_numpy(np.repeat(range(self.n_way), self.n_support)).cuda()
query_data_labels = torch.cat((support_data_labels, query_data_labels))
reduction = self.kl_scale
loss_ce = self.loss_fn(scores, query_data_labels)
loss_kld = torch.zeros_like(loss_ce)
for name, weight in self.classifier.named_parameters():
if weight.mu is not None and weight.logvar is not None:
val = self.loss_kld(weight.mu, weight.logvar)
# loss_kld = loss_kld + self.kl_w * reduction * val
loss_kld = loss_kld + reduction * val
loss = loss_ce + loss_kld
if self.hm_lambda != 0:
loss = loss + self.hm_lambda * total_delta_sum
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy().flatten()
y_labels = query_data_labels.cpu().numpy()
top1_correct = np.sum(topk_ind == y_labels)
task_accuracy = (top1_correct / len(query_data_labels)) * 100
return loss, loss_ce, loss_kld, task_accuracy
def set_forward_loss_with_adaptation(self, x):
"""returns loss and accuracy from adapted model (copy)"""
scores, _ = self.set_forward(x, is_feature=False, train_stage=False) # scores from adapted copy
support_data_labels = Variable(torch.from_numpy(np.repeat(range(self.n_way), self.n_support))).cuda()
reduction = self.kl_scale
loss_ce = self.loss_fn(scores, support_data_labels)
loss_kld = torch.zeros_like(loss_ce)
for name, weight in self.classifier.named_parameters():
if weight.mu is not None and weight.logvar is not None:
# loss_kld = loss_kld + self.kl_w * reduction * self.loss_kld(weight.mu, weight.logvar)
loss_kld = loss_kld + reduction * self.loss_kld(weight.mu, weight.logvar)
loss = loss_ce + loss_kld
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy().flatten()
y_labels = support_data_labels.cpu().numpy()
top1_correct = np.sum(topk_ind == y_labels)
task_accuracy = (top1_correct / len(support_data_labels)) * 100
return loss, task_accuracy
    def train_loop(self, epoch, train_loader, optimizer):  # overrides parent function
print_freq = 10
avg_loss = 0
task_count = 0
loss_all = []
loss_ce_all = []
loss_kld_all = []
# loss_kld_no_scale_all = []
acc_all = []
optimizer.zero_grad()
self.delta_list = []
# train
for i, (x, _) in enumerate(train_loader):
self.n_query = x.size(1) - self.n_support
            assert self.n_way == x.size(0), "MAML does not support way change"
loss, loss_ce, loss_kld, task_accuracy = self.set_forward_loss(x)
avg_loss = avg_loss + loss.item() # .data[0]
loss_all.append(loss)
loss_ce_all.append(loss_ce.item())
loss_kld_all.append(loss_kld.item())
# loss_kld_no_scale_all.append(loss_kld_no_scale.item())
acc_all.append(task_accuracy)
task_count += 1
if task_count == self.n_task: # MAML update several tasks at one time
loss_q = torch.stack(loss_all).sum(0)
loss_q.backward()
optimizer.step()
task_count = 0
loss_all = []
optimizer.zero_grad()
if i % print_freq == 0:
print('Epoch {:d}/{:d} | Batch {:d}/{:d} | Loss {:f}'.format(self.epoch, self.stop_epoch, i,
len(train_loader),
avg_loss / float(i + 1)))
self._scale_step()
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
metrics = {"accuracy/train": acc_mean}
loss_ce_all = np.asarray(loss_ce_all)
loss_ce_mean = np.mean(loss_ce_all)
metrics["loss_ce"] = loss_ce_mean
loss_kld_all = np.asarray(loss_kld_all)
loss_kld_mean = np.mean(loss_kld_all)
metrics["loss_kld"] = loss_kld_mean
if self.hn_adaptation_strategy == 'increasing_alpha':
metrics['alpha'] = self.alpha
if self.hm_save_delta_params and len(self.delta_list) > 0:
delta_params = {"epoch": self.epoch, "delta_list": self.delta_list}
metrics['delta_params'] = delta_params
if self.alpha < 1:
self.alpha += self.hn_alpha_step
return metrics
def set_forward_with_adaptation(self, x: torch.Tensor):
self_copy = deepcopy(self)
# deepcopy does not copy "fast" parameters so it should be done manually
for param1, param2 in zip(self.feature.parameters(), self_copy.feature.parameters()):
if hasattr(param1, 'fast'):
if param1.fast is not None:
param2.fast = param1.fast.clone()
else:
param2.fast = None
for param1, param2 in zip(self.classifier.parameters(), self_copy.classifier.parameters()):
if hasattr(param1, 'fast'):
if param1.fast is not None:
param2.fast = list(param1.fast)
else:
param2.fast = None
if hasattr(param1, 'mu'):
if param1.mu is not None:
param2.mu = param1.mu.clone()
else:
param2.mu = None
if hasattr(param1, 'logvar'):
if param1.logvar is not None:
param2.logvar = param1.logvar.clone()
else:
param2.logvar = None
metrics = {
"accuracy/val@-0": self_copy.query_accuracy(x)
}
val_opt_type = torch.optim.Adam if self.hn_val_optim == "adam" else torch.optim.SGD
val_opt = val_opt_type(self_copy.parameters(), lr=self.hn_val_lr)
if self.hn_val_epochs > 0:
for i in range(1, self.hn_val_epochs + 1):
self_copy.train()
val_opt.zero_grad()
loss, val_support_acc = self_copy.set_forward_loss_with_adaptation(x)
loss.backward()
val_opt.step()
self_copy.eval()
metrics[f"accuracy/val_support_acc@-{i}"] = val_support_acc
metrics[f"accuracy/val_loss@-{i}"] = loss.item()
metrics[f"accuracy/val@-{i}"] = self_copy.query_accuracy(x)
# free CUDA memory by deleting "fast" parameters
for param in self_copy.parameters():
param.fast = None
param.mu = None
param.logvar = None
return metrics[f"accuracy/val@-{self.hn_val_epochs}"], metrics
| 20,114 | 41.08159 | 147 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/models/gp_kernels.py | import gpytorch
import torch
import torch.nn as nn
import numpy as np
class NNKernel(gpytorch.kernels.Kernel):
def __init__(self, input_dim, output_dim, num_layers, hidden_dim, flatten=False, **kwargs):
super(NNKernel, self).__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.flatten = flatten
self.model = self.create_model()
def create_model(self):
assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
modules = [nn.Linear(self.input_dim, self.hidden_dim), nn.ReLU()]
if self.flatten:
modules = [nn.Flatten()] + modules
for i in range(self.num_layers - 1):
modules.append(nn.Linear(self.hidden_dim, self.hidden_dim))
modules.append(nn.ReLU())
modules.append(nn.Linear(self.hidden_dim, self.output_dim))
model = nn.Sequential(*modules)
return model
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
r"""
Computes the covariance between x1 and x2.
        This method should be implemented by all Kernel subclasses.
Args:
:attr:`x1` (Tensor `n x d` or `b x n x d`):
First set of data
:attr:`x2` (Tensor `m x d` or `b x m x d`):
Second set of data
:attr:`diag` (bool):
Should the Kernel compute the whole kernel, or just the diag?
:attr:`last_dim_is_batch` (tuple, optional):
If this is true, it treats the last dimension of the data as another batch dimension.
(Useful for additive structure over the dimensions). Default: False
Returns:
:class:`Tensor` or :class:`gpytorch.lazy.LazyTensor`.
The exact size depends on the kernel's evaluation mode:
* `full_covar`: `n x m` or `b x n x m`
* `full_covar` with `last_dim_is_batch=True`: `k x n x m` or `b x k x n x m`
* `diag`: `n` or `b x n`
* `diag` with `last_dim_is_batch=True`: `k x n` or `b x k x n`
"""
if last_dim_is_batch:
raise NotImplementedError()
else:
z1 = self.model(x1)
z2 = self.model(x2)
out = torch.matmul(z1, z2.T)
if diag:
return torch.diag(out)
else:
return out
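# Minimal usage sketch (not part of the original file): the dimensions below
# are illustrative assumptions. Calling forward() directly bypasses gpytorch's
# lazy-evaluation machinery, which is fine for a quick shape check.
def _demo_nn_kernel():
    kernel = NNKernel(input_dim=16, output_dim=8, num_layers=2, hidden_dim=32)
    x1, x2 = torch.randn(5, 16), torch.randn(7, 16)
    full = kernel.forward(x1, x2)             # 5 x 7 Gram matrix z1 @ z2.T
    diag = kernel.forward(x1, x1, diag=True)  # length-5 diagonal
    assert full.shape == (5, 7) and diag.shape == (5,)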
class PositiveLinear(nn.Module):
def __init__(self, in_features, out_features):
super(PositiveLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
def forward(self, input):
w = nn.functional.softplus(self.weight)
return nn.functional.linear(input, w)
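# Quick check (not part of the original file): the softplus reparametrization
# keeps the effective weights strictly positive whatever the raw parameters are.
def _demo_positive_linear():
    layer = PositiveLinear(4, 3)
    assert bool((nn.functional.softplus(layer.weight) > 0).all())
    out = layer(torch.randn(2, 4))  # shape (2, 3); note there is no bias term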
class NNKernelNoInner(gpytorch.kernels.Kernel):
def __init__(self, input_dim, num_layers, hidden_dim, flatten=False, **kwargs):
super(NNKernelNoInner, self).__init__(**kwargs)
self.input_dim = input_dim*2
self.output_dim = 1
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.flatten = flatten
self.model = self.create_model()
def create_model(self):
assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
modules = [PositiveLinear(self.input_dim, self.hidden_dim), nn.Sigmoid()]
if self.flatten:
modules = [nn.Flatten()] + modules
for i in range(self.num_layers - 1):
modules.append(PositiveLinear(self.hidden_dim, self.hidden_dim))
modules.append(nn.Sigmoid())
modules.append(PositiveLinear(self.hidden_dim, self.output_dim))
model = nn.Sequential(*modules)
return model
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
r"""
Computes the covariance between x1 and x2.
        This method should be implemented by all Kernel subclasses.
Args:
:attr:`x1` (Tensor `n x d` or `b x n x d`):
First set of data
:attr:`x2` (Tensor `m x d` or `b x m x d`):
Second set of data
:attr:`diag` (bool):
Should the Kernel compute the whole kernel, or just the diag?
:attr:`last_dim_is_batch` (tuple, optional):
If this is true, it treats the last dimension of the data as another batch dimension.
(Useful for additive structure over the dimensions). Default: False
Returns:
:class:`Tensor` or :class:`gpytorch.lazy.LazyTensor`.
The exact size depends on the kernel's evaluation mode:
* `full_covar`: `n x m` or `b x n x m`
* `full_covar` with `last_dim_is_batch=True`: `k x n x m` or `b x k x n x m`
* `diag`: `n` or `b x n`
* `diag` with `last_dim_is_batch=True`: `k x n` or `b x k x n`
"""
if last_dim_is_batch:
raise NotImplementedError()
else:
            n = x1.shape[0]
            m = x2.shape[0]
            out = torch.zeros((n, m), device=x1.device)
            # Fill the Gram matrix pairwise, exploiting symmetry of the kernel.
            for i in range(n):
                for j in range(i+1):
                    out[i, j] = self.model(torch.cat((x1[i], x2[j]))).view(-1)
                    if i != j:
                        out[j, i] = out[i, j]
#npout = out.cpu().detach().numpy()
#print(np.linalg.eigvals(npout))
#assert np.all(np.linalg.eigvals(npout) +1e-2 >= 0), "not positive"
if diag:
return torch.diag(out)
else:
return out
class MultiNNKernel(gpytorch.kernels.Kernel):
def __init__(self, num_tasks, kernels, **kwargs):
super(MultiNNKernel, self).__init__(**kwargs)
assert isinstance(kernels, list), "kernels must be a list of kernels"
self.num_tasks = num_tasks
self.kernels = nn.ModuleList(kernels)
def num_outputs_per_input(self, x1, x2):
"""
    Given `n` data points `x1` and `m` data points `x2`, this multitask
kernel returns an `(n*num_tasks) x (m*num_tasks)` covariance matrix.
"""
return self.num_tasks
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, full_covar=True, **params):
r"""
Computes the covariance between x1 and x2.
        This method should be implemented by all Kernel subclasses.
Args:
:attr:`x1` (Tensor `n x d` or `b x n x d`):
First set of data
:attr:`x2` (Tensor `m x d` or `b x m x d`):
Second set of data
:attr:`diag` (bool):
Should the Kernel compute the whole kernel, or just the diag?
:attr:`last_dim_is_batch` (tuple, optional):
If this is true, it treats the last dimension of the data as another batch dimension.
(Useful for additive structure over the dimensions). Default: False
Returns:
:class:`Tensor` or :class:`gpytorch.lazy.LazyTensor`.
The exact size depends on the kernel's evaluation mode:
* `full_covar`: `n x m` or `b x n x m`
* `full_covar` with `last_dim_is_batch=True`: `k x n x m` or `b x k x n x m`
* `diag`: `n` or `b x n`
* `diag` with `last_dim_is_batch=True`: `k x n` or `b x k x n`
"""
if last_dim_is_batch:
raise NotImplementedError()
else:
n = x1.shape[0]
m = x2.shape[0]
            out = torch.zeros((n * self.num_tasks, m * self.num_tasks), device=x1.device)
for i in range(self.num_tasks):
for j in range(self.num_tasks):
z1 = self.kernels[i].model(x1)
z2 = self.kernels[j].model(x2)
out[i:n*self.num_tasks:self.num_tasks, j:m*self.num_tasks:self.num_tasks] = torch.matmul(z1, z2.T)
if diag:
return torch.diag(out)
else:
return out | 8,368 | 39.429952 | 118 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/additional_transforms.py | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from PIL import ImageEnhance
transformtypedict=dict(Brightness=ImageEnhance.Brightness, Contrast=ImageEnhance.Contrast, Sharpness=ImageEnhance.Sharpness, Color=ImageEnhance.Color)
class ImageJitter(object):
def __init__(self, transformdict):
self.transforms = [(transformtypedict[k], transformdict[k]) for k in transformdict]
def __call__(self, img):
out = img
randtensor = torch.rand(len(self.transforms))
for i, (transformer, alpha) in enumerate(self.transforms):
r = alpha*(randtensor[i]*2.0 -1.0) + 1
out = transformer(out).enhance(r).convert('RGB')
return out
| 850 | 24.787879 | 150 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/feature_loader.py | import torch
import numpy as np
import h5py
class SimpleHDF5Dataset:
def __init__(self, file_handle = None):
        if file_handle is None:
self.f = ''
self.all_feats_dset = []
self.all_labels = []
self.total = 0
else:
self.f = file_handle
self.all_feats_dset = self.f['all_feats'][...]
self.all_labels = self.f['all_labels'][...]
self.total = self.f['count'][0]
# print('here')
def __getitem__(self, i):
return torch.Tensor(self.all_feats_dset[i,:]), int(self.all_labels[i])
def __len__(self):
return self.total
def init_loader(filename):
with h5py.File(filename, 'r') as f:
fileset = SimpleHDF5Dataset(f)
#labels = [ l for l in fileset.all_labels if l != 0]
feats = fileset.all_feats_dset
labels = fileset.all_labels
    # Drop trailing all-zero rows (unused preallocated space in the feature file)
    while np.sum(feats[-1]) == 0:
        feats = np.delete(feats, -1, axis=0)
        labels = np.delete(labels, -1, axis=0)
class_list = np.unique(np.array(labels)).tolist()
inds = range(len(labels))
cl_data_file = {}
for cl in class_list:
cl_data_file[cl] = []
for ind in inds:
cl_data_file[labels[ind]].append( feats[ind])
return cl_data_file
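# Usage sketch (not part of the original file). 'features.hdf5' is a
# hypothetical file with the 'all_feats', 'all_labels' and 'count' datasets
# this loader expects; the result maps each class label to its feature vectors.
def _demo_init_loader():
    cl_data_file = init_loader('features.hdf5')
    for cl, feats in cl_data_file.items():
        print(cl, len(feats))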
| 1,293 | 27.755556 | 78 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/dataset.py | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
from PIL import Image
import json
import numpy as np
import torchvision.transforms as transforms
import os
identity = lambda x:x
class SimpleDataset:
def __init__(self, data_file, transform, target_transform=identity):
with open(data_file, 'r') as f:
self.meta = json.load(f)
self.transform = transform
self.target_transform = target_transform
def __getitem__(self,i):
image_path = os.path.join(self.meta['image_names'][i])
img = Image.open(image_path).convert('RGB')
img = self.transform(img)
target = self.target_transform(self.meta['image_labels'][i])
return img, target
def __len__(self):
return len(self.meta['image_names'])
class SetDataset:
def __init__(self, data_file, batch_size, transform):
with open(data_file, 'r') as f:
self.meta = json.load(f)
self.cl_list = np.unique(self.meta['image_labels']).tolist()
self.sub_meta = {}
for cl in self.cl_list:
self.sub_meta[cl] = []
for x,y in zip(self.meta['image_names'],self.meta['image_labels']):
self.sub_meta[y].append(x)
self.sub_dataloader = []
sub_data_loader_params = dict(batch_size = batch_size,
shuffle = True,
num_workers = 0, #use main thread only or may receive multiple batches
pin_memory = False)
for cl in self.cl_list:
sub_dataset = SubDataset(self.sub_meta[cl], cl, transform = transform)
self.sub_dataloader.append( torch.utils.data.DataLoader(sub_dataset, **sub_data_loader_params) )
def __getitem__(self,i):
return next(iter(self.sub_dataloader[i]))
def __len__(self):
return len(self.cl_list)
class SubDataset:
def __init__(self, sub_meta, cl, transform=transforms.ToTensor(), target_transform=identity ):
self.sub_meta = sub_meta
self.cl = cl
self.transform = transform
self.target_transform = target_transform
def __getitem__(self,i):
#print( '%d -%d' %(self.cl,i))
image_path = os.path.join( self.sub_meta[i])
img = Image.open(image_path).convert('RGB')
img = self.transform(img)
target = self.target_transform(self.cl)
return img, target
def __len__(self):
return len(self.sub_meta)
class EpisodicBatchSampler(object):
def __init__(self, n_classes, n_way, n_episodes):
self.n_classes = n_classes
self.n_way = n_way
self.n_episodes = n_episodes
def __len__(self):
return self.n_episodes
def __iter__(self):
for i in range(self.n_episodes):
yield torch.randperm(self.n_classes)[:self.n_way]
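# Usage sketch (not part of the original file): each iteration yields the
# indices of n_way randomly chosen classes; SetDataManager passes an instance
# of this sampler to a DataLoader as its batch_sampler.
def _demo_episodic_sampler():
    sampler = EpisodicBatchSampler(n_classes=20, n_way=5, n_episodes=3)
    for episode in sampler:
        print(episode)  # e.g. tensor([12,  3, 17,  0,  8])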
| 2,913 | 31.741573 | 108 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/datamgr.py | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
import data.additional_transforms as add_transforms
from data.dataset import SimpleDataset, SetDataset, EpisodicBatchSampler
from abc import abstractmethod
def _init_fn(worker_id):
np.random.seed(0)
class TransformLoader:
def __init__(self, image_size,
normalize_param = dict(mean= [0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225]),
jitter_param = dict(Brightness=0.4, Contrast=0.4, Color=0.4)):
self.image_size = image_size
self.normalize_param = normalize_param
self.jitter_param = jitter_param
def parse_transform(self, transform_type):
if transform_type=='ImageJitter':
method = add_transforms.ImageJitter( self.jitter_param )
return method
method = getattr(transforms, transform_type)
if transform_type=='RandomResizedCrop':
return method(self.image_size)
elif transform_type=='CenterCrop':
return method(self.image_size)
elif transform_type=='Resize':
return method([int(self.image_size*1.15), int(self.image_size*1.15)])
elif transform_type=='Normalize':
return method(**self.normalize_param )
else:
return method()
def get_composed_transform(self, aug = False):
if aug:
transform_list = ['RandomResizedCrop', 'ImageJitter', 'RandomHorizontalFlip', 'ToTensor', 'Normalize']
else:
transform_list = ['Resize','CenterCrop', 'ToTensor', 'Normalize']
transform_funcs = [ self.parse_transform(x) for x in transform_list]
transform = transforms.Compose(transform_funcs)
return transform
class DataManager:
@abstractmethod
def get_data_loader(self, data_file, aug):
pass
class SimpleDataManager(DataManager):
def __init__(self, image_size, batch_size):
super(SimpleDataManager, self).__init__()
self.batch_size = batch_size
self.trans_loader = TransformLoader(image_size)
def get_data_loader(self, data_file, aug): #parameters that would change on train/val set
transform = self.trans_loader.get_composed_transform(aug)
dataset = SimpleDataset(data_file, transform)
data_loader_params = dict(batch_size = self.batch_size, shuffle=True, num_workers = 8, pin_memory=True)
data_loader = torch.utils.data.DataLoader(dataset, **data_loader_params)
return data_loader
class SetDataManager(DataManager):
def __init__(self, image_size, n_way, n_support, n_query, n_eposide =100):
super(SetDataManager, self).__init__()
self.image_size = image_size
self.n_way = n_way
self.batch_size = n_support + n_query
self.n_eposide = n_eposide
self.trans_loader = TransformLoader(image_size)
def get_data_loader(self, data_file, aug): #parameters that would change on train/val set
transform = self.trans_loader.get_composed_transform(aug)
dataset = SetDataset( data_file , self.batch_size, transform )
sampler = EpisodicBatchSampler(len(dataset), self.n_way, self.n_eposide )
data_loader_params = dict(batch_sampler = sampler, num_workers = 8, pin_memory=True)
data_loader = torch.utils.data.DataLoader(dataset, **data_loader_params)
return data_loader
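# Usage sketch (not part of the original file). 'base.json' is a hypothetical
# meta file with 'image_names'/'image_labels' keys; each yielded episode x has
# shape [n_way, n_support + n_query, 3, image_size, image_size].
def _demo_set_data_manager():
    mgr = SetDataManager(image_size=84, n_way=5, n_support=5, n_query=16)
    loader = mgr.get_data_loader('base.json', aug=False)
    x, y = next(iter(loader))
    print(x.shape)  # torch.Size([5, 21, 3, 84, 84])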
| 3,560 | 38.566667 | 118 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/data/qmul_loader.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import torchvision.transforms as transforms
from PIL import Image
train_people = ['DennisPNoGlassesGrey','JohnGrey','SimonBGrey','SeanGGrey','DanJGrey','AdamBGrey','JackGrey','RichardHGrey','YongminYGrey','TomKGrey','PaulVGrey','DennisPGrey','CarlaBGrey','JamieSGrey','KateSGrey','DerekCGrey','KatherineWGrey','ColinPGrey','SueWGrey','GrahamWGrey','KrystynaNGrey','SeanGNoGlassesGrey','KeithCGrey','HeatherLGrey']
test_people = ['RichardBGrey','TasosHGrey','SarahLGrey','AndreeaVGrey','YogeshRGrey']
def num_to_str(num):
str_ = ''
if num == 0:
str_ = '000'
elif num < 100:
str_ = '0' + str(int(num))
else:
str_ = str(int(num))
return str_
def get_person_at_curve(person, curve, prefix='filelists/QMUL/images/'):
faces = []
targets = []
train_transforms = transforms.Compose([transforms.ToTensor()])
for pitch, angle in curve:
fname = prefix + person + '/' + person[:-4] + '_' + num_to_str(pitch) + '_' + num_to_str(angle) +'.jpg'
img = Image.open(fname).convert('RGB')
img = train_transforms(img)
faces.append(img)
        pitch_norm = 2 * ((pitch - 60) / (120 - 60)) - 1
        angle_norm = 2 * ((angle - 0) / (180 - 0)) - 1  # computed but unused; only the pitch is regressed
        targets.append(torch.Tensor([pitch_norm]))
faces = torch.stack(faces)
targets = torch.stack(targets).squeeze()
return faces, targets
def get_batch(train_people=train_people, num_samples=19):
## generate trajectory
amp = np.random.uniform(-3, 3)
phase = np.random.uniform(-5, 5)
wave = [(amp * np.sin(phase + x)) for x in range(num_samples)]
## map trajectory to angles/pitches
angles = list(range(num_samples))
angles = [x * 10 for x in angles]
pitches = [int(round(((y+3)*10 )+60,-1)) for y in wave]
curve = [(p,a) for p, a in zip(pitches, angles)]
inputs = []
targets = []
for person in train_people:
inps, targs = get_person_at_curve(person, curve)
inputs.append(inps)
targets.append(targs)
return torch.stack(inputs), torch.stack(targets)
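# Shape sketch (not part of the original file); assumes the QMUL images are
# available under filelists/QMUL/images/. One episode follows a single sampled
# head-pose trajectory for every training person, so inputs has shape
# [len(train_people), num_samples, 3, H, W] and targets [len(train_people), num_samples].
def _demo_get_batch():
    inputs, targets = get_batch(train_people, num_samples=19)
    print(inputs.shape, targets.shape)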
| 2,209 | 35.833333 | 347 | py |
AICare | AICare-main/AICare.py | import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
class Sparsemax(nn.Module):
"""Sparsemax function."""
def __init__(self, dim=None):
super(Sparsemax, self).__init__()
self.dim = -1 if dim is None else dim
def forward(self, input, device='cuda'):
original_size = input.size()
input = input.view(-1, input.size(self.dim))
dim = 1
number_of_logits = input.size(dim)
input = input - torch.max(input, dim=dim, keepdim=True)[0].expand_as(input)
zs = torch.sort(input=input, dim=dim, descending=True)[0]
        rhos = torch.arange(start=1, end=number_of_logits + 1, device=device, dtype=torch.float32).view(1, -1)
        rhos = rhos.expand_as(zs)  # renamed from `range` to avoid shadowing the builtin
        bound = 1 + rhos * zs
        cumulative_sum_zs = torch.cumsum(zs, dim)
        is_gt = torch.gt(bound, cumulative_sum_zs).type(input.type())
        k = torch.max(is_gt * rhos, dim, keepdim=True)[0]
zs_sparse = is_gt * zs
taus = (torch.sum(zs_sparse, dim, keepdim=True) - 1) / k
taus = taus.expand_as(input)
self.output = torch.max(torch.zeros_like(input), input - taus)
output = self.output.view(original_size)
return output
def backward(self, grad_output):
dim = 1
nonzeros = torch.ne(self.output, 0)
sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros, dim=dim)
self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output))
return self.grad_input
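# Minimal sketch (not part of the original file): unlike softmax, sparsemax
# projects onto the probability simplex and can assign exactly zero mass.
def _demo_sparsemax():
    sm = Sparsemax(dim=-1)
    p = sm(torch.tensor([[2.0, 1.0, -2.0]]), device='cpu')
    print(p)  # tensor([[1., 0., 0.]]) -- rows sum to 1 with exact zeros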
class SingleAttention(nn.Module):
def __init__(self, attention_input_dim, attention_hidden_dim, attention_type='add', demographic_dim=12, time_aware=False, use_demographic=False):
super(SingleAttention, self).__init__()
self.attention_type = attention_type
self.attention_hidden_dim = attention_hidden_dim
self.attention_input_dim = attention_input_dim
self.use_demographic = use_demographic
self.demographic_dim = demographic_dim
self.time_aware = time_aware
# batch_time = torch.arange(0, batch_mask.size()[1], dtype=torch.float32).reshape(1, batch_mask.size()[1], 1)
# batch_time = batch_time.repeat(batch_mask.size()[0], 1, 1)
if attention_type == 'add':
if self.time_aware == True:
# self.Wx = nn.Parameter(torch.randn(attention_input_dim+1, attention_hidden_dim))
self.Wx = nn.Parameter(torch.randn(attention_input_dim, attention_hidden_dim))
self.Wtime_aware = nn.Parameter(torch.randn(1, attention_hidden_dim))
nn.init.kaiming_uniform_(self.Wtime_aware, a=math.sqrt(5))
else:
self.Wx = nn.Parameter(torch.randn(attention_input_dim, attention_hidden_dim))
self.Wt = nn.Parameter(torch.randn(attention_input_dim, attention_hidden_dim))
self.Wd = nn.Parameter(torch.randn(demographic_dim, attention_hidden_dim))
self.bh = nn.Parameter(torch.zeros(attention_hidden_dim,))
self.Wa = nn.Parameter(torch.randn(attention_hidden_dim, 1))
self.ba = nn.Parameter(torch.zeros(1,))
nn.init.kaiming_uniform_(self.Wd, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.Wx, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.Wt, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))
elif attention_type == 'mul':
self.Wa = nn.Parameter(torch.randn(attention_input_dim, attention_input_dim))
self.ba = nn.Parameter(torch.zeros(1,))
nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))
elif attention_type == 'concat':
if self.time_aware == True:
self.Wh = nn.Parameter(torch.randn(2*attention_input_dim+1, attention_hidden_dim))
else:
self.Wh = nn.Parameter(torch.randn(2*attention_input_dim, attention_hidden_dim))
self.Wa = nn.Parameter(torch.randn(attention_hidden_dim, 1))
self.ba = nn.Parameter(torch.zeros(1,))
nn.init.kaiming_uniform_(self.Wh, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))
else:
raise RuntimeError('Wrong attention type.')
self.tanh = nn.Tanh()
self.softmax = nn.Softmax()
def forward(self, input, demo=None):
batch_size, time_step, input_dim = input.size() # batch_size * time_step * hidden_dim(i)
#assert(input_dim == self.input_dim)
# time_decays = torch.zeros((time_step,time_step)).to(device)# t*t
# for this_time in range(time_step):
# for pre_time in range(time_step):
# if pre_time > this_time:
# break
# time_decays[this_time][pre_time] = torch.tensor(this_time - pre_time, dtype=torch.float32).to(device)
# b_time_decays = tile(time_decays, 0, batch_size).view(batch_size,time_step,time_step).unsqueeze(-1).to(device)# b t t 1
        time_decays = torch.tensor(range(47, -1, -1), dtype=torch.float32).unsqueeze(-1).unsqueeze(0).to(input.device)  # 1*t*1 (assumes time_step == 48)
        b_time_decays = time_decays.repeat(batch_size, 1, 1)  # b t 1
if self.attention_type == 'add': #B*T*I @ H*I
q = torch.matmul(input[:,-1,:], self.Wt)# b h
q = torch.reshape(q, (batch_size, 1, self.attention_hidden_dim)) #B*1*H
if self.time_aware == True:
# k_input = torch.cat((input, time), dim=-1)
k = torch.matmul(input, self.Wx)#b t h
# k = torch.reshape(k, (batch_size, 1, time_step, self.attention_hidden_dim)) #B*1*T*H
time_hidden = torch.matmul(b_time_decays, self.Wtime_aware)# b t h
else:
k = torch.matmul(input, self.Wx)# b t h
# k = torch.reshape(k, (batch_size, 1, time_step, self.attention_hidden_dim)) #B*1*T*H
if self.use_demographic == True:
d = torch.matmul(demo, self.Wd) #B*H
d = torch.reshape(d, (batch_size, 1, self.attention_hidden_dim)) # b 1 h
h = q + k + self.bh # b t h
if self.time_aware == True:
h += time_hidden
h = self.tanh(h) #B*T*H
e = torch.matmul(h, self.Wa) + self.ba #B*T*1
e = torch.reshape(e, (batch_size, time_step))# b t
elif self.attention_type == 'mul':
e = torch.matmul(input[:,-1,:], self.Wa)#b i
e = torch.matmul(e.unsqueeze(1), input.permute(0,2,1)).squeeze() + self.ba #b t
elif self.attention_type == 'concat':
q = input[:,-1,:].unsqueeze(1).repeat(1,time_step,1)# b t i
k = input
c = torch.cat((q, k), dim=-1) #B*T*2I
if self.time_aware == True:
c = torch.cat((c, b_time_decays), dim=-1) #B*T*2I+1
h = torch.matmul(c, self.Wh)
h = self.tanh(h)
e = torch.matmul(h, self.Wa) + self.ba #B*T*1
e = torch.reshape(e, (batch_size, time_step)) # b t
a = self.softmax(e) #B*T
v = torch.matmul(a.unsqueeze(1), input).squeeze() #B*I
return v, a
class FinalAttentionQKV(nn.Module):
def __init__(self, attention_input_dim, attention_hidden_dim, attention_type='add', dropout=None):
super(FinalAttentionQKV, self).__init__()
self.attention_type = attention_type
self.attention_hidden_dim = attention_hidden_dim
self.attention_input_dim = attention_input_dim
self.W_q = nn.Linear(attention_input_dim, attention_hidden_dim)
self.W_k = nn.Linear(attention_input_dim, attention_hidden_dim)
self.W_v = nn.Linear(attention_input_dim, attention_hidden_dim)
self.W_out = nn.Linear(attention_hidden_dim, 1)
self.b_in = nn.Parameter(torch.zeros(1,))
self.b_out = nn.Parameter(torch.zeros(1,))
nn.init.kaiming_uniform_(self.W_q.weight, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.W_k.weight, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.W_v.weight, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.W_out.weight, a=math.sqrt(5))
self.Wh = nn.Parameter(torch.randn(2*attention_input_dim, attention_hidden_dim))
self.Wa = nn.Parameter(torch.randn(attention_hidden_dim, 1))
self.ba = nn.Parameter(torch.zeros(1,))
nn.init.kaiming_uniform_(self.Wh, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))
self.dropout = nn.Dropout(p=dropout)
self.tanh = nn.Tanh()
self.softmax = nn.Softmax()
self.sigmoid = nn.Sigmoid()
self.sparsemax = Sparsemax()
def forward(self, input):
batch_size, time_step, input_dim = input.size() # batch_size * input_dim + 1 * hidden_dim(i)
input_q = self.W_q(torch.mean(input,1)) # b h
input_k = self.W_k(input[:,:-1,:])# b t h
input_v = self.W_v(input[:,:-1,:])# b t h
if self.attention_type == 'add': #B*T*I @ H*I
q = torch.reshape(input_q, (batch_size, 1, self.attention_hidden_dim)) #B*1*H
h = q + input_k + self.b_in # b t h
h = self.tanh(h) #B*T*H
e = self.W_out(h) # b t 1
e = torch.reshape(e, (batch_size, time_step))# b t
elif self.attention_type == 'mul':
q = torch.reshape(input_q, (batch_size, self.attention_hidden_dim, 1)) #B*h 1
e = torch.matmul(input_k, q).squeeze()#b t
elif self.attention_type == 'concat':
q = input_q.unsqueeze(1).repeat(1,time_step,1)# b t h
k = input_k
c = torch.cat((q, k), dim=-1) #B*T*2I
h = torch.matmul(c, self.Wh)
h = self.tanh(h)
e = torch.matmul(h, self.Wa) + self.ba #B*T*1
e = torch.reshape(e, (batch_size, time_step)) # b t
a = self.softmax(e) #B*T
if self.dropout is not None:
a = self.dropout(a)
v = torch.matmul(a.unsqueeze(1), input_v).squeeze() #B*I
return v, a
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def tile(a, dim, n_tile):
init_dim = a.size(dim)
repeat_idx = [1] * a.dim()
repeat_idx[dim] = n_tile
a = a.repeat(*(repeat_idx))
    order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])).to(a.device)
    return torch.index_select(a, dim, order_index)
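# Quick check (not part of the original file): tile repeats each entry
# consecutively along dim, like numpy.repeat.
def _demo_tile():
    print(tile(torch.tensor([1, 2]), dim=0, n_tile=2))  # tensor([1, 1, 2, 2])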
class PositionwiseFeedForward(nn.Module):  # newly added
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x)))), None
class PositionalEncoding(nn.Module):  # newly added / no longer used
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=400):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0., max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0., d_model, 2) * -(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    return torch.from_numpy(subsequent_mask) == 0  # lower-triangular (causal) mask
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)# b h t d_k
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k) # b h t t
if mask is not None:# 1 1 t t
        scores = scores.masked_fill(mask == 0, -1e9)  # b h t t, keep only the lower triangle (causal)
p_attn = F.softmax(scores, dim = -1)# b h t t
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn # b h t v (d_k)
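# Minimal sketch (not part of the original file): scaled dot-product attention
# combined with the causal mask produced by subsequent_mask.
def _demo_attention():
    q = k = v = torch.randn(2, 4, 6, 8)     # batch, heads, time, d_k
    mask = subsequent_mask(6).unsqueeze(1)  # 1 x 1 x 6 x 6 causal mask
    out, p_attn = attention(q, k, v, mask=mask)
    print(out.shape, p_attn.shape)          # (2, 4, 6, 8) and (2, 4, 6, 6)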
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, self.d_k * self.h), 3)
self.final_linear = nn.Linear(d_model, d_model)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1) # 1 1 t t
nbatches = query.size(0)# b
        input_dim = query.size(1)  # i+1
        feature_dim = query.size(-1)  # d_model
#input size -> # batch_size * d_input * hidden_dim
# d_model => h * d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))] # b num_head d_input d_k
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)# b num_head d_input d_v (d_k)
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)# batch_size * d_input * hidden_dim
#DeCov
        # DeCov regularizer: penalize off-diagonal covariance of each context vector
        DeCov_contexts = x.transpose(0, 1).transpose(1, 2)  # I+1 H B
        # print(DeCov_contexts.shape)
        Covs = cov(DeCov_contexts[0, :, :])
        DeCov_loss = 0.5 * (torch.norm(Covs, p='fro') ** 2 - torch.norm(torch.diag(Covs)) ** 2)
        for i in range(input_dim - 1):  # remaining contexts; input_dim = I+1 (was hard-coded as 17+1-1)
            Covs = cov(DeCov_contexts[i + 1, :, :])
            DeCov_loss += 0.5 * (torch.norm(Covs, p='fro') ** 2 - torch.norm(torch.diag(Covs)) ** 2)
return self.final_linear(x), DeCov_loss
class LayerNorm(nn.Module):
def __init__(self, size, eps=1e-7):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(size))
self.b_2 = nn.Parameter(torch.zeros(size))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
def cov(m, y=None):
if y is not None:
m = torch.cat((m, y), dim=0)
m_exp = torch.mean(m, dim=1)
x = m - m_exp[:, None]
cov = 1 / (x.size(1) - 1) * x.mm(x.t())
return cov
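# Quick check (not part of the original file): cov matches numpy's covariance
# of row variables (ddof = 1).
def _demo_cov():
    m = torch.randn(3, 100)
    ref = torch.from_numpy(np.cov(m.numpy())).float()
    print(torch.allclose(cov(m), ref, atol=1e-5))  # True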
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
returned_value = sublayer(self.norm(x))
return x + self.dropout(returned_value[0]) , returned_value[1]
class AICare(nn.Module):
def __init__(self, input_dim=17, hidden_dim=32, d_model=32, MHD_num_head=4, d_ff=64, output_dim=1, device='cuda', keep_prob=0.5):
super(AICare, self).__init__()
# hyperparameters
self.input_dim = input_dim
self.hidden_dim = hidden_dim # d_model
self.d_model = d_model
self.MHD_num_head = MHD_num_head
self.device = device
self.d_ff = d_ff
self.output_dim = output_dim
self.keep_prob = keep_prob
# layers
self.PositionalEncoding = PositionalEncoding(self.d_model, dropout = 0, max_len = 400)
# self.GRUs = clones(nn.GRU(1, self.hidden_dim, batch_first = True), self.input_dim)
self.GRUs = clones(nn.RNN(1, self.hidden_dim, bidirectional = True, batch_first = True), self.input_dim)
self.LastStepAttentions = clones(SingleAttention(self.hidden_dim, 8, attention_type='concat', demographic_dim=12, time_aware=True, use_demographic=False),self.input_dim)
self.FinalAttentionQKV = FinalAttentionQKV(self.hidden_dim, self.hidden_dim, attention_type='mul',dropout = 1 - self.keep_prob)
self.MultiHeadedAttention = MultiHeadedAttention(self.MHD_num_head, self.d_model,dropout = 1 - self.keep_prob)
self.SublayerConnection = SublayerConnection(self.d_model, dropout = 1 - self.keep_prob)
self.PositionwiseFeedForward = PositionwiseFeedForward(self.d_model, self.d_ff, dropout=0.1)
self.demo_proj_main = nn.Linear(4, self.hidden_dim)
self.demo_proj = nn.Linear(4, self.hidden_dim)
self.output = nn.Linear(self.hidden_dim*2, self.output_dim)
self.dropout = nn.Dropout(p = 1 - self.keep_prob)
self.tanh=nn.Tanh()
self.softmax = nn.Softmax()
self.sigmoid = nn.Sigmoid()
self.relu=nn.ReLU()
def forward(self, input, demo_input, lens):
# input shape [batch_size, timestep, feature_dim]
demo_main = self.tanh(self.demo_proj_main(demo_input)).unsqueeze(1)# b hidden_dim
batch_size = input.size(0)
time_step = input.size(1)
feature_dim = input.size(2)
assert(feature_dim == self.input_dim)# input Tensor : 256 * 48 * 76
assert(self.d_model % self.MHD_num_head == 0)
GRU_embeded_input = torch.sum(self.GRUs[0](pack_padded_sequence(input[:,:,0].unsqueeze(-1), lens.cpu(), batch_first=True, enforce_sorted=False))[1], 0).squeeze().unsqueeze(1) # b 1 h
# print(GRU_embeded_input.shape)
for i in range(feature_dim-1):
embeded_input = torch.sum(self.GRUs[i+1](pack_padded_sequence(input[:,:,i+1].unsqueeze(-1), lens.cpu(), batch_first=True, enforce_sorted=False))[1], 0).squeeze().unsqueeze(1) # b 1 h
GRU_embeded_input = torch.cat((GRU_embeded_input, embeded_input), 1)
# print(demo_main.shape)
GRU_embeded_input = torch.cat((GRU_embeded_input, demo_main), 1)# b i+1 h
posi_input = self.dropout(GRU_embeded_input) # batch_size * d_input * hidden_dim
weighted_contexts = self.FinalAttentionQKV(posi_input)[0]
combined_hidden = torch.cat((weighted_contexts, \
demo_main.squeeze(1)),-1)#b n h
output = self.output(self.dropout(combined_hidden))# b 1
output = self.sigmoid(output)
return output
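# Smoke-test sketch (not part of the original file), run on CPU with random
# data: a batch of 4 series with 48 time steps, 17 features and 4 demographic
# fields; lengths are the valid time steps per series.
def _demo_aicare():
    model = AICare(input_dim=17, hidden_dim=32, device='cpu').eval()
    x, demo = torch.randn(4, 48, 17), torch.randn(4, 4)
    lens = torch.tensor([48, 40, 30, 20])
    with torch.no_grad():
        out = model(x, demo, lens)
    print(out.shape)  # torch.Size([4, 1]) -- sigmoid risk scores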
| 19,067 | 42.042889 | 195 | py |
GANFingerprints | GANFingerprints-master/classifier/nets/resnet_utils.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
slim = tf.contrib.slim
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and
returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
"""
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')
net = subsample(net, factor=stride)
whereas
net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
padding='SAME', scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(inputs,
[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
rate=rate, padding='VALID', scope=scope)
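# Worked example (not part of the original file) of the explicit-padding
# arithmetic above, e.g. _pad_amounts(3, 2) == (2, 2): a 3x3 kernel at atrous
# rate 2 has effective extent 3 + (3 - 1) * (2 - 1) = 5, hence pad_total = 4.
def _pad_amounts(kernel_size, rate):
  kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
  pad_total = kernel_size_effective - 1
  pad_beg = pad_total // 2
  return pad_beg, pad_total - pad_beg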
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None,
store_non_strided_activations=False,
outputs_collections=None):
"""Stacks ResNet `Blocks` and controls output feature density.
First, this function creates scopes for the ResNet in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the ResNet
output_stride, which is the ratio of the input to output spatial resolution.
This is useful for dense prediction tasks such as semantic segmentation or
object detection.
Most ResNets consist of 4 ResNet blocks and subsample the activations by a
factor of 2 when transitioning between consecutive ResNet blocks. This results
  in a nominal ResNet output_stride equal to 8. If we set the output_stride to
half the nominal network stride (e.g., output_stride=4), then we compute
responses twice.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
network stride. If output_stride is not `None`, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of the ResNet.
For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
higher resolution intermediate activations which are useful in some
dense prediction problems but increases 4x the computation and memory cost
at the last unit of each block.
outputs_collections: Collection to add the ResNet block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]) as sc:
block_stride = 1
for i, unit in enumerate(block.args):
if store_non_strided_activations and i == len(block.args) - 1:
# Move stride from the block's last unit to the end of the block.
block_stride = unit.get('stride', 1)
unit = dict(unit, stride=1)
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get('stride', 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get('stride', 1)
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
# Collect activations at the block's end before performing subsampling.
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
# Subsampling of the block's output activations.
if output_stride is not None and current_stride == output_stride:
rate *= block_stride
else:
net = subsample(net, block_stride)
current_stride *= block_stride
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
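# Illustrative sketch (not part of the original file): a ResNet `Block` is a
# plain named tuple; a real unit_fn such as the bottleneck from resnet_v1.py
# or resnet_v2.py is supplied by those modules.
def _demo_block(unit_fn):
  return Block('block1', unit_fn,
               [{'depth': 256, 'depth_bottleneck': 64, 'stride': 1}] * 2 +
               [{'depth': 256, 'depth_bottleneck': 64, 'stride': 2}])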
def resnet_arg_scope(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
activation_fn=tf.nn.relu,
use_batch_norm=True,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Whether or not to use batch normalization.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': batch_norm_updates_collections,
'fused': None, # Use fused batch norm if possible.
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=slim.batch_norm if use_batch_norm else None,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
| 11,901 | 42.123188 | 80 | py |
GANFingerprints | GANFingerprints-master/classifier_visNet/nets/resnet_utils.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
slim = tf.contrib.slim
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and
returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
"""
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')
net = subsample(net, factor=stride)
whereas
net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
padding='SAME', scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(inputs,
[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
rate=rate, padding='VALID', scope=scope)
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None,
store_non_strided_activations=False,
outputs_collections=None):
"""Stacks ResNet `Blocks` and controls output feature density.
First, this function creates scopes for the ResNet in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the ResNet
output_stride, which is the ratio of the input to output spatial resolution.
This is useful for dense prediction tasks such as semantic segmentation or
object detection.
Most ResNets consist of 4 ResNet blocks and subsample the activations by a
factor of 2 when transitioning between consecutive ResNet blocks. This results
  in a nominal ResNet output_stride equal to 8. If we set the output_stride to
half the nominal network stride (e.g., output_stride=4), then we compute
responses twice.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
network stride. If output_stride is not `None`, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of the ResNet.
For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
higher resolution intermediate activations which are useful in some
dense prediction problems but increases 4x the computation and memory cost
at the last unit of each block.
outputs_collections: Collection to add the ResNet block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]) as sc:
block_stride = 1
for i, unit in enumerate(block.args):
if store_non_strided_activations and i == len(block.args) - 1:
# Move stride from the block's last unit to the end of the block.
block_stride = unit.get('stride', 1)
unit = dict(unit, stride=1)
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get('stride', 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get('stride', 1)
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
# Collect activations at the block's end before performing subsampling.
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
# Subsampling of the block's output activations.
if output_stride is not None and current_stride == output_stride:
rate *= block_stride
else:
net = subsample(net, block_stride)
current_stride *= block_stride
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
def resnet_arg_scope(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
activation_fn=tf.nn.relu,
use_batch_norm=True,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Whether or not to use batch normalization.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': batch_norm_updates_collections,
'fused': None, # Use fused batch norm if possible.
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=slim.batch_norm if use_batch_norm else None,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
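# Usage sketch (hypothetical, not part of this module): the returned scope is
# meant to wrap one of the resnet_v1/resnet_v2 builders that ship alongside
# these utilities, e.g.:
#
#   with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
#       net, end_points = resnet_v1_50(inputs, num_classes=1000)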
| 11,901 | 42.123188 | 80 | py |
SpectralANN | SpectralANN-main/MonteCarloTest.py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 11:22:43 2021
@author: Thibault
"""
import torch
from ACANN import ACANN
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import inputParameters as config
import pandas as pd
from matplotlib.legend_handler import HandlerTuple
from torch.utils.data import TensorDataset
from scipy import integrate
from scipy.interpolate import interp1d
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
#Load test data
path = "C:/Users/Thibault/Documents/Universiteit/Honours/Deel 2, interdisciplinair/Code/NN/MonteCarloDataset/"
filename = "gluon_bare_64x4_1801Conf_20000bootsamples.dat"
# filename = "gluon_bare_80x4_1801Conf_18010bootsamples.dat"
x = np.loadtxt(path+filename)
#Load input parameters from inputParameters.py
nbrWs = config.nbrWs
nbrOfPoles = config.nbrOfPoles
sizeOfTraining = config.trainingPoints
sizeOfValidation = config.validationPoints
outputSize = nbrWs + (4 * nbrOfPoles) + 1
pstart = config.pstart
pend = config.pend
nbrPoints = config.nbrPoints
inputSize=nbrPoints
print("NN input size {}, output size {} plus {} poles and sigma".format(inputSize,outputSize-4*nbrOfPoles-1,nbrOfPoles))
ps = np.linspace(pstart,pend,nbrPoints)
psInterp = np.linspace(-1,1,nbrPoints)
ws = np.linspace(0.01,10,nbrWs)
# #Load the saved NN model (made in train_ACANN.py)
saved = "savedNNmodel.pth"
#Note: Make sure the dimensions are the same
model = ACANN(inputSize,outputSize,6*[600],drop_p=0.1).double()
model.load_state_dict(torch.load(saved))
model.eval()
#Parse text file:
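# The .dat file concatenates many bootstrap samples of the same propagator;
# a large backwards jump in the momentum column (first entry of each row)
# marks the start of a new sample, so the stream is split there.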
propLists = []
currentList = []
lastP = 0
for i in range(len(x)):
if x[i][0] < lastP-7:
currentList.sort(key=lambda x: x[0])
currentList.append([10,0,currentList[-1][2]])
propLists.append(currentList)
currentList = [x[i]]
lastP = 0
else:
currentList.append(x[i])
lastP = x[i][0]
#Remove first (has more datapoints):
propLists = propLists[1:]
print("Loaded propagators")
# maxAmount = 50
actualPropagators = []
NNinputs = []
for i in range(len(propLists)):
psT = [item[0] for item in propLists[i]]
dp2snoscale = [item[1] for item in propLists[i]]
dp2sFunc = interp1d(psT,dp2snoscale)
dp2sInter = dp2sFunc(ps)
dp2s = [item/(dp2sInter[12]) for item in dp2sInter]
actualPropagators.append(dp2s)
NNinputs.append(dp2s)
# if i > maxAmount:
# break
NNinputs = pd.DataFrame(NNinputs)
testloader = DataLoader(TensorDataset((torch.tensor(NNinputs.values).double()).to("cuda:0")))
iterloader = iter(testloader)
print("Data Loaded")
#Use NN to predict
predicList = []
with torch.no_grad():
for i in range(len(testloader)):
propData = next(iterloader)[0]
prediction = model.forward(propData)
predicData = prediction.to("cpu").numpy()
predicList.append(predicData)
predicData = []
for i in range(len(predicList)):
predicData.append(predicList[i][0])
def plotPolesForIndex(i,ax):
polemarkers = ["o","^","*"]
msizes = [7,9,11]
for j in range(nbrOfPoles):
#Only plot the poles
cj = predicData[i][nbrWs + 4*j + 2]
dj = predicData[i][nbrWs + 4*j + 3]
ax.plot(cj,dj,polemarkers[j],color="cyan",label="Reconstructed poles",markersize=msizes[j])
ax.grid()
ax.set_xlabel("Re(q)")
ax.set_ylabel("Im(q)")
ax.set_xlim([0.15,0.4])
ax.set_ylim([0.2,0.8])
def plotResiduesForIndex(i,ax):
resmarkers = ["o","^","*"]
msizes = [7,9,11]
for j in range(nbrOfPoles):
#Only plot the poles
aj = predicData[i][nbrWs + 4*j]
bj = predicData[i][nbrWs + 4*j + 1]
ax.plot(aj,bj,marker=resmarkers[j],color="lawngreen",label="Reconstructed residues",markersize=msizes[j])
ax.grid()
ax.set_xlabel("Re(R)")
ax.set_ylabel("Im(R)")
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-0.2,1.2])
#Reconstruct propagator from reconstructed spectral function and poles:
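# The propagator is rebuilt from the spectral representation
#     D(p^2) = int_sigma^inf rho(w)/(p^2 + w) dw  +  sum_j 2*Re[ R_j / (p^2 + q_j) ],
# with residues R_j = a_j + i*b_j and poles q_j = c_j + i*d_j; expanding the real
# part of each conjugate pair yields the nom/denom expression in poles() below.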
def poles(p,N,poleList):
jsum = 0
for j in range(N):
a = poleList[j*4]
b = poleList[j*4+1]
c = poleList[j*4+2]
d = poleList[j*4+3]
nom = 2*(a*(c+p)+b*d)
denom = c**2 + 2*c*p + d**2 + p**2
jsum += nom/denom
return jsum
def reconstructProp(index):
sigma = predicData[index][-1]
wscutoff = min(range(len(ws)), key=lambda i: abs(ws[i]-sigma))
reconstructedPropSigma = []
for p in ps:
spectrFunc = []
for i in range(wscutoff,len(ws)):
spectrFunc.append(predicData[index][i]/(p**2+ws[i]))
integral = integrate.simpson(spectrFunc,x=ws[wscutoff:])
prop = integral + poles(p**2,3, predicData[index][nbrWs:nbrWs+12])
reconstructedPropSigma.append(prop)
rescaling = reconstructedPropSigma[12]
for i in range(len(ps)):
reconstructedPropSigma[i] = reconstructedPropSigma[i]/rescaling
return reconstructedPropSigma
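# Sum-rule check (presumably Eq. (15) of the accompanying text, going by the
# function name): the integral of rho above the cutoff plus twice the sum of
# the real residue parts should be close to zero; a tolerance of +/- 0.5 is used.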
def constraint15(index):
sigma = predicData[index][-1]
wscutoff = min(range(len(ws)), key=lambda i: abs(ws[i]-sigma))
res = integrate.simpson(predicData[index][wscutoff:nbrWs],x=ws[wscutoff:])
jsum = 0
for j in range(3):
jsum += 2 * predicData[index][nbrWs+j*4]
res += jsum
if res < 0.5 and res > -0.5:
return True
return False
def derivativeconstraint(index):
dp0 = actualPropagators[index][0]
dp1 = actualPropagators[index][1]
rho0 = predicData[index][0]
rho1 = predicData[index][1]
derivativeRho = (rho1 - rho0)/(ws[1]-ws[0])
derivativeProp = (dp1 - dp0)/(ps[1]**2-ps[0]**2)
# print((ps[1]**2-ps[0]**2),(ps[1]-ps[0]))
derivativePoles = 0
for j in range(3):
a = predicData[index][nbrWs+j*4]
b = predicData[index][nbrWs+j*4+1]
c = predicData[index][nbrWs+j*4+2]
d = predicData[index][nbrWs+j*4+3]
#Alternate but equivalent formula:
derivativePoles += (-2*a*c**2 + 2*a*d**2 + 4*b*c*d)/((c**2 + d**2)**2)
idealDerivative = -np.pi*derivativeRho - derivativePoles
# idealDerivative = -derivativePoles
if derivativeProp < idealDerivative - 1 or \
derivativeProp > idealDerivative + 1:
return False
return True
def positivepropconstraint(index):
if min(reconstructProp(index)) < 0:
return False
return True
def testConstraints(index):
if constraint15(index) and derivativeconstraint(index) and positivepropconstraint(index):
return True
return False
getBestAndWorst = True
if getBestAndWorst:
#Get the best and worst test cases:
maxMAEindex = 0
maxMAE = 0
minMAEindex = 0
minMAE = 100000
constraintsSatisfied1 = 0
constraintsSatisfied2 = 0
constraintsSatisfied3 = 0
constraintsSatisfiedAll = 0
fullSortedList = []
reconProps = []
for i in range(len(actualPropagators)):
MAE = 0
#MAE on propagator
reconProp = reconstructProp(i)
reconProps.append(reconProp)
combListProp = zip(actualPropagators[i],reconProp)
for orig, recon in combListProp:
MAE += abs(orig-recon)
if MAE > maxMAE:
maxMAE = MAE
maxMAEindex = i
if MAE < minMAE:
minMAE = MAE
minMAEindex = i
fullSortedList.append((MAE,i))
if positivepropconstraint(i):
constraintsSatisfied1 += 1
if derivativeconstraint(i):
constraintsSatisfied2 += 1
if constraint15(i):
constraintsSatisfied3 += 1
if testConstraints(i):
constraintsSatisfiedAll += 1
print(str(constraintsSatisfied1)+"/"+str(len(NNinputs)), "recons satisfied constraint 1")
print(str(constraintsSatisfied2)+"/"+str(len(NNinputs)), "recons satisfied constraint 2")
print(str(constraintsSatisfied3)+"/"+str(len(NNinputs)), "recons satisfied constraint 3")
print(str(constraintsSatisfiedAll)+"/"+str(len(NNinputs)), "recons satisfied all constraints")
fullSortedList.sort()
# print("Sorted all rhos")
print("Min. MAE:",minMAE)
print("Max. MAE:",maxMAE)
percentile50th = fullSortedList[round(2*len(fullSortedList)/4)][1]
print("index of the best, median, and worst:", \
[minMAEindex,percentile50th,maxMAEindex])
fig, ((ax11,ax12,ax13,ax14),(ax31,ax32,ax33,ax34), \
(ax51,ax52,ax53,ax54)) = plt.subplots(3,4)
plotPolesForIndex(minMAEindex, ax13)
plotPolesForIndex(percentile50th,ax33)
plotPolesForIndex(maxMAEindex, ax53)
plotResiduesForIndex(minMAEindex, ax14)
plotResiduesForIndex(percentile50th,ax34)
plotResiduesForIndex(maxMAEindex, ax54)
indices = [minMAEindex,percentile50th,maxMAEindex]
propaxes = [ax11,ax31,ax51]
ps = np.linspace(pstart,pend,nbrPoints)
for i in range(len(propaxes)):
propaxes[i].plot(ps,actualPropagators[indices[i]],label="Propagator")
propaxes[i].plot(ps,reconstructProp(indices[i]),"--",label="Reconstructed propagator",color="red")
propaxes[i].set_xlabel("p")
propaxes[i].set_ylabel("D(p²)")
rhoaxes = [ax12,ax32,ax52]
for i in range(len(rhoaxes)):
#rhoaxes[i].plot(ws,predicData[indices[i]][:nbrWs],"--",label="Reconstructed spectral function",color="red")
sigma = predicData[indices[i]][-1]
rhoaxes[i].axvline(x=sigma,ymin=-10,ymax=10,color='orangered',linestyle='dotted',label='σ')
        rhoaxes[i].plot(ws,predicData[indices[i]][:nbrWs],"--",label="Reconstructed spectral function",color="red")
rhoaxes[i].set_xlabel("ω²")
rhoaxes[i].set_ylabel("ρ(ω)")
handles, labels = ax11.get_legend_handles_labels()
ax11.legend(handles,labels,loc="upper center",bbox_to_anchor=(0.5,1.5))
# handles, labels = ax12.get_legend_handles_labels()
# ax12.legend(handles,labels,loc="upper center",bbox_to_anchor=(0.5,1.5))
handles,labels = ax12.get_legend_handles_labels()
reconTuple = (handles[1],handles[0])
labels = ["Reconstructed spectral function, σ"]
ax12.legend((reconTuple,),labels,
numpoints=1, handler_map={tuple: HandlerTuple(ndivide=2,pad=1.3)},
loc="upper center",bbox_to_anchor=(0.5,1.5),handlelength=4)
handles,labels = ax13.get_legend_handles_labels()
reconTuple = (handles[0],handles[1],handles[2])
labels = ["Reconstructed poles"]
ax13.legend((reconTuple,),labels,scatterpoints=3,
numpoints=1, handler_map={tuple: HandlerTuple(ndivide=3,pad=1.3)},
loc="upper center",bbox_to_anchor=(0.5,1.5),handlelength=4)
handles,labels = ax14.get_legend_handles_labels()
reconTuple = (handles[0],handles[1],handles[2])
labels = ["Reconstructed residues"]
ax14.legend((reconTuple,),labels,scatterpoints=3,
numpoints=1, handler_map={tuple: HandlerTuple(ndivide=3,pad=1.3)},
loc="upper center",bbox_to_anchor=(0.5,1.5),handlelength=4)
| 11,372 | 29.328 | 120 | py |
SpectralANN | SpectralANN-main/train_ACANN.py | from ACANN import ACANN
from Database import Database
from torch.nn.modules.loss import KLDivLoss,L1Loss,MSELoss
from torch.optim import Adam,Rprop,Adamax, RMSprop,SGD,LBFGS,AdamW
from torch.utils.data import DataLoader
import torch
import inputParameters as config
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
#Load input parameters from inputParameters.py
inputSize = config.nbrPoints
nbrWs = config.nbrWs
nbrOfPoles = config.nbrOfPoles
sizeOfTraining = config.trainingPoints
sizeOfValidation = config.validationPoints
outputSize = nbrWs + (4 * nbrOfPoles) + 1
print("outputsize:",outputSize)
print("Input parameters loaded")
print("Starting ACANN")
# Create the network
MAEsplot = []
# layers = [2,4,6,8,10]
# neuronsPerLayer = [200,400,600,800]
layers = [6]
neuronsPerLayer = [600]
for layer in layers:
for neuronNbr in neuronsPerLayer:
print(layer,"layers",neuronNbr,"neurons per layer")
model = ACANN(inputSize,outputSize,layer*[neuronNbr],drop_p=0.1).double()
epochs = 100
batch_size_train = 100
# Import the data
path = "C:/Users/Thibault/Documents/Universiteit/Honours/Deel 2, interdisciplinair/Code/NN/Datasets/"
train_data = Database(csv_target= path + "rhoTraining.csv",csv_input= path + "DTraining.csv",nb_data=epochs*batch_size_train).get_loader()
validation_data=Database(csv_target= path + "rhoValidation.csv",csv_input= path + "DValidation.csv",nb_data=sizeOfValidation).get_loader()
trainloader = DataLoader(train_data,batch_size=batch_size_train,shuffle=True)
validationloader = DataLoader(validation_data,batch_size=batch_size_train,shuffle=True)
# Define a function for computing the validation score
def validation_score(nn_model):
#Turn evaluation mode on (turns off dropout and batch normalization):
nn_model.eval()
#Loss function
val_error=MSELoss()
#Turn off gradient computation
with torch.no_grad():
G_val,A_val=next(iter(validationloader))
prediction=nn_model.forward(G_val)
#Prediction and A_val are tensors with lists of size batch_size
#The lists contain (outputSize) data values
score=val_error(prediction,A_val)
#Turn training mode back on
nn_model.train()
return score.item()
#Define the loss
error = MSELoss()
#Define the optimizer
optimizer = Adam(model.parameters())
# Training parameters
step=-1
print_every = epochs
# Training
best_valscore = 10000
MAEs = []
stop = False
for e in range(epochs):
if not stop:
#Turn training mode on
model.train()
# Load a minibatch
for D,rho in trainloader:
step+=1
# restart the optimizer
optimizer.zero_grad()
# compute the loss
prediction = model.forward(D)
# print(prediction)
loss = error(prediction,rho)
# Compute the gradient and optimize
loss.backward()
optimizer.step()
# Write the result
if step % print_every == 0:
step=0
print("Epoch {}/{} : ".format(e+1,epochs),
"Training MAE = {} -".format(loss.item()),
"Validation MAE = {}".format(validation_score(model)))
MAEs.append(validation_score(model))
print("Saved model with validation MAE of", validation_score(model))
torch.save(model.state_dict(),'savedNNmodel.pth')
plt.figure()
plt.plot(list(range(1,len(MAEs)+1)),MAEs)
plt.title("Validation loss")
plt.xlabel("Epochs")
plt.ylabel("MSE")
MAEsplot.append(MAEs[-1])
print(MAEsplot)
| 4,320 | 35.008333 | 146 | py |
SpectralANN | SpectralANN-main/robustnessCheck.py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 2 17:54:33 2021
@author: Thibault
"""
#1: add noise nbrOfSamples times to the same propagator
#2: Convert to correct input
#3: Input to NN
#4: Calc average and stddev of spectral functions
indices = [227, 1552, 112, 1243, 606]
nbrOfSamples = 100
noiseSize = 1e-2
from Database import Database
from torch.utils.data import DataLoader
import inputParameters as config
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.legend_handler import HandlerTuple
np.random.seed(64)
#Load input parameters from inputParameters.py
sizeOfTraining = config.trainingPoints
sizeOfValidation = config.validationPoints
pstart = config.pstart
pend = config.pend
nbrPoints = config.nbrPoints
path = "C:/Users/Thibault/Documents/Universiteit/\
Honours/Deel 2, interdisciplinair/Code/NN/Datasets/"
train_data = Database(csv_target= path + "rhoTraining.csv",\
csv_input= path + "DTrainingRaw.csv",nb_data=sizeOfTraining).get_loader()
trainloader = DataLoader(train_data,batch_size=sizeOfTraining)
print("Training data loaded")
test_data = Database(csv_target= path + "rhoTest.csv",\
csv_input= path + "DTestRaw.csv",nb_data=sizeOfValidation).get_loader()
testloader = DataLoader(test_data,batch_size=sizeOfValidation)
print("Test data loaded")
#get propagator data:
alldatatensors = list(trainloader)
alldata = alldatatensors[0][0].to("cpu").numpy()
alldatatensorsTest = list(testloader)
alldataTest = alldatatensorsTest[0][0].to("cpu").numpy()
print(len(alldata),"training points")
path = "C:/Users/Thibault/Documents/Universiteit/Honours/Deel 2, interdisciplinair/Code/NN/Datasets/"
"""
########################
Test robustness of NN:
########################
"""
import torch
from ACANN import ACANN
from Database import Database
from torch.utils.data import DataLoader
import numpy as np
import inputParameters as config
#Load input parameters from inputParameters.py
nbrWs = config.nbrWs
nbrOfPoles = config.nbrOfPoles
sizeOfTraining = config.trainingPoints
sizeOfValidation = config.validationPoints
outputSize = nbrWs + (4 * nbrOfPoles) + 1
pstart = config.pstart
pend = config.pend
nbrPoints = config.nbrPoints
inputSize = nbrPoints
print("NN input size {}, output size {} plus {} poles".format(inputSize,outputSize-4*nbrOfPoles,nbrOfPoles))
#Load the saved NN model (made in train_ACANN.py)
# saved = "savedNNmodel.pth"
# #Note: Make sure the dimensions are the same
# model = ACANN(inputSize,outputSize,6*[800],drop_p=0.05).double()
saved = "savedNNmodel.pth"
#Note: Make sure the dimensions are the same
model = ACANN(inputSize,outputSize,6*[600],drop_p=0.1).double()
model.load_state_dict(torch.load(saved))
model.eval()
#Load test data
path = "C:/Users/Thibault/Documents/Universiteit/Honours/Deel 2, interdisciplinair/Code/NN/Datasets/"
test_data = Database(csv_target= path + "rhoTest.csv", \
csv_input= path + "DTestRaw.csv",nb_data=sizeOfValidation).get_loader()
testloader = DataLoader(test_data,batch_size=sizeOfValidation)
testloadList = list(testloader)
rhovaluesList = testloadList[0][1].to("cpu").numpy()
print(len(rhovaluesList),"testing points")
prop_data = pd.read_csv(path+'DTestRaw.csv',header=None,nrows=sizeOfValidation)
propList = prop_data.values.tolist()
print(len(propList),"propagators")
params_data = pd.read_csv(path+'params.csv',header=None,nrows=sizeOfTraining+2*sizeOfValidation)
paramsList = params_data.values.tolist()
print("Data Loaded")
#Evaluate output:
ps = np.linspace(pstart,pend,nbrPoints)
ws = np.linspace(0.01,10,nbrWs)
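# Monte-Carlo robustness test: draw nbrOfSamples noisy copies of one test
# propagator, push them through the network in a single batch, discard
# reconstructions that blow up (max rho >= 10), and report the mean and the
# 5x-scaled standard deviation of the surviving outputs.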
def getMeanAndStdReconstruction(index):
noisyPropsPerIndex = []
for j in range(nbrOfSamples):
noise = np.random.normal(1,noiseSize,nbrPoints)
noisyPropsPerIndex.append(alldataTest[index] * noise)
NNinputsTensor = []
for j in range(nbrOfSamples):
NNinputsTensor.append(noisyPropsPerIndex[j])
NNinputsTensor = torch.DoubleTensor(np.array(NNinputsTensor)).cuda()
#Use NN to predict
with torch.no_grad():
prediction = model.forward(NNinputsTensor)
predicData = prediction.to("cpu").numpy()
filteredPredicData = []
for j in range(nbrOfSamples):
if max(predicData[j][:nbrWs]) < 10:
filteredPredicData.append(predicData[j])
poles = []
for j in range(len(filteredPredicData)):
poles.append(filteredPredicData[j][nbrWs:])
means = np.mean(filteredPredicData,axis=0)[:nbrWs]
stddevs = 5*np.std(filteredPredicData,axis=0)[:nbrWs]
props = []
for j in range(len(filteredPredicData)):
props.append(reconstructProp(filteredPredicData[j]))
propmeans = np.mean(props,axis=0)
propstddevs = 5*np.std(props,axis=0)
return means, stddevs, poles, propmeans, propstddevs
def poles(p,N,poleList):
jsum = 0
for j in range(N):
a = poleList[j*4]
b = poleList[j*4+1]
c = poleList[j*4+2]
d = poleList[j*4+3]
nom = 2*(a*(c+p)+b*d)
denom = c**2 + 2*c*p + d**2 + p**2
jsum += nom/denom
return jsum
def reconstructProp(reconstruction):
from scipy import integrate
sigma = reconstruction[-1]
    # Clamp sigma onto the ws grid: the nearest grid point is used as the
    # cutoff (values below the grid map to the first point, above to the last).
wscutoff = min(range(len(ws)), key=lambda i: abs(ws[i]-sigma))
reconstructedPropSigma = []
for p in ps:
spectrFunc = []
for i in range(wscutoff,len(ws)):
spectrFunc.append(reconstruction[i]/(p**2+ws[i]))
integral = integrate.simpson(spectrFunc,x=ws[wscutoff:])
prop = integral + poles(p**2,3, reconstruction[nbrWs:nbrWs+12])
reconstructedPropSigma.append(prop)
rescaling = reconstructedPropSigma[12]
for i in range(len(ps)):
reconstructedPropSigma[i] = reconstructedPropSigma[i]/rescaling
return reconstructedPropSigma
fig, ((ax11,ax12,ax13,ax14),(ax21,ax22,ax23,ax24),(ax31,ax32,ax33,ax34), \
(ax41,ax42,ax43,ax44),(ax51,ax52,ax53,ax54)) = plt.subplots(5,4)
propaxes = [ax11,ax21,ax31,ax41,ax51]
spectralaxes = [ax12,ax22,ax32,ax42,ax52]
polesaxes = [ax13,ax23,ax33,ax43,ax53]
resaxes = [ax14,ax24,ax34,ax44,ax54]
for i in range(len(indices)):
means,stddevs,polesAndSigma,propmeans,propstddevs = getMeanAndStdReconstruction(indices[i])
propaxes[i].plot(ps,propList[indices[i]],label="Propagator")
propaxes[i].plot(ps,propmeans,"--",label="Mean reconstruction",color="red")
propaxes[i].fill_between(ps,propmeans-propstddevs,propmeans+propstddevs,alpha=0.2, facecolor="red",
label='5*(Standard deviation)')
propaxes[i].set_xlabel("p")
propaxes[i].set_ylabel("D(p²)")
# propaxes[i].set_xscale('log')
#Plot spectral function:
spectralaxes[i].set_xlabel("ω²")
spectralaxes[i].set_ylabel("ρ(ω)")
meansig = np.mean(polesAndSigma,axis=0)[-1]
stdsig = np.std(polesAndSigma,axis=0)[-1]
spectralaxes[i].axvline(x=meansig,ymin=-10,ymax=10,color='orangered',linestyle='dotted')
spectralaxes[i].axvspan(meansig-5*stdsig,meansig+5*stdsig,alpha=0.2, facecolor="red")
sigma = rhovaluesList[indices[i]][-1]
spectralaxes[i].axvline(x=sigma,ymin=-10,ymax=10,color='dodgerblue',label='σ',linestyle='dotted')
spectralaxes[i].plot(ws,rhovaluesList[indices[i]][:nbrWs],label="Spectral function, σ")
spectralaxes[i].plot(ws,means,"--",label="Mean reconstruction",color="red")
spectralaxes[i].fill_between(ws,means-stddevs,means+stddevs,alpha=0.2, facecolor="red",
label='5*(Standard deviation)')
#Plot poles:
aksA,bksA,cksA,dksA = [], [], [], []
for j in range(len(polesAndSigma)):
aks,bks,cks,dks = [], [], [], []
for k in range(3):
aks.append(polesAndSigma[j][4*k])
bks.append(polesAndSigma[j][4*k + 1])
cks.append(polesAndSigma[j][4*k + 2])
dks.append(polesAndSigma[j][4*k + 3])
aksA.append(aks)
bksA.append(bks)
cksA.append(cks)
dksA.append(dks)
aM = np.mean(aksA,axis=0)
bM = np.mean(bksA,axis=0)
cM = np.mean(cksA,axis=0)
dM = np.mean(dksA,axis=0)
resmarkers = ["o","^","*"]
msizes = [7,9,11]
# from scipy.spatial import ConvexHull
for j in range(3):
#Plot all reconstructions:
for k in range(len(aksA)):
resaxes[i].plot(aksA[k][j],bksA[k][j],marker=resmarkers[j],markersize=msizes[j],color="red",alpha=0.1,label="All reconstructions")
for k in range(len(aksA)):
polesaxes[i].plot(cksA[k][j],dksA[k][j],marker=resmarkers[j],markersize=msizes[j],color="red",alpha=0.1,label="All reconstructions")
for j in range(3):
ajOrig = rhovaluesList[indices[i]][nbrWs + 4*j]
bjOrig = rhovaluesList[indices[i]][nbrWs + 4*j + 1]
resaxes[i].plot(ajOrig,bjOrig,marker=resmarkers[j],color="green",label="Original residues",markersize=msizes[j])
resaxes[i].plot(aM[j],bM[j],marker=resmarkers[j],color="lawngreen",label="Mean reconstruction",markersize=msizes[j])
#Plot convex hull:
# aHull = [elem[j] for elem in aksA]
# bHull = [elem[j] for elem in bksA]
# points = np.asarray([list(elem) for elem in zip(aHull,bHull)])
# hull = ConvexHull(points)
# resaxes[i].fill(points[hull.vertices,0],points[hull.vertices,1],'red',alpha=0.4)
cjOrig = rhovaluesList[indices[i]][nbrWs + 4*j + 2]
djOrig = rhovaluesList[indices[i]][nbrWs + 4*j + 3]
polesaxes[i].plot(cjOrig,djOrig,resmarkers[j],color="blue",label="Original poles",markersize=msizes[j])
polesaxes[i].plot(cM[j],dM[j],resmarkers[j],color="cyan",label="Mean reconstruction",markersize=msizes[j])
#Plot convex hull:
# cHull = [elem[j] for elem in cksA]
# dHull = [elem[j] for elem in dksA]
# points = np.asarray([list(elem) for elem in zip(cHull,dHull)])
# hull = ConvexHull(points)
# polesaxes[i].fill(points[hull.vertices,0],points[hull.vertices,1],'red',alpha=0.4)
resaxes[i].set_xlim([-1.5,1.5])
resaxes[i].set_ylim([-0.2,1.2])
resaxes[i].grid()
resaxes[i].set_xlabel("Re(R)")
resaxes[i].set_ylabel("Im(R)")
polesaxes[i].set_xlim([0.15,0.4])
polesaxes[i].set_ylim([0.2,0.8])
polesaxes[i].grid()
polesaxes[i].set_xlabel("Re(q)")
polesaxes[i].set_ylabel("Im(q)")
print("Mean, stddev of sigma:", np.mean(polesAndSigma,axis=0)[-1],np.std(polesAndSigma,axis=0)[-1])
handles, labels = ax11.get_legend_handles_labels()
ax11.legend(handles,labels,loc="upper center",bbox_to_anchor=(0.5,1.7))
handles, labels = ax12.get_legend_handles_labels()
ax12.legend(handles,labels,loc="upper center",bbox_to_anchor=(0.5,1.7))
handles,labels = ax12.get_legend_handles_labels()
origTuple = (handles[1],handles[0])
reconTuple = (handles[2])
std = (handles[3])
labels = ["Spectral function, σ","Mean reconstruction", "5*(Standard deviation)"]
ax12.legend((origTuple,reconTuple,std),labels,
numpoints=1, handler_map={tuple: HandlerTuple(ndivide=2,pad=1.3)},
loc="upper center",bbox_to_anchor=(0.5,1.7),handlelength=4)
handles,labels = ax13.get_legend_handles_labels()
# print(handles)
origTuple = (handles[-6],handles[-4],handles[-2])
reconTuple = (handles[-5],handles[-3],handles[-1])
allRecon = (handles[0],handles[int(round(len(handles)/2))],handles[-7])
labels = ["Original poles", "Mean reconstruction","All reconstructions"]
ax13.legend((origTuple,reconTuple,allRecon),labels,scatterpoints=3,
numpoints=1, handler_map={tuple: HandlerTuple(ndivide=3,pad=1.3)},
loc="upper center",bbox_to_anchor=(0.5,1.7),handlelength=4)
handles,labels = ax14.get_legend_handles_labels()
origTuple = (handles[-6],handles[-4],handles[-2])
reconTuple = (handles[-5],handles[-3],handles[-1])
allRecon = (handles[0],handles[int(round(len(handles)/2))],handles[-7])
labels = ["Original residues", "Mean reconstruction","All reconstructions"]
ax14.legend((origTuple,reconTuple,allRecon),labels,scatterpoints=3,
numpoints=1, handler_map={tuple: HandlerTuple(ndivide=3,pad=1.3)},
loc="upper center",bbox_to_anchor=(0.5,1.7),handlelength=4)
# avgRhos.append(avgForIndex)
# print(means)
# print(NNinputs[0])
# plt.figure()
# # plt.plot(ws,predicData[0][:nbrWs],label="Reconstructed spectral function")
# plt.plot(ws,avgForIndex,label="Average reconstruction")
# plt.plot(ws,rhovaluesList[314][:nbrWs],label="Original spectral function")
# plt.legend()
| 12,731 | 33.597826 | 144 | py |
SpectralANN | SpectralANN-main/Database.py | from torch.utils.data import TensorDataset
import pandas as pd
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Using",device)
class Database():
def __init__(self, csv_target, csv_input, transform=None,nb_data=25000):
"""
Build the data set structure
Args:
csv_target (string): path to the target data (A(omega))
csv_input (string) : path to the input data (G(tau))
transform (callable, optional): Optional transform to be applied
on a sample (eg: add noise).
"""
self.input_data = pd.read_csv(csv_input,header=None,nrows=nb_data)
# print(self.input_data)
self.target_data = pd.read_csv(csv_target,header=None,nrows=nb_data)
self.transform = transform
def get_loader(self):
G=torch.tensor(self.input_data.values).double()
# normalization_factor=34.1
# A=torch.tensor(self.target_data.values).double()/normalization_factor
A=torch.tensor(self.target_data.values).double()
        return TensorDataset(G.to(device),A.to(device))
| 1,130 | 38 | 79 | py |
SpectralANN | SpectralANN-main/test_ACANN.py | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 27 10:53:43 2021
@author: Thibault
"""
import torch
from ACANN import ACANN
from Database import Database
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import inputParameters as config
import pandas as pd
from matplotlib.legend_handler import HandlerTuple
from scipy import integrate
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
#Load input parameters from inputParameters.py
nbrWs = config.nbrWs
nbrOfPoles = config.nbrOfPoles
sizeOfTraining = config.trainingPoints
sizeOfValidation = config.validationPoints
outputSize = nbrWs + (4 * nbrOfPoles) + 1
pstart = config.pstart
pend = config.pend
nbrPoints = config.nbrPoints
inputSize = nbrPoints
print("NN input size {}, output size {} plus {} poles and sigma".format(inputSize,outputSize-4*nbrOfPoles-1,nbrOfPoles))
#Load the saved NN model (made in train_ACANN.py)
saved = "savedNNmodel.pth"
#Note: Make sure the dimensions are the same
model = ACANN(inputSize,outputSize,6*[600],drop_p=0.1).double()
model.load_state_dict(torch.load(saved))
model.eval()
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Number of parameters:",params)
#Load test data
path = "C:/Users/Thibault/Documents/Universiteit/Honours/Deel 2, interdisciplinair/Code/NN/Datasets/"
test_data = Database(csv_target= path + "rhoTest.csv", \
csv_input= path + "DTest.csv",nb_data=sizeOfValidation).get_loader()
testloader = DataLoader(test_data,batch_size=sizeOfValidation)
testloadList = list(testloader)
#Convert tensor to numpy array:
rhovaluesList = testloadList[0][1].to("cpu").numpy()
print(len(rhovaluesList),"testing functions")
prop_data = pd.read_csv(path+'DTest.csv',header=None,nrows=sizeOfValidation)
propList = prop_data.values.tolist()
# print(len(propList),"propagators")
params_data = pd.read_csv(path+'params.csv',header=None,nrows=sizeOfTraining+2*sizeOfValidation)
paramsList = params_data.values.tolist()
print("Data Loaded")
origProp = []
#Use NN to predict
with torch.no_grad():
D_test,rho_test = next(iter(testloader))
# print(D_test)
prediction = model.forward(D_test)
# print("output:",prediction)
predicData = prediction.to("cpu").numpy()
# print("output:",predicData)
origProp.append(D_test.to("cpu").numpy())
#Evaluate output:
ps = np.linspace(pstart,pend,nbrPoints)
ws = np.linspace(0.01,10,nbrWs)
def plotPolesForIndex(i,ax):
polemarkers = ["o","^","*"]
msizes = [7,9,11]
for j in range(nbrOfPoles):
#Only plot the poles
cj = predicData[i][nbrWs + 4*j + 2]
dj = predicData[i][nbrWs + 4*j + 3]
cjOrig = rhovaluesList[i][nbrWs + 4*j + 2]
djOrig = rhovaluesList[i][nbrWs + 4*j + 3]
if i != -1:
ax.plot(cjOrig,djOrig,polemarkers[j],color="blue",label="Original poles",markersize=msizes[j])
ax.plot(cj,dj,polemarkers[j],color="cyan",label="Reconstructed poles",markersize=msizes[j])
ax.set_xlim([0.15,0.4])
ax.set_ylim([0.2,0.8])
ax.grid()
ax.set_xlabel("Re(q)")
ax.set_ylabel("Im(q)")
def plotResiduesForIndex(i,ax):
resmarkers = ["o","^","*"]
msizes = [7,9,11]
for j in range(nbrOfPoles):
#Only plot the poles
aj = predicData[i][nbrWs + 4*j]
bj = predicData[i][nbrWs + 4*j + 1]
ajOrig = rhovaluesList[i][nbrWs + 4*j]
bjOrig = rhovaluesList[i][nbrWs + 4*j + 1]
if i != -1:
ax.plot(ajOrig,bjOrig,marker=resmarkers[j],color="green",label="Original residues",markersize=msizes[j])
ax.plot(aj,bj,marker=resmarkers[j],color="lawngreen",label="Reconstructed residues",markersize=msizes[j])
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-0.2,1.2])
ax.grid()
ax.set_xlabel("Re(R)")
ax.set_ylabel("Im(R)")
#Reconstruct propagator from reconstructed spectral function and poles:
def poles(p,N,poleList):
jsum = 0
for j in range(N):
a = poleList[j*4]
b = poleList[j*4+1]
c = poleList[j*4+2]
d = poleList[j*4+3]
nom = 2*(a*(c+p)+b*d)
denom = c**2 + 2*c*p + d**2 + p**2
jsum += nom/denom
return jsum
def reconstructProp(index):
sigma = predicData[index][-1]
wscutoff = min(range(len(ws)), key=lambda i: abs(ws[i]-sigma))
reconstructedPropSigma = []
for p in ps:
spectrFunc = []
for i in range(wscutoff,len(ws)):
spectrFunc.append(predicData[index][i]/(p**2+ws[i]))
integral = integrate.simpson(spectrFunc,x=ws[wscutoff:])
prop = integral + poles(p**2,3, predicData[index][nbrWs:nbrWs+12])
reconstructedPropSigma.append(prop)
rescaling = reconstructedPropSigma[12]
for i in range(len(ps)):
reconstructedPropSigma[i] = reconstructedPropSigma[i]/rescaling
return reconstructedPropSigma
def constraint15(index):
sigma = predicData[index][-1]
wscutoff = min(range(len(ws)), key=lambda i: abs(ws[i]-sigma))
res = integrate.simpson(predicData[index][wscutoff:nbrWs],x=ws[wscutoff:])
jsum = 0
for j in range(3):
jsum += 2 * predicData[index][nbrWs+j*4]
res += jsum
if res < 0.5 and res > -0.5:
return True
return False
def derivativeconstraint(index):
dp0 = origProp[0][index][0]
dp1 = origProp[0][index][1]
rho0 = predicData[index][0]
rho1 = predicData[index][1]
derivativeRho = (rho1 - rho0)/(ws[1]-ws[0])
derivativeProp = (dp1 - dp0)/(ps[1]**2-ps[0]**2)
derivativePoles = 0
for j in range(3):
a = predicData[index][nbrWs+j*4]
b = predicData[index][nbrWs+j*4+1]
c = predicData[index][nbrWs+j*4+2]
d = predicData[index][nbrWs+j*4+3]
#Alternate but equivalent formula:
derivativePoles += (-2*a*c**2 + 2*a*d**2 + 4*b*c*d)/((c**2 + d**2)**2)
idealDerivative = -np.pi*derivativeRho - derivativePoles
if derivativeProp < idealDerivative - 1 or \
derivativeProp > idealDerivative + 1:
return False
return True
def positivepropconstraint(index):
if min(reconstructProp(index)) < 0:
return False
return True
def testConstraints(index):
if constraint15(index) and derivativeconstraint(index) and positivepropconstraint(index):
return True
return False
getBestAndWorst = True
if getBestAndWorst:
#Get the best and worst test cases:
maxMAEindex = 0
maxMAE = 0
minMAEindex = 0
minMAE = 100000
constraintsSatisfied1 = 0
constraintsSatisfied2 = 0
constraintsSatisfied3 = 0
constraintsSatisfiedAll = 0
wssteps = ws[1] - ws[0]
fullSortedList = []
for i in range(len(rhovaluesList)):
MAE = 0
combListAll = zip(rhovaluesList[i],predicData[i])
scale = max(abs(rhovaluesList[i]))
for orig, recon in combListAll:
MAE += abs(orig-recon)/scale
#MAE on spectral function only
# combList = zip(rhovaluesList[i][:nbrWs],predicData[i][:nbrWs])
# scale = max(abs(rhovaluesList[i][:nbrWs]))
# for orig, recon in combList:
# MAE += abs(orig-recon)/scale
#MAE on poles only
# combList = zip(rhovaluesList[i][nbrWs:],predicData[i][nbrWs:])
# for orig, recon in combList:
# MAE += abs(orig-recon)
#MAE on propagator only
# reconProp = reconstructProp(i)
# combListProp = zip(propList[i],reconProp)
# scale = abs(max(propList[i]))
# for orig, recon in combListProp:
# MAE += abs(orig-recon)/scale
if MAE > maxMAE:
maxMAE = MAE
maxMAEindex = i
if MAE < minMAE:
minMAE = MAE
minMAEindex = i
fullSortedList.append((MAE,i))
if positivepropconstraint(i):
constraintsSatisfied1 += 1
if derivativeconstraint(i):
constraintsSatisfied2 += 1
if constraint15(i):
constraintsSatisfied3 += 1
if testConstraints(i):
constraintsSatisfiedAll += 1
fullSortedList.sort()
# print("Sorted all rhos")
print(str(constraintsSatisfied1)+"/"+str(len(rhovaluesList)), "recons satisfied constraint 1")
print(str(constraintsSatisfied2)+"/"+str(len(rhovaluesList)), "recons satisfied constraint 2")
print(str(constraintsSatisfied3)+"/"+str(len(rhovaluesList)), "recons satisfied constraint 3")
print(str(constraintsSatisfiedAll)+"/"+str(len(rhovaluesList)), "recons satisfied all constraints")
print("Min. MAE:",minMAE)
print("Max. MAE:",maxMAE)
percentile25th = fullSortedList[round(len(fullSortedList)/4)][1]
percentile50th = fullSortedList[round(2*len(fullSortedList)/4)][1]
percentile75th = fullSortedList[round(3*len(fullSortedList)/4)][1]
print("index of the best,25prct,50prct,75prct,worst:", \
[minMAEindex,percentile25th,percentile50th,percentile75th,maxMAEindex])
    listOfpoles = []
    for i in range(len(fullSortedList)):
        poleCount = 0  # local counter; do not clobber the global nbrOfPoles used by the plotting functions
        for j in range(3):
            ajOrig = rhovaluesList[i][nbrWs + 4*j]
            bjOrig = rhovaluesList[i][nbrWs + 4*j + 1]
            if ajOrig != 0 or bjOrig != 0:
                poleCount += 1
        if poleCount == 1:
            listOfpoles.append(i)
    print('1 pole:',listOfpoles)
fig, ((ax11,ax12,ax13,ax14),(ax21,ax22,ax23,ax24),(ax31,ax32,ax33,ax34), \
(ax41,ax42,ax43,ax44),(ax51,ax52,ax53,ax54)) = plt.subplots(5,4)
plotPolesForIndex(minMAEindex, ax13)
plotPolesForIndex(percentile25th,ax23)
plotPolesForIndex(percentile50th,ax33)
plotPolesForIndex(percentile75th,ax43)
plotPolesForIndex(maxMAEindex, ax53)
plotResiduesForIndex(minMAEindex, ax14)
plotResiduesForIndex(percentile25th,ax24)
plotResiduesForIndex(percentile50th,ax34)
plotResiduesForIndex(percentile75th,ax44)
plotResiduesForIndex(maxMAEindex, ax54)
indices = [minMAEindex,percentile25th,percentile50th,percentile75th,maxMAEindex]
propaxes = [ax11,ax21,ax31,ax41,ax51]
for i in range(len(propaxes)):
propaxes[i].plot(ps,propList[indices[i]],label="Propagator")
propaxes[i].plot(ps,reconstructProp(indices[i]),"--",label="Reconstructed propagator",color="red")
propaxes[i].set_xlabel("p")
propaxes[i].set_ylabel("D(p²)")
rhoaxes = [ax12,ax22,ax32,ax42,ax52]
for i in range(len(rhoaxes)):
if indices[i] != -1:
rhoaxes[i].plot(ws,rhovaluesList[indices[i]][:nbrWs],label="Spectral function")
sigma = rhovaluesList[indices[i]][-1]
rhoaxes[i].axvline(x=sigma,ymin=-10,ymax=10,color='dodgerblue',label='σ',linestyle='dotted')
rhoaxes[i].plot(ws,predicData[indices[i]][:nbrWs],"--",label="Reconstructed spectral function, σ",color="red")
sigma = predicData[indices[i]][-1]
rhoaxes[i].axvline(x=sigma,ymin=-10,ymax=10,color='orangered',linestyle='dotted',label='Reconstructed σ')
rhoaxes[i].set_xlabel("ω²")
rhoaxes[i].set_ylabel("ρ(ω)")
handles, labels = ax11.get_legend_handles_labels()
ax11.legend(handles,labels,loc="upper center",bbox_to_anchor=(0.5,1.5))
handles,labels = ax12.get_legend_handles_labels()
origTuple = (handles[0],handles[1])
reconTuple = (handles[2],handles[3])
labels = ["Spectral function, σ", "Reconstructed spectral function, σ"]
ax12.legend((origTuple,reconTuple),labels,
numpoints=1, handler_map={tuple: HandlerTuple(ndivide=2,pad=1.3)},
loc="upper center",bbox_to_anchor=(0.5,1.5),handlelength=4)
handles,labels = ax13.get_legend_handles_labels()
origTuple = (handles[0],handles[2],handles[4])
reconTuple = (handles[1],handles[3],handles[5])
labels = ["Original poles", "Reconstructed poles"]
ax13.legend((origTuple,reconTuple),labels,scatterpoints=3,
numpoints=1, handler_map={tuple: HandlerTuple(ndivide=3,pad=1.3)},
loc="upper center",bbox_to_anchor=(0.5,1.5),handlelength=4)
handles,labels = ax14.get_legend_handles_labels()
origTuple = (handles[0],handles[2],handles[4])
reconTuple = (handles[1],handles[3],handles[5])
labels = ["Original residues", "Reconstructed residues"]
ax14.legend((origTuple,reconTuple),labels,scatterpoints=3,
numpoints=1, handler_map={tuple: HandlerTuple(ndivide=3,pad=1.3)},
loc="upper center",bbox_to_anchor=(0.5,1.5),handlelength=4)
# fig.set_tight_layout(True)
| 12,953 | 32.734375 | 120 | py |
SpectralANN | SpectralANN-main/propagatorNoise.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 30 15:06:14 2021
@author: Thibault
"""
from Database import Database
from torch.utils.data import DataLoader
import inputParameters as config
import numpy as np
import pandas as pd
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
noiseSize = 5e-3
#Load input parameters from inputParameters.py
sizeOfTraining = config.trainingPoints
sizeOfValidation = config.validationPoints
pstart = config.pstart
pend = config.pend
nbrPoints = config.nbrPoints
path = "C:/Users/Thibault/Documents/Universiteit/\
Honours/Deel 2, interdisciplinair/Code/NN/Datasets/"
train_data = Database(csv_target= path + "rhoTraining.csv",\
csv_input= path + "DTrainingRaw.csv",nb_data=sizeOfTraining).get_loader()
trainloader = DataLoader(train_data,batch_size=sizeOfTraining)
print("Training data loaded")
validation_data = Database(csv_target= path + "rhoValidation.csv",\
csv_input= path + "DValidationRaw.csv",nb_data=sizeOfValidation).get_loader()
validationloader = DataLoader(validation_data,batch_size=sizeOfValidation)
print("Validation data loaded")
test_data = Database(csv_target= path + "rhoTest.csv",\
csv_input= path + "DTestRaw.csv",nb_data=sizeOfValidation).get_loader()
testloader = DataLoader(test_data,batch_size=sizeOfValidation)
print("Test data loaded")
alldatatensors = list(trainloader)
alldata = alldatatensors[0][0].to("cpu").numpy()
alldatatensorsValid = list(validationloader)
alldataValid = alldatatensorsValid[0][0].to("cpu").numpy()
alldatatensorsTest = list(testloader)
alldataTest = alldatatensorsTest[0][0].to("cpu").numpy()
#Add noise:
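# Multiplicative Gaussian noise: every point of each propagator is scaled by a
# factor drawn from N(1, noiseSize), i.e. roughly 0.5% relative noise for the
# default noiseSize = 5e-3.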
for i in range(len(alldata)):
noise = np.random.normal(1,noiseSize,nbrPoints)
alldata[i] = alldata[i] * noise
for i in range(len(alldataValid)):
noise = np.random.normal(1,noiseSize,nbrPoints)
alldataValid[i] = alldataValid[i] * noise
for i in range(len(alldataTest)):
noise = np.random.normal(1,noiseSize,nbrPoints)
alldataTest[i] = alldataTest[i] * noise
print(len(alldata),"training points")
path = "C:/Users/Thibault/Documents/Universiteit/Honours/Deel 2, interdisciplinair/Code/NN/Datasets/"
#Write data to these files (first deletes old ones)
propTrain_csv = path+'DTraining.csv'
if os.path.exists(propTrain_csv):
os.remove(propTrain_csv)
propValid_csv = path+'DValidation.csv'
if os.path.exists(propValid_csv):
os.remove(propValid_csv)
propTest_csv = path+'DTest.csv'
if os.path.exists(propTest_csv):
os.remove(propTest_csv)
#Write data to files
propTraindf = pd.DataFrame(alldata)
propTraindf.to_csv(propTrain_csv,index=False,header=False,mode='a')
propValiddf = pd.DataFrame(alldataValid)
propValiddf.to_csv(propValid_csv,index=False,header=False,mode='a')
propTestdf = pd.DataFrame(alldataTest)
propTestdf.to_csv(propTest_csv,index=False,header=False,mode='a')
print("Succesfully added artificial noise to training data.")
| 2,951 | 27.660194 | 101 | py |
SpectralANN | SpectralANN-main/ACANN.py | import torch.nn as nn
import torch.nn.functional as F
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ACANN(nn.Module):
def __init__(self,input_size,output_size,hidden_layers,drop_p=0.05):
""" Builds ACANN network with arbitrary number of hidden layers.
Arguments
----------
input_size : integer, size of the input
output_size : integer, size of the output layer
hidden_layers: list of integers, the sizes of the hidden layers
drop_p: float in (0,1) , value of the dropout probability
"""
super().__init__()
# Add the first layer : input_size into the first hidden layer
self.layers = nn.ModuleList([nn.Linear(input_size,hidden_layers[0]).to(device)])
self.normalizations = nn.ModuleList([nn.BatchNorm1d(input_size).to(device)])
# Add the other layers
layers_sizes = zip(hidden_layers[:-1],hidden_layers[1:])
self.layers.extend([nn.Linear(h1,h2).to(device) for h1,h2 in layers_sizes])
self.normalizations.extend([nn.BatchNorm1d(size).to(device) for size in hidden_layers])
self.output=nn.Linear(hidden_layers[-1],output_size).to(device)
self.dropout = nn.Dropout(drop_p).to(device)
def forward(self,x):
# pass through each layers
for layer,normalization in zip(self.layers,self.normalizations):
x=normalization(x)
x=F.relu(layer(x))
x=self.dropout(x)
x=self.output(x)
return x
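# Minimal usage sketch (shapes are illustrative, not prescriptive): build an
# ACANN with two hidden layers of 600 units and run a dummy batch through it.
#
#   model = ACANN(input_size=200, output_size=313, hidden_layers=[600, 600], drop_p=0.1).double()
#   dummy = torch.randn(8, 200, dtype=torch.float64, device=device)
#   out = model(dummy)  # shape: (8, 313)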
| 1,617 | 29.528302 | 95 | py |
NeuralBKI | NeuralBKI-main/generate_results.py | # This file generates results for evaluation by loading semantic predictions from files.
# Not intended for use on-board robot.
import os
import pdb
import time
import json
import rospy
import yaml
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
import numpy as np
import copy
from tqdm import tqdm
# Torch imports
import torch
from torch import nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
# Custom Imports
from Data.utils import *
from Models.model_utils import *
from Models.ConvBKI import *
from Data.Rellis3D import Rellis3dDataset
from Models.mapping_utils import *
from Data.SemanticKitti import KittiDataset
from Data.KittiOdometry import KittiOdomDataset
import time
MODEL_NAME = "ConvBKI_Single"
# MODEL_NAME = "ConvBKI_Single_02_odom"
print("Model is:", MODEL_NAME)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("device is ", device)
# Model Parameters
model_params_file = os.path.join(os.getcwd(), "Config", MODEL_NAME + ".yaml")
with open(model_params_file, "r") as stream:
try:
model_params = yaml.safe_load(stream)
dataset = model_params["dataset"]
except yaml.YAMLError as exc:
print(exc)
# CONSTANTS
SEED = model_params["seed"]
NUM_FRAMES = model_params["num_frames"]
MODEL_RUN_DIR = os.path.join("Models", "Runs", MODEL_NAME + "_" + dataset)
NUM_WORKERS = model_params["num_workers"]
FLOAT_TYPE = torch.float32
LABEL_TYPE = torch.uint8
MAP_METHOD = model_params["map_method"]
LOAD_EPOCH = model_params["load_epoch"]
LOAD_DIR = model_params["save_dir"]
VISUALIZE = model_params["visualize"]
MEAS_RESULT = model_params["meas_result"]
GEN_PREDS = model_params["gen_preds"]
FROM_CONT = model_params["from_continuous"]
TO_CONT = model_params["to_continuous"]
PRED_PATH = model_params["pred_path"]
# Data Parameters
data_params_file = os.path.join(os.getcwd(), "Config", dataset + ".yaml")
with open(data_params_file, "r") as stream:
try:
data_params = yaml.safe_load(stream)
NUM_CLASSES = data_params["num_classes"]
colors = remap_colors(data_params["colors"])
DATA_DIR = data_params["data_dir"]
ignore_labels = data_params["ignore_labels"]
except yaml.YAMLError as exc:
print(exc)
print("Visualize Prediciton:", VISUALIZE)
print("Measure Result:", MEAS_RESULT)
print("Generate Prediction:", GEN_PREDS)
print("")
# Exit if measure result on test set
if MEAS_RESULT and model_params["result_split"] == "test":
print("Error! Measure result can only be ran on train/val sets, test set does not have ground truth labels.")
exit()
# Load data set
if dataset == "rellis":
test_ds = Rellis3dDataset(model_params["test"]["grid_params"], directory=DATA_DIR, device=device,
num_frames=NUM_FRAMES, remap=True, use_aug=False, data_split="test")
elif dataset == "semantic_kitti":
if MEAS_RESULT:
test_ds = KittiDataset(model_params["test"]["grid_params"], directory=DATA_DIR, device=device,
num_frames=NUM_FRAMES, remap=True, use_aug=False, data_split=model_params["result_split"],
from_continuous=FROM_CONT, to_continuous=TO_CONT, pred_path=PRED_PATH)
else:
test_ds = KittiDataset(model_params["test"]["grid_params"], directory=DATA_DIR, device=device,
num_frames=NUM_FRAMES, remap=True, use_aug=False, data_split=model_params["result_split"],
from_continuous=FROM_CONT, to_continuous=TO_CONT, pred_path=PRED_PATH)
elif dataset == "kitti_odometry":
if MEAS_RESULT:
test_ds = KittiOdomDataset(model_params["train"]["grid_params"], directory=DATA_DIR, device=device,
num_frames=NUM_FRAMES, remap=False, use_aug=False, data_split=model_params["result_split"], from_continuous=FROM_CONT,
to_continuous=TO_CONT)
else:
test_ds = KittiOdomDataset(model_params["train"]["grid_params"], directory=DATA_DIR, device=device,
num_frames=NUM_FRAMES, remap=False, use_aug=False, data_split=model_params["result_split"], from_continuous=FROM_CONT,
to_continuous=TO_CONT)
dataloader_test = DataLoader(test_ds, batch_size=1, shuffle=False, collate_fn=test_ds.collate_fn, num_workers=NUM_WORKERS, pin_memory=True)
# Create map object
grid_params = model_params["test"]["grid_params"]
map_object = GlobalMap(
torch.tensor([int(p) for p in grid_params['grid_size']], dtype=torch.long).to(device), # Grid size
torch.tensor(grid_params['min_bound']).to(device), # Lower bound
torch.tensor(grid_params['max_bound']).to(device), # Upper bound
torch.load(os.path.join("Models", "Weights", LOAD_DIR, "filters" + str(LOAD_EPOCH) + ".pt")), # Filters
model_params["filter_size"], # Filter size
num_classes=NUM_CLASSES,
ignore_labels = ignore_labels, # Classes
device=device # Device
)
if VISUALIZE:
rospy.init_node('talker', anonymous=True)
map_pub = rospy.Publisher('SemMap_global', MarkerArray, queue_size=10)
next_map = MarkerArray()
if GEN_PREDS:
if not os.path.exists(MODEL_NAME):
os.mkdir(MODEL_NAME)
# Iteratively loop through each scan
current_scene = None
current_frame_id = None
seq_dir = None
frame_num = 0
total_class = torch.zeros(map_object.num_classes, device=device)
total_int_bki = torch.zeros(map_object.num_classes, device=device)
total_int_seg = torch.zeros(map_object.num_classes, device=device)
total_un_bki = torch.zeros(map_object.num_classes, device=device)
total_un_seg = torch.zeros(map_object.num_classes, device=device)
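# Per-class running IoU accumulators: *_bki scores the fused map predictions,
# *_seg scores the raw single-scan segmentation inputs; IoU_i is later taken
# as intersection_i / union_i accumulated over all evaluated frames.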
total_t = 0.0
for idx in tqdm(range(len(test_ds))):
with torch.no_grad():
# Load data
get_gt = model_params["result_split"] == "train" or model_params["result_split"] == "val"
pose, points, pred_labels, gt_labels, scene_id, frame_id = test_ds.get_test_item(idx, get_gt=get_gt)
if VISUALIZE and MEAS_RESULT:
if dataset == "semantic_kitti":
not_void = (gt_labels != 0)[:, 0]
points = points[not_void, :]
pred_labels = pred_labels[not_void, :]
gt_labels = gt_labels[not_void, :]
if GEN_PREDS and seq_dir is None:
seq_dir = os.path.join(MODEL_NAME, "sequences", str(scene_id).zfill(2), "predictions")
# Reset if new subsequence
if scene_id != current_scene or (frame_id - 1) != current_frame_id:
map_object.reset_grid()
if GEN_PREDS:
seq_dir = os.path.join(MODEL_NAME, "sequences", str(scene_id).zfill(2), "predictions")
frame_num = 0
if not os.path.exists(seq_dir):
os.makedirs(seq_dir)
        # Propagate the global map to the current pose
start_t = time.time()
map_object.propagate(pose)
# Add points to map
labeled_pc = np.hstack((points, pred_labels))
labeled_pc_torch = torch.from_numpy(labeled_pc).to(device=device, non_blocking=True)
map_object.update_map(labeled_pc_torch)
total_t += time.time() - start_t
current_scene = scene_id
current_frame_id = frame_id
if VISUALIZE:
if rospy.is_shutdown():
exit("Closing Python")
try:
if MAP_METHOD == "global" or MAP_METHOD == "local":
map = publish_voxels(map_object, grid_params['min_bound'], grid_params['max_bound'], grid_params['grid_size'], colors, next_map)
map_pub.publish(map)
elif MAP_METHOD == "local":
map = publish_local_map(map_object.local_map, map_object.centroids, grid_params, colors, next_map)
map_pub.publish(map)
except:
exit("Publishing broke")
if MEAS_RESULT:
if dataset == "semantic_kitti":
# Filter out ignore labels
non_ignore_mask = (gt_labels != ignore_labels[0])[:, 0]
points = points[non_ignore_mask, :]
gt_labels = gt_labels[non_ignore_mask, :]
pred_labels = pred_labels[non_ignore_mask, :]
# Make predictions and measure
predictions, local_mask = map_object.label_points(points)
pred_labels = torch.from_numpy(pred_labels).to(device, non_blocking=True)
if pred_labels.shape[1] > 1:
pred_labels = torch.argmax(pred_labels, dim=1)
else:
pred_labels = pred_labels.view(-1)
gt_labels = torch.from_numpy(gt_labels).to(device, non_blocking=True).view(-1)
# TODO: Change this line if needed. Maps outside local mask to segmentation labels.
predictions_temp = pred_labels.detach().clone().to(predictions.dtype)
predictions_temp[local_mask] = predictions[local_mask]
predictions = predictions_temp
for i in range(1, map_object.num_classes):
gt_i = gt_labels == i
pred_bki_i = predictions == i
pred_seg_i = pred_labels == i
total_class[i] += torch.sum(gt_i)
total_int_bki[i] += torch.sum(gt_i & pred_bki_i)
total_int_seg[i] += torch.sum(gt_i & pred_seg_i)
total_un_bki[i] += torch.sum(gt_i | pred_bki_i)
total_un_seg[i] += torch.sum(gt_i | pred_seg_i)
if idx % 100 == 0 and not GEN_PREDS:
print(idx, len(test_ds))
print("BKI:", total_int_bki / total_un_bki * 100)
print("Seg:", total_int_seg / total_un_seg * 100)
if dataset == "kitti_odometry":
dists = np.linalg.norm(points, axis=1)
in_range = dists < 40
points = points[in_range, :]
gt_labels = gt_labels[in_range]
pred_labels = pred_labels[in_range]
predictions, local_mask = map_object.label_points(points)
pred_labels = torch.from_numpy(pred_labels).to(device, non_blocking=True)
if pred_labels.shape[1] > 1:
pred_labels = torch.argmax(pred_labels, dim=1)
else:
pred_labels = pred_labels.view(-1)
gt_labels = torch.from_numpy(gt_labels).to(device, non_blocking=True).view(-1)
# TODO: Mask here?
gt_labels[~local_mask] = ignore_labels[0]
pred_labels[~local_mask] = ignore_labels[0]
for i in range(map_object.num_classes):
gt_i = gt_labels == i
pred_bki_i = predictions == i
pred_seg_i = pred_labels == i
total_class[i] += torch.sum(gt_i)
total_int_bki[i] += torch.sum(gt_i & pred_bki_i)
total_int_seg[i] += torch.sum(gt_i & pred_seg_i)
total_un_bki[i] += torch.sum(gt_i | pred_bki_i)
total_un_seg[i] += torch.sum(gt_i | pred_seg_i)
if GEN_PREDS:
frame_file = os.path.join(seq_dir, str(frame_num).zfill(6) + ".label")
# Make predictions
predictions, local_mask = map_object.label_points(points)
if MEAS_RESULT:
pred_labels = torch.unsqueeze(pred_labels, dim=-1)
if pred_labels.shape[1] > 1:
pred_labels = torch.argmax(pred_labels, dim=1)
else:
pred_labels = pred_labels.view(-1)
else:
pred_labels = torch.from_numpy(pred_labels).to(device)
if pred_labels.shape[1] > 1:
pred_labels = torch.argmax(pred_labels, dim=1)
else:
pred_labels = pred_labels.view(-1)
# Maps outside local mask to segmentation labels.
predictions_temp = pred_labels.detach().clone().to(predictions.dtype)
predictions_temp[local_mask] = predictions[local_mask]
predictions = predictions_temp.view(-1).detach().cpu().numpy().astype(np.uint32)
# Save
predictions.tofile(frame_file)
frame_num += 1
if MEAS_RESULT:
print("Final results:")
if dataset == "kitti_odometry":
bki_result = (total_int_bki / total_un_bki * 100).detach().cpu().numpy()
seg_result = (total_int_seg / total_un_seg * 100).detach().cpu().numpy()
bki_result_t = copy.deepcopy(bki_result)
seg_result_t = copy.deepcopy(seg_result)
Shift = [0, 1, 2, 3, 4, 7, 5, 8, 9, 6, 10]
for i, label in enumerate(Shift):
bki_result[label] = bki_result_t[i]
seg_result[label] = seg_result_t[i]
print("BKI:")
for i in range(bki_result.shape[0]-3):
print(bki_result[i])
print("Seg:")
for i in range(seg_result.shape[0]-3):
print(seg_result[i])
else:
print("Seg:")
for i in range(NUM_CLASSES):
print((total_int_seg[i] / total_un_seg[i] * 100).item())
print("BKI:")
for i in range(NUM_CLASSES):
print((total_int_bki[i] / total_un_bki[i] * 100).item())
| 13,528 | 41.410658 | 150 | py |
NeuralBKI | NeuralBKI-main/train.py | import os
import pdb
import time
import json
import yaml
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
import numpy as np
from tqdm import tqdm
# Torch imports
import torch
from torch import nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
# Custom Imports
from Data.utils import *
from Models.model_utils import *
from Models.ConvBKI import *
from Data.Rellis3D import Rellis3dDataset
from Data.SemanticKitti import KittiDataset
from Data.KittiOdometry import KittiOdomDataset
MODEL_NAME = "ConvBKI_Single"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("device is ", device)
print("Model is", MODEL_NAME)
model_params_file = os.path.join(os.getcwd(), "Config", MODEL_NAME + ".yaml")
with open(model_params_file, "r") as stream:
try:
model_params = yaml.safe_load(stream)
dataset = model_params["dataset"]
SAVE_NAME = model_params["save_dir"]
except yaml.YAMLError as exc:
print(exc)
# CONSTANTS
SEED = model_params["seed"]
DEBUG_MODE = model_params["debug_mode"]
NUM_FRAMES = model_params["num_frames"]
MODEL_RUN_DIR = os.path.join("Models", "Runs", SAVE_NAME)
NUM_WORKERS = model_params["num_workers"]
FLOAT_TYPE = torch.float32
LABEL_TYPE = torch.uint8
FROM_CONT = model_params["from_continuous"]
TO_CONT = model_params["to_continuous"]
PRED_PATH = model_params["pred_path"]
time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S")
if not os.path.exists(MODEL_RUN_DIR):
WEIGHTS_DIR = os.path.join("Models", "Weights")
if os.path.exists(WEIGHTS_DIR):
MODEL_RUN_DIR = MODEL_RUN_DIR + "_" + time_stamp
os.makedirs(MODEL_RUN_DIR)
# Data Parameters
data_params_file = os.path.join(os.getcwd(), "Config", dataset + ".yaml")
with open(data_params_file, "r") as stream:
try:
data_params = yaml.safe_load(stream)
NUM_CLASSES = data_params["num_classes"]
class_frequencies = np.asarray([data_params["class_counts"][i] for i in range(NUM_CLASSES)])
TRAIN_DIR = data_params["data_dir"]
except yaml.YAMLError as exc:
print(exc)
epsilon_w = 1e-5 # eps to avoid zero division
# TODO: Try counting for seq 4, ablation studies on seq 4
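# Inverse log-frequency weighting: rarer classes receive larger NLL weights so
# frequent classes do not dominate the loss; class 0 keeps weight zero and is
# additionally excluded via ignore_index below.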
weights = np.zeros(class_frequencies.shape)
weights[1:] = (1 / np.log(class_frequencies[1:] + epsilon_w) )
weights = torch.from_numpy(weights).to(dtype=FLOAT_TYPE, device=device, non_blocking=True)
criterion = nn.NLLLoss(weight=weights, ignore_index=0)
scenes = [ s for s in sorted(os.listdir(TRAIN_DIR)) if s.isdigit() ]
# Load model
lr = model_params["train"]["lr"]
BETA1 = model_params["train"]["BETA1"]
BETA2 = model_params["train"]["BETA2"]
decayRate = model_params["train"]["decayRate"]
B = model_params["train"]["B"]
EPOCH_NUM = model_params["train"]["num_epochs"]
model_params["device"] = device
model_params["num_classes"] = NUM_CLASSES
model_params["datatype"] = FLOAT_TYPE
model = get_model(MODEL_NAME, model_params=model_params)
if dataset == "rellis":
train_ds = Rellis3dDataset(model_params["train"]["grid_params"], directory=TRAIN_DIR, device=device,
num_frames=NUM_FRAMES, remap=True, use_aug=False)
val_ds = Rellis3dDataset(model_params["train"]["grid_params"], directory=TRAIN_DIR, device=device,
num_frames=NUM_FRAMES, remap=True, use_aug=False, data_split="val")
if dataset == "semantic_kitti":
# Save splits info
train_ds = KittiDataset(model_params["train"]["grid_params"], directory=TRAIN_DIR, device=device,
num_frames=NUM_FRAMES, remap=True, use_aug=True, data_split="train", from_continuous=FROM_CONT,
to_continuous=TO_CONT, pred_path=PRED_PATH)
val_ds = KittiDataset(model_params["train"]["grid_params"], directory=TRAIN_DIR, device=device,
num_frames=NUM_FRAMES, remap=True, use_aug=False, data_split="val", from_continuous=FROM_CONT,
to_continuous=TO_CONT, pred_path=PRED_PATH)
if dataset == "kitti_odometry":
train_ds = KittiOdomDataset(model_params["train"]["grid_params"], directory=TRAIN_DIR, device=device,
num_frames=NUM_FRAMES, remap=False, use_aug=False, data_split="train", from_continuous=FROM_CONT,
to_continuous=TO_CONT, num_classes=model_params["num_classes"])
val_ds = KittiOdomDataset(model_params["train"]["grid_params"], directory=TRAIN_DIR, device=device,
num_frames=NUM_FRAMES, remap=False, use_aug=False, data_split="val", from_continuous=FROM_CONT,
to_continuous=TO_CONT, num_classes=model_params["num_classes"])
dataloader_train = DataLoader(train_ds, batch_size=B, shuffle=True, collate_fn=train_ds.collate_fn, num_workers=NUM_WORKERS, pin_memory=True)
dataloader_val = DataLoader(val_ds, batch_size=B, shuffle=False, collate_fn=val_ds.collate_fn, num_workers=NUM_WORKERS, pin_memory=True)
trial_dir = MODEL_RUN_DIR
save_dir = os.path.join("Models", "Weights", SAVE_NAME)
if not DEBUG_MODE:
if os.path.exists(save_dir):
save_dir_before = save_dir
save_dir = save_dir + "_" + time_stamp
print("Pretrained model already exists at: {}, the new trained model will be saved at: {} \n".format(save_dir_before, save_dir))
if not os.path.exists(trial_dir):
os.makedirs(trial_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
writer = SummaryWriter(MODEL_RUN_DIR)
# Optimizer setup
setup_seed(SEED)
if model_params["train"]["opt"] == "Adam":
optimizer = optim.Adam(model.parameters(), lr=lr, betas=(BETA1, BETA2))
else:
optimizer = optim.SGD(model.parameters(), lr=lr)
my_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=decayRate)
# Note: this CosineAnnealingLR was instantiated but never assigned or
# stepped, so it had no effect on training (only my_lr_scheduler above
# is used); kept commented out for reference.
# torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=100, eta_min=1e-4, verbose=True)
train_count = 0
min_bound_torch = torch.from_numpy(train_ds.min_bound).to(device=device)
grid_dims_torch = torch.from_numpy(train_ds.grid_dims).to(dtype=torch.int, device=device)
voxel_sizes_torch = torch.from_numpy(train_ds.voxel_sizes).to(device=device)
if DEBUG_MODE:
rospy.init_node('talker', anonymous=True)
map_pub = rospy.Publisher('SemMap_global', MarkerArray, queue_size=10)
def semantic_loop(dataloader, epoch, train_count=None, training=False):
num_correct = 0
num_total = 0
all_intersections = np.zeros(NUM_CLASSES)
all_unions = np.zeros(NUM_CLASSES) + 1e-6 # SMOOTHING
next_map = MarkerArray()
for points, points_labels, gt_labels in tqdm(dataloader):
batch_gt = torch.zeros((0, 1), device=device, dtype=LABEL_TYPE)
batch_preds = torch.zeros((0, NUM_CLASSES), device=device, dtype=FLOAT_TYPE)
optimizer.zero_grad()
for b in range(len(points)):
current_map = model.initialize_grid()
if model_params["train"]["remove_last"]:
pc_np = np.vstack(np.array(points[b][:-1]))
labels_np = np.vstack(np.array(points_labels[b][:-1]))
else:
pc_np = np.vstack(np.array(points[b], dtype=object))
labels_np = np.vstack(np.array(points_labels[b], dtype=object))
labeled_pc = np.hstack((pc_np, labels_np))
if labeled_pc.shape[0] == 0: # Zero padded
print("Something is very wrong!")
exit()
labeled_pc_torch = torch.from_numpy(labeled_pc).to(device=device, non_blocking=True)
preds = model(current_map, labeled_pc_torch)
gt_sem_labels = torch.from_numpy(gt_labels[b]).to(device=device, non_blocking=True)
if DEBUG_MODE:
grid_params = model_params["test"]["grid_params"]
colors = remap_colors(data_params["colors"])
if rospy.is_shutdown():
exit("Closing Python")
try:
next_map = publish_local_map(preds, model.centroids, grid_params, colors, next_map)
map_pub.publish(next_map)
except Exception: # avoid a bare except, which would also swallow KeyboardInterrupt/SystemExit
exit("Publishing broke")
last_pc_torch = torch.from_numpy(points[b][-1]).to(device=device, non_blocking=True)
sem_preds = points_to_voxels_torch(preds, last_pc_torch,
min_bound_torch, grid_dims_torch, voxel_sizes_torch)
# Evaluate on last frame in scan (most recent one)
sem_preds = sem_preds / torch.sum(sem_preds, dim=-1, keepdim=True)
# Remove all that are 0 zero label
# TODO change to use ignore list
non_void_mask = gt_sem_labels[:, 0] != 0
batch_gt = torch.vstack((batch_gt, gt_sem_labels[non_void_mask, :]))
batch_preds = torch.vstack((batch_preds, sem_preds[non_void_mask, :]))
batch_gt = batch_gt.reshape(-1)
# Remove ignore labels
not_ignore_mask = batch_gt != 0
batch_preds = batch_preds[not_ignore_mask, :]
batch_gt = batch_gt[not_ignore_mask]
loss = criterion(torch.log(batch_preds), batch_gt.long())
if training:
if DEBUG_MODE:
if model.compound:
print("Z:", model.ell_z)
print("H:", model.ell_h)
else:
print(model.ell)
loss.backward()
optimizer.step()
# Accuracy
with torch.no_grad():
# Softmax on expectation
max_batch_preds = torch.argmax(batch_preds, dim=-1)
preds_masked = max_batch_preds.cpu().numpy()
voxels_np = batch_gt.detach().cpu().numpy()
num_correct += np.sum(preds_masked == voxels_np)
num_total += voxels_np.shape[0]
accuracy = np.sum(preds_masked == voxels_np) / voxels_np.shape[0]
inter, union = iou_one_frame(max_batch_preds, batch_gt, n_classes=NUM_CLASSES)
union += 1e-6
all_intersections += inter
all_unions += union
# Record
if training:
writer.add_scalar(SAVE_NAME + '/Loss/Train', loss.item(), train_count)
writer.add_scalar(SAVE_NAME + '/Accuracy/Train', accuracy, train_count)
writer.add_scalar(SAVE_NAME + '/mIoU/Train', np.mean(inter / union), train_count)
train_count += len(points)
# Save model, decrease learning rate
if training:
my_lr_scheduler.step()
print("Epoch ", epoch, " out of ", EPOCH_NUM, " complete.")
if not training:
all_intersections = all_intersections[all_unions > 0]
all_unions = all_unions[all_unions > 0]
print(f'Epoch Num: {epoch} ------ average val accuracy: {num_correct/num_total}')
print(f'Epoch Num: {epoch} ------ val miou: {np.mean(all_intersections / all_unions)}')
writer.add_scalar(SAVE_NAME + '/Accuracy/Val', num_correct/num_total, epoch)
writer.add_scalar(SAVE_NAME + '/mIoU/Val', np.mean(all_intersections / all_unions), epoch)
return model, train_count
def save_filter(model, save_path):
filters = model.get_filters()
torch.save(filters, save_path)
print("Evaluation on the default kernel:")
for epoch in range(EPOCH_NUM):
# Save filters before any training
if not DEBUG_MODE:
save_filter(model, os.path.join(save_dir, "filters" + str(epoch) + ".pt"))
# Validation
model.eval()
with torch.no_grad():
semantic_loop(dataloader_val, epoch, training=False)
# Training
model.train()
idx = 0
model, train_count = semantic_loop(dataloader_train, epoch, train_count=train_count, training=True)
# Validation
epoch = EPOCH_NUM
model.eval()
with torch.no_grad():
semantic_loop(dataloader_val, epoch, training=False)
save_filter(model, os.path.join(save_dir, "filters" + str(epoch) + ".pt"))
writer.close()
| 11,927 | 39.989691 | 141 | py |
NeuralBKI | NeuralBKI-main/Data/KittiOdometry.py | import os
import numpy as np
# from utils import laserscan
import yaml
from torch.utils.data import Dataset
import torch
# import spconv
import math
from scipy.spatial.transform import Rotation as R
config_file = os.path.join('Config/kitti_odometry.yaml')
kitti_config = yaml.safe_load(open(config_file, 'r'))
SPLIT_SEQUENCES = kitti_config["SPLIT_SEQUENCES"]
def grid_ind(input_pc, labels, min_bound, max_bound, grid_size, voxel_sizes):
'''
Input:
input_xyz: N * (x, y, z, c) float32 array, point cloud
Output:
grid_inds: N' * (x, y, z, c) int32 array, point cloud mapped to voxels
'''
input_xyz = input_pc[:, :3]
valid_input_mask = np.all((input_xyz < max_bound) & (input_xyz >= min_bound), axis=1)
valid_xyz = input_xyz[valid_input_mask]
labels = labels[valid_input_mask]
grid_inds = np.floor((valid_xyz - min_bound) / voxel_sizes)
maxes = (grid_size - 1).reshape(1, 3)
clipped_inds = np.clip(grid_inds, np.zeros_like(maxes), maxes)
return clipped_inds, labels, valid_xyz
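# Minimal usage sketch for grid_ind (toy values, an assumption -- not tied
# to any dataset config): a 10x10x10 grid over [0, 10)^3 with 1 m voxels;
# out-of-bounds points are dropped along with their labels.
# pc = np.array([[0.5, 1.2, 3.9], [11.0, 0.0, 0.0]], dtype=np.float32)
# lbl = np.array([[1], [2]], dtype=np.uint8)
# inds, kept_lbl, kept_xyz = grid_ind(pc, lbl, np.zeros(3), np.full(3, 10.0),
#                                     np.full(3, 10), np.ones(3))
# -> inds == [[0., 1., 3.]], kept_lbl == [[1]]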
class KittiOdomDataset(Dataset):
"""Kitti Dataset for Neural BKI project
Access to the processed data, including evaluation labels predictions velodyne poses times
"""
def __init__(self,
grid_params,
directory="/home/jason/kitti_odometry",
device='cuda',
num_frames=1,
voxelize_input=True,
binary_counts=False,
random_flips=False,
use_aug=True,
apply_transform=True,
remap=False,
from_continuous=False,
to_continuous=False,
num_classes = 20,
data_split='train'
):
self.use_aug = use_aug
self.apply_transform = apply_transform
self._grid_size = grid_params['grid_size']
self.grid_dims = np.asarray(self._grid_size)
self._eval_size = list(np.uint32(self._grid_size))
self.coor_ranges = grid_params['min_bound'] + grid_params['max_bound']
self.voxel_sizes = [abs(self.coor_ranges[3] - self.coor_ranges[0]) / self._grid_size[0],
abs(self.coor_ranges[4] - self.coor_ranges[1]) / self._grid_size[1],
abs(self.coor_ranges[5] - self.coor_ranges[2]) / self._grid_size[2]]
self.min_bound = np.asarray(self.coor_ranges[:3])
self.max_bound = np.asarray(self.coor_ranges[3:])
self.voxel_sizes = np.asarray(self.voxel_sizes)
self.voxelize_input = voxelize_input
self.binary_counts = binary_counts
self._directory = directory
self._num_frames = num_frames
self.device = device
self.random_flips = random_flips
self.remap = remap
self.from_continuous = from_continuous
self.to_continuous = to_continuous
self.num_classes = num_classes
self.split = data_split
self._velodyne_list = []
self._label_list = []
self._pred_list = []
self._frames_list = []
self._poses = np.empty((0,12))
self._num_frames_scene = []
self._frames_list_label = []
self._seqs = SPLIT_SEQUENCES[self.split]
self._scene_id = []
for seq in self._seqs:
velodyne_dir = os.path.join(self._directory, seq, 'training/pointcloud')
label_dir = os.path.join(self._directory, seq, 'training/labels')
if self.from_continuous:
preds_dir = os.path.join(self._directory, seq, 'training/predictions_continuous')
else:
preds_dir = os.path.join(self._directory, seq, 'training/predictions')
self._num_frames_scene.append(len(os.listdir(label_dir)))
self._scene_id += [seq] * len(os.listdir(velodyne_dir))
frames_list = [os.path.splitext(filename)[0] for filename in sorted(os.listdir(velodyne_dir))]
frames_list_label = [os.path.splitext(filename)[0] for filename in sorted(os.listdir(label_dir))]
self._frames_list_label = frames_list_label
pose = np.loadtxt(os.path.join(self._directory, seq, 'training/CameraTrajectory.txt'))[:(len(frames_list))]
self._poses = np.vstack((self._poses, pose))
self._frames_list.extend(frames_list)
self._velodyne_list.extend([os.path.join(velodyne_dir, str(frame).zfill(6)+'.bin') for frame in frames_list])
self._label_list.extend([os.path.join(label_dir, str(frame).zfill(6)+'.label') for frame in frames_list_label])
if self.from_continuous:
self._pred_list.extend([os.path.join(preds_dir, str(frame).zfill(6)+'.bin') for frame in frames_list])
else:
self._pred_list.extend([os.path.join(preds_dir, str(frame).zfill(6)+'.label') for frame in frames_list])
# Reshape across all loaded sequences; using len(frames_list) here only
# counted the last sequence and broke runs with multiple sequences
self._poses = self._poses.reshape(-1, 12)
def collate_fn(self, data):
points_batch = [bi[0] for bi in data]
label_batch = [bi[1] for bi in data]
gt_label_batch = [bi[2] for bi in data]
return points_batch, label_batch, gt_label_batch
def get_aug_matrix(self, trans):
"""
trans - 1 or 2 specifies reflection about XZ or YZ plane
any other value gives rotation matrix
Double checked with rotation matrix calculator
"""
if trans==1:
trans = np.eye(3)
trans[1][1] = -1
elif trans==2:
trans = np.eye(3)
trans[0][0] = -1
else:
if trans==0:
angle = 0
else:
angle = (trans-2)*90
trans = R.from_euler('z', angle, degrees=True).as_matrix()
return trans
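# Quick check of get_aug_matrix (toy points, an assumption): trans==1
# reflects about the XZ plane (y -> -y), trans==2 about the YZ plane
# (x -> -x), and trans==3 gives a 90 degree rotation about z; it is
# applied in __getitem__ as points = (aug_mat @ points.T).T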
def get_pose(self, frame_id):
pose = np.zeros((4, 4))
pose[3, 3] = 1
pose[:3, :4] = self._poses[frame_id,:].reshape(3, 4)
rot = np.array(([0, -1, 0], [0, 0, -1], [1, 0, 0])) # kitti odometry to rviz coords
rot_4 = np.zeros((4, 4)) # renamed from R/R_4 to avoid shadowing scipy's Rotation import
rot_4[:3, :3] = rot
rot_4[3, 3] = 1
pose = np.matmul(np.linalg.inv(rot_4), np.matmul(pose, rot_4))
global_pose = pose.astype(np.float32)
return global_pose
# Use all frames, if there is no data then zero pad
def __len__(self):
return sum(self._num_frames_scene)
def __getitem__(self, idx):
# -1 indicates no data
# the final index is the output
idx_2 = int(self._frames_list_label[idx])
idx_range = self.find_horizon(idx_2)
current_points = []
current_labels = []
ego_pose = self.get_pose(idx_range[-1])
to_ego = np.linalg.inv(ego_pose)
aug_index = np.random.randint(0,3) # Set end idx to 6 to do rotations
aug_mat = self.get_aug_matrix(aug_index)
gt_labels = None
temp_gt_labels = np.fromfile(self._label_list[idx], dtype=np.uint8).reshape((-1))
for i in idx_range:
if i == -1: # Zero pad
points = np.zeros((1, 3), dtype=np.float32)
if self.to_continuous:
labels = np.zeros((1,self.num_classes), dtype=np.float32)
else:
labels = np.zeros((1, 1), dtype=np.uint8)
else:
points = np.fromfile(self._velodyne_list[i],dtype=np.float32).reshape(-1,3)
if self.apply_transform:
global_pose = self.get_pose(i)
relative_pose = np.matmul(to_ego, global_pose)
points = np.dot(relative_pose[:3, :3], points.T).T + relative_pose[:3, 3]
if self.from_continuous:
labels = np.fromfile(self._pred_list[i], dtype=np.uint8).reshape((-1, self.num_classes))
labels = (labels / 255).astype(np.float32)
if not self.to_continuous:
labels = np.argmax(labels, axis=1).reshape((-1, 1)).astype(np.uint8)
else:
labels = np.fromfile(self._pred_list[i], dtype=np.uint8).reshape((-1, 1))
# Perform data augmentation on points
if self.use_aug:
points = (aug_mat @ points.T).T
# Filter points outside of voxel grid
if i == idx_range[-1]:
grid_point_mask = np.all(
(points < self.max_bound) & (points >= self.min_bound), axis=1)
points = points[grid_point_mask, :]
temp_gt_labels = temp_gt_labels[grid_point_mask]
labels = labels[grid_point_mask,:]
# Remove ignored classes (sky, person, bicycle)
ignored_classes = np.all((temp_gt_labels != 7, temp_gt_labels != 8, temp_gt_labels != 10), axis=0)
points = points[ignored_classes, :]
temp_gt_labels = temp_gt_labels[ignored_classes]
labels = labels[ignored_classes,:]
gt_labels = temp_gt_labels
else:
grid_point_mask = np.all(
(points < self.max_bound) & (points >= self.min_bound), axis=1)
points = points[grid_point_mask, :]
labels = labels[grid_point_mask, :]
# Remove zero labels
if self.from_continuous:
labels_t = np.argmax(labels, axis=1).reshape((-1, 1)).astype(np.uint8)
else:
labels_t = labels
# Remove ignored classes (sky, person, bicycle)
ignored_classes = np.all((labels_t != 7,labels_t != 8,labels_t != 10), axis=0).squeeze()
points = points[ignored_classes, :]
labels = labels[ignored_classes, :]
points = points.astype(np.float32)
current_points.append(points)
current_labels.append(labels)
return current_points, current_labels, gt_labels.astype(np.uint8).reshape(-1, 1)
def find_horizon(self, idx):
end_idx = idx
idx_range = np.arange(idx-self._num_frames, idx)+1
diffs = np.asarray([int(self._frames_list[end_idx]) - int(self._frames_list[i]) for i in idx_range])
good_difs = -1 * (np.arange(-self._num_frames, 0) + 1)
idx_range[good_difs != diffs] = -1
return idx_range
def points_to_voxels(self, voxel_grid, points, t_i):
# Valid voxels (make sure to clip)
valid_point_mask= np.all(
(points < self.max_bound) & (points >= self.min_bound), axis=1)
valid_points = points[valid_point_mask, :]
voxels = np.floor((valid_points - self.min_bound) / self.voxel_sizes).astype(int)
# Clamp to account for any floating point errors
maxes = np.reshape(self.grid_dims - 1, (1, 3))
mins = np.zeros_like(maxes)
voxels = np.clip(voxels, mins, maxes).astype(int)
# This line is needed to create a mask with number of points, not just binary occupied
if self.binary_counts:
voxel_grid[t_i, voxels[:, 0], voxels[:, 1], voxels[:, 2]] += 1
else:
unique_voxels, counts = np.unique(voxels, return_counts=True, axis=0)
unique_voxels = unique_voxels.astype(int)
voxel_grid[t_i, unique_voxels[:, 0], unique_voxels[:, 1], unique_voxels[:, 2]] += counts
return voxel_grid
def get_test_item(self, idx, get_gt=False):
frame_id = idx # Frame ID in current scene ID
idx_2 = int(self._frames_list_label[idx])
frame_id_2 = idx_2
global_pose = self.get_pose(frame_id_2)
if frame_id > 0:
prior_pose = self.get_pose(frame_id_2 - 1)
else:
prior_pose = global_pose
points = np.fromfile(self._velodyne_list[frame_id_2], dtype=np.float32).reshape(-1, 3)[:, :3]
if get_gt:
gt_labels = np.fromfile(self._label_list[frame_id], dtype=np.uint8).reshape((-1))
if self.from_continuous:
pred_labels = np.fromfile(self._pred_list[frame_id_2], dtype=np.uint8).reshape((-1, self.num_classes))
pred_labels = (pred_labels / 255).astype(np.float32)
if not self.to_continuous:
pred_labels = np.argmax(pred_labels, axis=1).reshape((-1, 1))
else:
pred_labels = np.fromfile(self._pred_list[frame_id_2], dtype=np.uint8).reshape((-1, 1))
# Filter points outside of voxel grid
grid_point_mask = np.all( (points < self.max_bound) & (points >= self.min_bound), axis=1)
points = points[grid_point_mask, :]
if get_gt:
gt_labels = gt_labels[grid_point_mask]
pred_labels = pred_labels[grid_point_mask, :]
# Remove ignored classes (sky, person, bicycle)
if get_gt:
ignored_classes = np.all((gt_labels != 7,gt_labels != 8,gt_labels != 10), axis=0).squeeze()
points = points[ignored_classes, :]
pred_labels = pred_labels[ignored_classes, :]
gt_labels = gt_labels[ignored_classes]
else:
ignored_classes = np.all((pred_labels != 7,pred_labels != 8,pred_labels != 10), axis=0).squeeze()
points = points[ignored_classes, :]
pred_labels = pred_labels[ignored_classes, :]
scene_id = self._scene_id[idx]
if get_gt:
return global_pose, points, pred_labels, gt_labels.astype(np.uint8).reshape(-1, 1), scene_id, frame_id
else:
return global_pose, points, pred_labels, None, scene_id, frame_id
| 14,054 | 40.217009 | 123 | py |
NeuralBKI | NeuralBKI-main/Data/utils.py | import os
import pdb
from matplotlib import markers
import rospy
import numpy as np
import time
import torch
from visualization_msgs.msg import *
from geometry_msgs.msg import Point32
from std_msgs.msg import ColorRGBA
# Intersection, union for one frame
def iou_one_frame(pred, target, n_classes=21):
pred = pred.reshape(-1)
target = target.reshape(-1)
intersection = np.zeros(n_classes)
union = np.zeros(n_classes)
for cls in range(n_classes):
pred_inds = pred == cls
target_inds = target == cls
intersection[cls] = (pred_inds[target_inds]).long().sum().item() # Cast to long to prevent overflows
union[cls] = pred_inds.long().sum().item() + target_inds.long().sum().item() - intersection[cls]
return intersection, union
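# Minimal usage sketch (toy tensors, an assumption):
# pred = torch.tensor([1, 1, 2, 2])
# target = torch.tensor([1, 2, 2, 2])
# inter, union = iou_one_frame(pred, target, n_classes=3)
# -> inter == [0., 1., 2.], union == [0., 2., 3.]
# mIoU over observed classes = mean(1/2, 2/3) ~= 0.583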
def points_to_voxels_torch(voxel_grid, points, min_bound, grid_dims, voxel_sizes):
voxels = torch.floor((points - min_bound) / voxel_sizes).to(dtype=torch.int)
# Clamp to account for any floating point errors
maxes = (grid_dims - 1).reshape(1, 3)
mins = torch.zeros_like(maxes)
voxels = torch.clip(voxels, mins, maxes).to(dtype=torch.long)
voxel_grid = voxel_grid[voxels[:, 0], voxels[:, 1], voxels[:, 2]]
return voxel_grid
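# Usage sketch (toy shapes, an assumption): gathers the (X, Y, Z, C) grid
# value that each point falls into, returning an (N, C) tensor.
# grid = torch.rand(10, 10, 10, 4)
# pts = torch.tensor([[0.4, 2.7, 9.9]])
# vals = points_to_voxels_torch(grid, pts, torch.zeros(3),
#                               torch.full((3,), 10), torch.ones(3))
# -> vals.shape == (1, 4), the distribution stored at voxel (0, 2, 9)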
# Remap colors to np array 0 to 1
def remap_colors(colors):
# color
colors_temp = np.zeros((len(colors), 3))
for i in range(len(colors)):
colors_temp[i, :] = colors[i]
colors = colors_temp.astype("int")
colors = colors / 255.0
return colors
def publish_voxels(map_object, min_dim, max_dim, grid_dims, colors, next_map):
next_map.markers.clear()
marker = Marker()
marker.id = 0
marker.ns = "Global_Semantic_Map"
marker.header.frame_id = "map" # change this to match model + scene name LMSC_000001
marker.type = marker.CUBE_LIST
marker.action = marker.ADD
marker.lifetime.secs = 0
marker.header.stamp = rospy.Time.now()
marker.pose.orientation.x = 0.0
marker.pose.orientation.y = 0.0
marker.pose.orientation.z = 0.0
marker.pose.orientation.w = 1
marker.scale.x = (max_dim[0] - min_dim[0]) / grid_dims[0]
marker.scale.y = (max_dim[1] - min_dim[1]) / grid_dims[1]
marker.scale.z = (max_dim[2] - min_dim[2]) / grid_dims[2]
semantic_labels = map_object.global_map[:,3:]
centroids = map_object.global_map[:, :3]
# Threshold here
total_probs = np.sum(semantic_labels, axis=-1, keepdims=False)
not_prior = total_probs > 1
semantic_labels = semantic_labels[not_prior, :]
centroids = centroids[not_prior, :]
semantic_labels = np.argmax(semantic_labels, axis=-1)
semantic_labels = semantic_labels.reshape(-1, 1)
for i in range(semantic_labels.shape[0]):
pred = semantic_labels[i]
point = Point32()
color = ColorRGBA()
point.x = centroids[i, 0]
point.y = centroids[i, 1]
point.z = centroids[i, 2]
color.r, color.g, color.b = colors[pred].squeeze()
color.a = 1.0
marker.points.append(point)
marker.colors.append(color)
next_map.markers.append(marker)
return next_map
def publish_local_map(labeled_grid, centroids, grid_params, colors, next_map):
max_dim = grid_params["max_bound"]
min_dim = grid_params["min_bound"]
grid_dims = grid_params["grid_size"]
next_map.markers.clear()
marker = Marker()
marker.id = 0
marker.ns = "Local Semantic Map"
marker.header.frame_id = "map"
marker.type = marker.CUBE_LIST
marker.action = marker.ADD
marker.lifetime.secs = 0
marker.header.stamp = rospy.Time.now()
marker.pose.orientation.x = 0.0
marker.pose.orientation.y = 0.0
marker.pose.orientation.z = 0.0
marker.pose.orientation.w = 1
marker.scale.x = (max_dim[0] - min_dim[0]) / grid_dims[0]
marker.scale.y = (max_dim[1] - min_dim[1]) / grid_dims[1]
marker.scale.z = (max_dim[2] - min_dim[2]) / grid_dims[2]
X, Y, Z, C = labeled_grid.shape
semantic_labels = labeled_grid.view(-1, C).detach().cpu().numpy()
centroids = centroids.detach().cpu().numpy()
semantic_sums = np.sum(semantic_labels, axis=-1, keepdims=False)
valid_mask = semantic_sums >= 1
semantic_labels = semantic_labels[valid_mask, :]
centroids = centroids[valid_mask, :]
semantic_labels = np.argmax(semantic_labels / np.sum(semantic_labels, axis=-1, keepdims=True), axis=-1)
semantic_labels = semantic_labels.reshape(-1, 1)
for i in range(semantic_labels.shape[0]):
pred = semantic_labels[i]
point = Point32()
color = ColorRGBA()
point.x = centroids[i, 0]
point.y = centroids[i, 1]
point.z = centroids[i, 2]
color.r, color.g, color.b = colors[pred].squeeze()
color.a = 1.0
marker.points.append(point)
marker.colors.append(color)
next_map.markers.append(marker)
return next_map | 4,931 | 31.662252 | 109 | py |
NeuralBKI | NeuralBKI-main/Data/SemanticKitti.py | import os
import numpy as np
# from utils import laserscan
import yaml
from torch.utils.data import Dataset
import torch
# import spconv
import math
from scipy.spatial.transform import Rotation as R
config_file = os.path.join('Config/semantic_kitti.yaml')
kitti_config = yaml.safe_load(open(config_file, 'r'))
remapdict = kitti_config["learning_map"]
LABELS_REMAP = kitti_config["learning_map"]
LABEL_INV_REMAP = kitti_config["learning_map_inv"]
SPLIT_SEQUENCES = kitti_config["SPLIT_SEQUENCES"]
def grid_ind(input_pc, labels, min_bound, max_bound, grid_size, voxel_sizes):
'''
Input:
input_xyz: N * (x, y, z, c) float32 array, point cloud
Output:
grid_inds: N' * (x, y, z, c) int32 array, point cloud mapped to voxels
'''
input_xyz = input_pc[:, :3]
valid_input_mask = np.all((input_xyz < max_bound) & (input_xyz >= min_bound), axis=1)
valid_xyz = input_xyz[valid_input_mask]
labels = labels[valid_input_mask]
grid_inds = np.floor((valid_xyz - min_bound) / voxel_sizes)
maxes = (grid_size - 1).reshape(1, 3)
clipped_inds = np.clip(grid_inds, np.zeros_like(maxes), maxes)
return clipped_inds, labels, valid_xyz
def unpack(compressed):
''' given a bit encoded voxel grid, make a normal voxel grid out of it. '''
uncompressed = np.zeros(compressed.shape[0] * 8, dtype=np.uint8)
uncompressed[::8] = compressed[:] >> 7 & 1
uncompressed[1::8] = compressed[:] >> 6 & 1
uncompressed[2::8] = compressed[:] >> 5 & 1
uncompressed[3::8] = compressed[:] >> 4 & 1
uncompressed[4::8] = compressed[:] >> 3 & 1
uncompressed[5::8] = compressed[:] >> 2 & 1
uncompressed[6::8] = compressed[:] >> 1 & 1
uncompressed[7::8] = compressed[:] & 1
return uncompressed
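# Worked example (toy input, an assumption): each packed byte expands to
# eight binary voxels, most-significant bit first.
# unpack(np.array([0b10100000], dtype=np.uint8))
# -> array([1, 0, 1, 0, 0, 0, 0, 0], dtype=uint8)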
class KittiDataset(Dataset):
"""Kitti Dataset for Neural BKI project
Access to the processed data, including evaluation labels predictions velodyne poses times
"""
def __init__(self,
grid_params,
directory="/home/jason/Data/kitti",
device='cuda',
num_frames=4,
voxelize_input=True,
binary_counts=False,
random_flips=False,
use_aug=True,
apply_transform=True,
remap=True,
data_split='train',
from_continuous=False,
to_continuous=False,
pred_path="predictions_darknet",
num_classes=20,
remove_zero=False
):
self.remove_zero = remove_zero
self.from_continuous = from_continuous
self.to_continuous = to_continuous
self.use_aug = use_aug
self.apply_transform = apply_transform
self.num_classes = num_classes
self._grid_size = grid_params['grid_size']
self.grid_dims = np.asarray(self._grid_size)
self._eval_size = list(np.uint32(self._grid_size))
self.coor_ranges = grid_params['min_bound'] + grid_params['max_bound']
self.voxel_sizes = [abs(self.coor_ranges[3] - self.coor_ranges[0]) / self._grid_size[0],
abs(self.coor_ranges[4] - self.coor_ranges[1]) / self._grid_size[1],
abs(self.coor_ranges[5] - self.coor_ranges[2]) / self._grid_size[2]]
self.min_bound = np.asarray(self.coor_ranges[:3])
self.max_bound = np.asarray(self.coor_ranges[3:])
self.voxel_sizes = np.asarray(self.voxel_sizes)
self.voxelize_input = voxelize_input
self.binary_counts = binary_counts
self._directory = os.path.join(directory, 'sequences')
self._num_frames = num_frames
self.device = device
self.random_flips = random_flips
self.remap = remap
self.split = data_split
self._remap_lut = self.get_remap_lut()
self._velodyne_list = []
self._label_list = []
self._pred_list = []
self._eval_labels = []
self._eval_valid = []
self._frames_list = []
self._timestamps = []
self._poses = np.empty((0,12))
self._Tr = np.empty((0,12))
self._num_frames_scene = []
self._seqs = SPLIT_SEQUENCES[self.split]
self._scene_id = []
for seq in self._seqs:
velodyne_dir = os.path.join(self._directory, seq, 'velodyne')
label_dir = os.path.join(self._directory, seq, 'labels')
preds_dir = os.path.join(self._directory, seq, pred_path)
self._num_frames_scene.append(len(os.listdir(velodyne_dir)))
self._scene_id += [seq] * len(os.listdir(velodyne_dir))
frames_list = [os.path.splitext(filename)[0] for filename in sorted(os.listdir(velodyne_dir))]
pose = np.loadtxt(os.path.join(self._directory, seq, 'poses.txt'))
Tr = np.genfromtxt(os.path.join(self._directory, seq, 'calib.txt'))[-1,1:]
Tr = np.repeat(np.expand_dims(Tr, axis=1).T,pose.shape[0],axis=0)
self._Tr = np.vstack((self._Tr, Tr))
self._poses = np.vstack((self._poses, pose))
self._frames_list.extend(frames_list)
self._velodyne_list.extend([os.path.join(velodyne_dir, str(frame).zfill(6)+'.bin') for frame in frames_list])
self._label_list.extend([os.path.join(label_dir, str(frame).zfill(6)+'.label') for frame in frames_list])
self._pred_list.extend([os.path.join(preds_dir, str(frame).zfill(6)+'.label') for frame in frames_list])
assert len(self._velodyne_list) == np.sum(self._num_frames_scene), f"inconsistent number of frames detected, check the dataset"
# self._poses = np.concatenate(self._poses)
self._poses = self._poses.reshape(sum(self._num_frames_scene), 12)
self._Tr = self._Tr.reshape(sum(self._num_frames_scene), 12)
def collate_fn(self, data):
points_batch = [bi[0] for bi in data]
label_batch = [bi[1] for bi in data]
gt_label_batch = [bi[2] for bi in data]
return points_batch, label_batch, gt_label_batch
def get_aug_matrix(self, trans):
"""
trans - 1 or 2 specifies reflection about XZ or YZ plane
any other value gives rotation matrix
Double checked with rotation matrix calculator
"""
if trans==1:
trans = np.eye(3)
trans[1][1] = -1
elif trans==2:
trans = np.eye(3)
trans[0][0] = -1
else:
if trans==0:
angle = 0
else:
angle = (trans-2)*90
trans = R.from_euler('z', angle, degrees=True).as_matrix()
return trans
def get_pose(self, frame_id):
pose = np.zeros((4, 4))
pose[3, 3] = 1
pose[:3, :4] = self._poses[frame_id,:].reshape(3, 4)
Tr = np.zeros((4, 4))
Tr[3, 3] = 1
Tr[:3, :4] = self._Tr[frame_id,:].reshape(3,4)
Tr = Tr.astype(np.float32)
pose = pose.astype(np.float32)
global_pose = np.matmul(np.linalg.inv(Tr), np.matmul(pose, Tr))
return global_pose
# Use all frames, if there is no data then zero pad
def __len__(self):
return sum(self._num_frames_scene)
def get_inv_remap_lut(self):
'''
remap_lut to remap classes of semantic kitti for training...
:return:
'''
# make lookup table for mapping
maxkey = max(LABEL_INV_REMAP.keys())
# +100 hack making lut bigger just in case there are unknown labels
remap_lut = np.zeros((maxkey + 1), dtype=np.int32)
remap_lut[list(LABEL_INV_REMAP.keys())] = list(LABEL_INV_REMAP.values())
return remap_lut
def get_remap_lut(self):
'''
remap_lut to remap classes of semantic kitti for training...
:return:
'''
# make lookup table for mapping
maxkey = max(LABELS_REMAP.keys())
# +100 hack making lut bigger just in case there are unknown labels
remap_lut = np.zeros((maxkey + 100), dtype=np.int32)
remap_lut[list(LABELS_REMAP.keys())] = list(LABELS_REMAP.values())
# in completion we have to distinguish empty and invalid voxels.
# Important: For voxels 0 corresponds to "empty" and not "unlabeled".
remap_lut[remap_lut == 0] = 0 # keep 0 as ignore
remap_lut[0] = 0 # only 'empty' stays 'empty'.
return remap_lut
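# Usage sketch (an assumption, following the standard SemanticKITTI
# convention): raw labels pack an instance id in the upper 16 bits, so
# the semantic id is taken with `& 0xFFFF` before the LUT maps it into
# the contiguous train-id range, e.g. raw id 40 ("road") -> 9 under the
# stock learning_map.
# raw = np.fromfile(label_path, dtype=np.uint32) & 0xFFFF
# train_ids = self._remap_lut[raw]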
def __getitem__(self, idx):
# -1 indicates no data
# the final index is the output
idx_range = self.find_horizon(idx)
current_points = []
current_labels = []
ego_pose = self.get_pose(idx_range[-1])
to_ego = np.linalg.inv(ego_pose)
aug_index = np.random.randint(0,3) # Set end idx to 6 to do rotations
aug_mat = self.get_aug_matrix(aug_index)
gt_labels = None
for i in idx_range:
if i == -1: # Zero pad
points = np.zeros((1, 3), dtype=np.float32)
if self.to_continuous:
labels = np.zeros((1,self.num_classes), dtype=np.float32)
else:
labels = np.zeros((1, 1), dtype=np.uint8)
else:
points = np.fromfile(self._velodyne_list[i],dtype=np.float32).reshape(-1,4)[:, :3]
if self.apply_transform:
global_pose = self.get_pose(i)
relative_pose = np.matmul(to_ego, global_pose)
points = np.dot(relative_pose[:3, :3], points.T).T + relative_pose[:3, 3]
temp_gt_labels = np.fromfile(self._label_list[i], dtype=np.uint32) & 0xFFFF
temp_gt_labels = temp_gt_labels.reshape((-1)).astype(np.uint8)
if not self.from_continuous:
labels = np.fromfile(self._pred_list[i], dtype=np.uint32).reshape((-1, 1)).astype(np.uint8)
if self.from_continuous:
labels = np.fromfile(self._pred_list[i], dtype=np.float32).reshape((-1, self.num_classes))
if not self.to_continuous:
labels = np.argmax(labels, axis=1).reshape((-1, 1)).astype(np.uint8)
# Perform data augmentation on points
if self.use_aug:
points = (aug_mat @ points.T).T
# Filter points outside of voxel grid
grid_point_mask = np.all(
(points < self.max_bound) & (points >= self.min_bound), axis=1)
points = points[grid_point_mask, :]
temp_gt_labels = temp_gt_labels[grid_point_mask]
labels = labels[grid_point_mask, :]
# Remove zero labels
if self.remove_zero:
void_mask = temp_gt_labels != 0
points = points[void_mask, :]
temp_gt_labels = temp_gt_labels[void_mask]
labels = labels[void_mask, :]
if self.remap:
temp_gt_labels = self._remap_lut[temp_gt_labels].astype(np.uint8)
if not self.from_continuous:
labels = self._remap_lut[labels].astype(np.uint8)
if i == idx_range[-1]:
gt_labels = temp_gt_labels
points = points.astype(np.float32) #[:, [1, 0, 2]]
current_points.append(points)
current_labels.append(labels)
return current_points, current_labels, gt_labels.astype(np.uint8).reshape(-1, 1)
def find_horizon(self, idx):
end_idx = idx
idx_range = np.arange(idx-self._num_frames, idx)+1
diffs = np.asarray([int(self._frames_list[end_idx]) - int(self._frames_list[i]) for i in idx_range])
good_difs = -1 * (np.arange(-self._num_frames, 0) + 1)
idx_range[good_difs != diffs] = -1
return idx_range
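# Worked example (toy indices, an assumption): with _num_frames=4 and
# consecutive frames 6..9 on disk, find_horizon(9) -> [6, 7, 8, 9].
# At a scene boundary the frame-number differences no longer match
# good_difs, so those slots become -1 and are zero-padded in __getitem__.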
def points_to_voxels(self, voxel_grid, points, t_i):
# Valid voxels (make sure to clip)
valid_point_mask= np.all(
(points < self.max_bound) & (points >= self.min_bound), axis=1)
valid_points = points[valid_point_mask, :]
voxels = np.floor((valid_points - self.min_bound) / self.voxel_sizes).astype(int)
# Clamp to account for any floating point errors
maxes = np.reshape(self.grid_dims - 1, (1, 3))
mins = np.zeros_like(maxes)
voxels = np.clip(voxels, mins, maxes).astype(int)
# This line is needed to create a mask with number of points, not just binary occupied
if self.binary_counts:
voxel_grid[t_i, voxels[:, 0], voxels[:, 1], voxels[:, 2]] += 1
else:
unique_voxels, counts = np.unique(voxels, return_counts=True, axis=0)
unique_voxels = unique_voxels.astype(int)
voxel_grid[t_i, unique_voxels[:, 0], unique_voxels[:, 1], unique_voxels[:, 2]] += counts
return voxel_grid
def get_test_item(self, idx, get_gt=False):
frame_id = idx # Frame ID in current scene ID
global_pose = self.get_pose(frame_id)
if frame_id > 0:
prior_pose = self.get_pose(frame_id - 1)
else:
prior_pose = global_pose
points = np.fromfile(self._velodyne_list[frame_id], dtype=np.float32).reshape(-1, 4)[:, :3]
if get_gt:
gt_labels = np.fromfile(self._label_list[frame_id], dtype=np.uint32) & 0xFFFF
gt_labels = gt_labels.reshape((-1)).astype(np.uint8)
if not self.from_continuous:
pred_labels = np.fromfile(self._pred_list[frame_id], dtype=np.uint32).reshape((-1, 1)).astype(np.uint8)
if self.from_continuous:
pred_labels = np.fromfile(self._pred_list[frame_id], dtype=np.float32).reshape((-1, self.num_classes))
if not self.to_continuous:
pred_labels = np.argmax(pred_labels, axis=1).reshape((-1, 1))
# Remove zero labels
if get_gt and self.remove_zero:
grid_point_mask = np.all((points < self.max_bound) & (points >= self.min_bound), axis=1)
points = points[grid_point_mask, :]
gt_labels = gt_labels[grid_point_mask]
pred_labels = pred_labels[grid_point_mask, :]
void_mask = gt_labels != 0
points = points[void_mask, :]
gt_labels = gt_labels[void_mask]
pred_labels = pred_labels[void_mask, :]
if self.remap:
if get_gt:
gt_labels = self._remap_lut[gt_labels].astype(np.uint8)
if not self.from_continuous:
pred_labels = self._remap_lut[pred_labels].astype(np.uint8)
scene_id = self._scene_id[idx]
if get_gt:
return global_pose, points, pred_labels, gt_labels.astype(np.uint8).reshape(-1, 1), scene_id, frame_id
else:
return global_pose, points, pred_labels, None, scene_id, frame_id
| 15,165 | 39.878706 | 134 | py |
NeuralBKI | NeuralBKI-main/Data/Rellis3D.py | ## Maintainer: Arthur Zhang #####
## Contact: arthurzh@umich.edu #####
import os
import pdb
import math
import numpy as np
import random
import json
import yaml
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from scipy.spatial.transform import Rotation as R
from Data.utils import *
def unpack(compressed):
''' given a bit encoded voxel grid, make a normal voxel grid out of it. '''
uncompressed = np.zeros(compressed.shape[0] * 8, dtype=np.uint8)
uncompressed[::8] = compressed[:] >> 7 & 1
uncompressed[1::8] = compressed[:] >> 6 & 1
uncompressed[2::8] = compressed[:] >> 5 & 1
uncompressed[3::8] = compressed[:] >> 4 & 1
uncompressed[4::8] = compressed[:] >> 3 & 1
uncompressed[5::8] = compressed[:] >> 2 & 1
uncompressed[6::8] = compressed[:] >> 1 & 1
uncompressed[7::8] = compressed[:] & 1
return uncompressed
class Rellis3dDataset(Dataset):
"""Rellis3D Dataset for Neural BKI project
Access to the processed data, including evaluation labels predictions velodyne poses times
"""
def __init__(self,
grid_params,
directory,
device='cuda',
num_frames=20,
remap=True,
use_aug=True,
apply_transform=True,
model_name="salsa",
data_split="train"
):
'''Constructor.
Parameters:
directory: directory to the dataset
'''
self._directory = directory
self._num_frames = num_frames
self.device = device
self.remap = remap
self.use_aug = use_aug
self.apply_transform = apply_transform
self._scenes = [ s for s in sorted(os.listdir(self._directory)) if s.isdigit() ]
self._num_scenes = len(self._scenes)
self._num_frames_scene = 0
self._velodyne_list = []
self._label_list = []
self._pred_list = []
self._voxel_label_list = []
self._occupied_list = []
self._invalid_list = []
self._frames_list = []
self._timestamps = []
self._poses = []
self._num_frames_by_scene = []
split_dir = os.path.join(self._directory, "pt_"+data_split+".lst")
data_params_file = os.path.join(os.getcwd(), "Config", "rellis.yaml")
with open(data_params_file, "r") as stream:
try:
data_params = yaml.safe_load(stream)
self._num_labels = data_params["num_classes"]
max_label = max([i for i in data_params["LABELS_REMAP"].keys()])
# np.long was removed in NumPy 1.24+; use an explicit fixed-width dtype
self.LABELS_REMAP = np.zeros(max_label + 1, dtype=np.int64)
for raw_id, train_id in data_params["LABELS_REMAP"].items():
    self.LABELS_REMAP[raw_id] = train_id
except yaml.YAMLError as exc:
print(exc)
self._grid_size = grid_params['grid_size']
self.grid_dims = np.asarray(self._grid_size)
self.coor_ranges = grid_params['min_bound'] + grid_params['max_bound']
self.voxel_sizes = np.asarray([abs(self.coor_ranges[3] - self.coor_ranges[0]) / self._grid_size[0],
abs(self.coor_ranges[4] - self.coor_ranges[1]) / self._grid_size[1],
abs(self.coor_ranges[5] - self.coor_ranges[2]) / self._grid_size[2]])
self.min_bound = np.asarray(self.coor_ranges[:3])
self.max_bound = np.asarray(self.coor_ranges[3:])
# Generate list of scenes and indices to iterate over
self._scenes_list = []
self._index_list = []
with open(split_dir, 'r') as split_file:
for line in split_file:
image_path = line.split(' ')
image_path_lst = image_path[0].split('/')
scene_num = image_path_lst[0]
frame_index = int(image_path_lst[2][0:6])
self._scenes_list.append(scene_num)
self._index_list.append(frame_index)
for scene_id in range(self._num_scenes):
scene_name = self._scenes[scene_id]
velodyne_dir = os.path.join(self._directory, scene_name, 'os1_cloud_node_kitti_bin')
label_dir = os.path.join(self._directory, scene_name, 'os1_cloud_node_semantickitti_label_id')
pred_dir = os.path.join(self._directory, scene_name, model_name, 'os1_cloud_node_semantickitti_label_id')
# Load all poses and frame indices regardless of mode
self._poses.append(np.loadtxt(os.path.join(self._directory, scene_name, 'poses.txt')).reshape(-1, 12) )
self._frames_list.append([os.path.splitext(filename)[0] for filename in sorted(os.listdir(velodyne_dir))])
self._num_frames_by_scene.append(len(self._frames_list[scene_id]))
# PC inputs
self._velodyne_list.append( [os.path.join(velodyne_dir,
str(frame).zfill(6)+'.bin') for frame in self._frames_list[scene_id]] )
self._label_list.append( [os.path.join(label_dir,
str(frame).zfill(6)+'.label') for frame in self._frames_list[scene_id]] )
self._pred_list.append( [os.path.join(pred_dir,
str(frame).zfill(6)+'.label') for frame in self._frames_list[scene_id]] )
# Get number of frames to iterate over
self._num_frames_scene = len(self._index_list)
# Use all frames, if there is no data then zero pad
def __len__(self):
return self._num_frames_scene
def collate_fn(self, data):
points_batch = [bi[0] for bi in data]
label_batch = [bi[1] for bi in data]
gt_label_batch = [bi[2] for bi in data]
return points_batch, label_batch, gt_label_batch
def get_file_path(self, idx):
print(self._frames_list[idx])
def get_aug_matrix(self, trans):
"""
trans - 1 or 2 specifies reflection about XZ or YZ plane
any other value gives rotation matrix
Double checked with rotation matrix calculator
"""
if trans==1:
trans = np.eye(3)
trans[1][1] = -1
elif trans==2:
trans = np.eye(3)
trans[0][0] = -1
else:
if trans==0:
angle = 0
else:
angle = (trans-2)*90
trans = R.from_euler('z', angle, degrees=True).as_matrix()
return trans
def get_pose(self, scene_id, frame_id):
pose = np.zeros((4, 4))
pose[3, 3] = 1
pose[:3, :4] = self._poses[scene_id][frame_id].reshape(3, 4)
return pose
def __getitem__(self, idx):
scene_name = self._scenes_list[idx]
scene_id = int(scene_name) # Scene ID
frame_id = self._index_list[idx] # Frame ID in current scene ID
idx_range = self.find_horizon(scene_id, frame_id)
current_points = []
current_labels = []
ego_pose = self.get_pose(scene_id, idx_range[-1])
to_ego = np.linalg.inv(ego_pose)
aug_index = np.random.randint(0,3) # Set end idx to 6 to do rotations
aug_mat = self.get_aug_matrix(aug_index)
gt_labels = None
for i in idx_range:
if i == -1: # Zero pad
points = np.zeros((1, 3), dtype=np.float16)
labels = np.zeros((1,), dtype=np.uint8)
else:
points = np.fromfile(self._velodyne_list[scene_id][i], dtype=np.float32).reshape(-1,4)[:, :3]
if self.apply_transform:
to_world = self.get_pose(scene_id, i)
relative_pose = np.matmul(to_ego, to_world)
points = np.dot(relative_pose[:3, :3], points.T).T + relative_pose[:3, 3]
temp_gt_labels = np.fromfile(self._label_list[scene_id][i], dtype=np.uint32).reshape((-1)).astype(np.uint8)
labels = np.fromfile(self._pred_list[scene_id][i], dtype=np.uint32).reshape((-1)).astype(np.uint8)
# Perform data augmentation on points
if self.use_aug:
points = (aug_mat @ points.T).T
# Filter points outside of voxel grid
grid_point_mask = np.all(
(points < self.max_bound) & (points >= self.min_bound), axis=1)
points = points[grid_point_mask, :]
temp_gt_labels = temp_gt_labels[grid_point_mask]
labels = labels[grid_point_mask]
# Remove zero labels
void_mask = temp_gt_labels != 0
points = points[void_mask, :]
temp_gt_labels = temp_gt_labels[void_mask]
labels = labels[void_mask]
if self.remap:
temp_gt_labels = self.LABELS_REMAP[temp_gt_labels].astype(np.uint8)
labels = self.LABELS_REMAP[labels].astype(np.uint8)
if i == idx_range[-1]:
gt_labels = temp_gt_labels
labels = labels.reshape(-1, 1)
points = points.astype(np.float32) #[:, [1, 0, 2]]
labels = labels.astype(np.uint8)
current_points.append(points)
current_labels.append(labels)
return current_points, current_labels, gt_labels.astype(np.uint8).reshape(-1, 1)
def find_horizon(self, scene_id, idx):
end_idx = idx
idx_range = np.arange(idx- self._num_frames, idx)+1
diffs = np.asarray([int(self._frames_list[scene_id][end_idx])
- int(self._frames_list[scene_id][i]) for i in idx_range])
good_diffs = -1 * (np.arange(- self._num_frames, 0) + 1)
idx_range[good_diffs != diffs] = -1
return idx_range
def get_test_item(self, idx):
scene_name = self._scenes_list[idx]
scene_id = int(scene_name) # Scene ID
frame_id = self._index_list[idx] # Frame ID in current scene ID
pose = self.get_pose(scene_id, frame_id)
points = np.fromfile(self._velodyne_list[scene_id][frame_id], dtype=np.float32).reshape(-1, 4)[:, :3]
gt_labels = np.fromfile(self._label_list[scene_id][frame_id], dtype=np.uint32).reshape((-1)).astype(np.uint8)
pred_labels = np.fromfile(self._pred_list[scene_id][frame_id], dtype=np.uint32).reshape((-1)).astype(np.uint8)
# Filter points outside of voxel grid
grid_point_mask = np.all( (points < self.max_bound) & (points >= self.min_bound), axis=1)
points = points[grid_point_mask, :]
gt_labels = gt_labels[grid_point_mask]
pred_labels = pred_labels[grid_point_mask]
# Remove zero labels
void_mask = gt_labels != 0
points = points[void_mask, :]
gt_labels = gt_labels[void_mask]
pred_labels = pred_labels[void_mask]
if self.remap:
gt_labels = self.LABELS_REMAP[gt_labels].astype(np.uint8)
pred_labels = self.LABELS_REMAP[pred_labels].astype(np.uint8)
return pose.astype(np.float32), points, pred_labels.astype(np.uint8).reshape(-1, 1), gt_labels.astype(np.uint8).reshape(-1, 1), scene_id, frame_id | 11,319 | 38.719298 | 154 | py |
NeuralBKI | NeuralBKI-main/Models/model_utils.py | import pdb
import torch
import random
import numpy as np
from Models.ConvBKI import ConvBKI
def setup_seed(seed=42):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def measure_inf_time(model, inputs, reps=300):
print(inputs.shape)
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
timings = np.zeros((reps, 1))
model.eval()
with torch.no_grad():
current_map = model.initialize_grid()
for rep in range(reps):
starter.record()
_ = model(current_map, inputs)
ender.record()
# WAIT FOR GPU SYNC
torch.cuda.synchronize()
curr_time = starter.elapsed_time(ender)
timings[rep] = curr_time
mean_syn = np.sum(timings) / reps
std_syn = np.std(timings)
print(mean_syn)
def get_model(model_name, model_params):
# Model parameters
grid_params = model_params["train"]["grid_params"]
device = model_params["device"]
try:
model = ConvBKI(
torch.tensor([int(p) for p in grid_params['grid_size']], dtype=torch.long).to(device), # Grid size
torch.tensor(grid_params['min_bound']).to(device), # Lower bound
torch.tensor(grid_params['max_bound']).to(device), # Upper bound
num_classes=model_params["num_classes"],
filter_size=model_params["filter_size"],
device=device,
datatype=model_params["datatype"],
kernel=model_params["kernel"],
max_dist=model_params["ell"],
per_class=model_params["per_class"],
compound=model_params["compound"]
)
except Exception as e: # surface the real construction error instead of hiding it behind a bare except
    exit(f"Invalid config file: {e}")
return model | 1,843 | 30.254237 | 111 | py |
NeuralBKI | NeuralBKI-main/Models/ConvBKI.py | import pdb
import os
import torch
torch.backends.cudnn.deterministic = True
import torch.nn.functional as F
class ConvBKI(torch.nn.Module):
def __init__(self, grid_size, min_bound, max_bound, filter_size=3,
num_classes=21, prior=0.001, device="cpu", datatype=torch.float32,
max_dist=0.5, kernel="sparse", per_class=False, compound=False):
'''
Input:
grid_size: (x, y, z) int32 array, number of voxels
min_bound: (x, y, z) float32 array, lower bound on local map
max_bound: (x, y, z) float32 array, upper bound on local map
filter_size: int, dimension of the kernel on each axis (must be odd)
num_classes: int, number of classes
prior: float32, value of prior in map
device: cpu or gpu
max_dist: size of the kernel ell parameter
kernel: kernel to choose
per_class: whether to learn a different kernel for each class
'''
super().__init__()
self.min_bound = min_bound.view(-1, 3).to(device)
self.max_bound = max_bound.view(-1, 3).to(device)
self.grid_size = grid_size
self.dtype = datatype
self.prior = prior
self.kernel = kernel
self.device = device
self.num_classes = num_classes
self.per_class = per_class
self.compound = compound
self.voxel_sizes = (self.max_bound.view(-1) - self.min_bound.view(-1)) / self.grid_size.to(self.device)
self.pi = torch.acos(torch.zeros(1)).item() * 2
self.max_dist = max_dist
self.filter_size = torch.tensor(filter_size, dtype=torch.long, requires_grad=False, device=self.device)
self.initialize_kernel()
[xs, ys, zs] = [(max_bound[i]-min_bound[i])/(2*grid_size[i]) +
torch.linspace(min_bound[i], max_bound[i], device=device, steps=grid_size[i]+1)[:-1]
for i in range(3)]
self.centroids = torch.cartesian_prod(xs, ys, zs).to(device)
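# Worked example of the centroid grid above (toy numbers, an assumption):
# with min=-1, max=1 and 4 voxels along an axis, the voxel centers are
# [-0.75, -0.25, 0.25, 0.75] -- the half-voxel offset shifts the linspace
# of lower cell edges to cell centers.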
def initialize_kernel(self):
# Initialize with sparse kernel
assert(self.filter_size % 2 == 1)
self.sigma = torch.tensor(1.0, device=self.device) # Kernel must map to 0 to 1
# Parameters
if self.kernel == "sparse":
if self.compound:
if self.per_class:
self.ell_h = torch.nn.Parameter(torch.tensor([self.max_dist] * self.num_classes, device=self.device))
self.ell_z = torch.nn.Parameter(torch.tensor([self.max_dist] * self.num_classes, device=self.device))
# self.ell_h = torch.nn.Parameter(0.2 + self.max_dist*torch.rand(self.num_classes, device=self.device))
# self.ell_z = torch.nn.Parameter(0.2 + self.max_dist*torch.rand(self.num_classes, device=self.device))
else:
self.ell_h = torch.nn.Parameter(torch.tensor(self.max_dist, device=self.device, dtype=self.dtype))
self.ell_z = torch.nn.Parameter(torch.tensor(self.max_dist, device=self.device, dtype=self.dtype))
else:
if self.per_class:
self.ell = torch.nn.Parameter(torch.tensor([self.max_dist] * self.num_classes, device=self.device))
# self.ell = torch.nn.Parameter(2*self.max_dist*torch.rand(self.num_classes, device=self.device))
else:
self.ell = torch.nn.Parameter(torch.tensor(self.max_dist, device=self.device, dtype=self.dtype))
# Distances
middle_ind = torch.floor(self.filter_size / 2)
if self.compound:
self.kernel_dists_h = torch.zeros([1, 1, self.filter_size, self.filter_size, self.filter_size],
device=self.device)
self.kernel_dists_z = torch.zeros([1, 1, self.filter_size, self.filter_size, self.filter_size],
device=self.device)
else:
self.kernel_dists = torch.zeros([1, 1, self.filter_size, self.filter_size, self.filter_size],
device=self.device)
for x_ind in range(self.filter_size):
for y_ind in range(self.filter_size):
for z_ind in range(self.filter_size):
x_dist = torch.abs(x_ind - middle_ind) * self.voxel_sizes[0]
y_dist = torch.abs(y_ind - middle_ind) * self.voxel_sizes[1]
z_dist = torch.abs(z_ind - middle_ind) * self.voxel_sizes[2]
if self.compound:
horiz_dist = torch.sqrt(x_dist ** 2 + y_dist ** 2)
vert_dist = torch.sqrt(z_dist ** 2)
self.kernel_dists_h[0, 0, x_ind, y_ind, z_ind] = horiz_dist
self.kernel_dists_z[0, 0, x_ind, y_ind, z_ind] = vert_dist
else:
total_dist = torch.sqrt(x_dist ** 2 + y_dist ** 2 + z_dist ** 2)
self.kernel_dists[0, 0, x_ind, y_ind, z_ind] = total_dist
def sparse_kernel(self, d, ell, sigma):
kernel_val = sigma * ((1.0/3)*(2 + torch.cos(2 * self.pi * d/ell))*(1 - d/ell) +
1.0/(2*self.pi) * torch.sin(2 * self.pi * d / ell))
kernel_val[d >= ell] = 0
return torch.clamp(kernel_val, min=0.0, max=1.0)
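# For reference, the compactly-supported "sparse" kernel above is
#   k(d) = sigma * [ (1/3) (2 + cos(2 pi d / ell)) (1 - d / ell)
#                    + (1 / (2 pi)) sin(2 pi d / ell) ]  for d < ell,
# and 0 otherwise, so k(0) = sigma and k decays smoothly to exactly 0
# at d = ell (the form commonly used in BKI semantic mapping).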
def calculate_kernel(self, i=0):
kernel_val = None
if self.kernel == "sparse":
if self.per_class:
if self.compound:
kernel_val = self.sparse_kernel(self.kernel_dists_z, self.ell_z[i], self.sigma) * \
self.sparse_kernel(self.kernel_dists_h, self.ell_h[i], self.sigma)
else:
kernel_val = self.sparse_kernel(self.kernel_dists, self.ell[i], self.sigma)
else:
kernel_val = self.sparse_kernel(self.kernel_dists, self.ell, self.sigma)
return kernel_val
def initialize_grid(self):
return torch.zeros(self.grid_size[0], self.grid_size[1], self.grid_size[2],
self.num_classes, device=self.device, requires_grad=True,
dtype=self.dtype) + self.prior
def grid_ind(self, input_pc, min_bound=None, max_bound=None):
'''
Input:
input_xyz: N * (x, y, z, c) float32 array, point cloud
Output:
grid_inds: N' * (x, y, z, c) int32 array, point cloud mapped to voxels
'''
if min_bound is None:
min_bound = self.min_bound
if max_bound is None:
max_bound = self.max_bound
input_xyz = input_pc[:, :3]
labels = input_pc[:, 3:]
valid_input_mask = torch.all((input_xyz < max_bound) & (input_xyz >= min_bound), axis=1)
valid_xyz = input_xyz[valid_input_mask]
valid_labels = labels[valid_input_mask]
grid_inds = torch.floor((valid_xyz - min_bound) / self.voxel_sizes)
maxes = (self.grid_size - 1).view(1, 3)
clipped_inds = torch.clamp(grid_inds, torch.zeros_like(maxes), maxes)
return torch.hstack( (clipped_inds, valid_labels) )
def get_filters(self):
filters = torch.zeros([self.num_classes, 1, self.filter_size, self.filter_size, self.filter_size],
device=self.device, dtype=self.dtype)
for temp_class in range(self.num_classes):
if self.per_class:
filters[temp_class, 0, :, :, :] = self.calculate_kernel(i=temp_class)
else:
filters[temp_class, 0, :, :, :] = self.calculate_kernel()
return filters
def add_to_update(self, update, grid_pc, continuous=False):
if continuous:
# Solution inspired by https://github.com/facebookresearch/SparseConvNet/blob/main/sparseconvnet/utils.py
xyz = grid_pc[:, :3]
feat = grid_pc[:, 3:]
xyz, inv, counts = torch.unique(xyz, dim=0, return_inverse=True, return_counts=True)
feat_out = torch.zeros(xyz.size(0), feat.size(1), dtype=torch.float32, device=self.device)
feat_out.index_add_(0, inv, feat)
grid_ind = xyz.to(torch.long)
update[grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2]] = feat_out
else:
unique_inds, counts = torch.unique(grid_pc.to(torch.long), return_counts=True, dim=0)
counts = counts.type(torch.long)
grid_indices = [unique_inds[:, i] for i in range(grid_pc.shape[1])]
update[grid_indices] = update[grid_indices] + counts
return update
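# Sketch of the continuous branch above (toy tensors, an assumption):
# two soft-labeled points landing in the same voxel have their label
# vectors summed via index_add_:
# xyz = torch.tensor([[0., 0., 0.], [0., 0., 0.]])
# feat = torch.tensor([[0.9, 0.1], [0.6, 0.4]])
# -> unique xyz == [[0., 0., 0.]], feat_out == [[1.5, 0.5]]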
def forward(self, current_map, point_cloud):
'''
Input:
current_map: (x, y, z, c) float32 array, prior dirichlet distribution over map
point_cloud: N * (x, y, z, c) float32 array, semantically labeled points
Output:
updated_map: (x, y, z, c) float32 array, posterior dirichlet distribution over map
'''
# Assume map and point cloud are already aligned
X, Y, Z, C = current_map.shape
update = torch.zeros_like(current_map, requires_grad=False)
N, C = point_cloud.shape
continuous = False
if C == self.num_classes + 3:
continuous = True
# 1: Discretize
grid_pc = self.grid_ind(point_cloud)
update = self.add_to_update(update, grid_pc, continuous)
# 2: Apply BKI filters
filters = self.get_filters()
update = torch.unsqueeze(update.permute(3, 0, 1, 2), 0)
update = F.conv3d(update, filters, padding="same", groups=self.num_classes)
new_update = torch.squeeze(update).permute(1, 2, 3, 0)
return current_map + new_update | 9,899 | 47.292683 | 123 | py |
NeuralBKI | NeuralBKI-main/Models/mapping_utils.py | # This file contains classes for local and global offline mapping (not running semantic prediction)
import torch
import torch.nn.functional as F
import numpy as np
import time
from Models.ConvBKI import ConvBKI
# TODO: Trilinear interpolation
# Save grid in CPU memory, load to GPU when needed for update step
# Voxels are stored in a matrix [X | Y | Z | C_0 | ... C_N] where C is semantic class
class GlobalMap(ConvBKI):
def __init__(self, grid_size, min_bound, max_bound, weights, filter_size, num_classes=21, ignore_labels = None, prior=0.001, device="cpu",
datatype=torch.float32, sparse=True, delete_time=10):
super().__init__(grid_size, min_bound, max_bound, filter_size=filter_size,
num_classes=num_classes, prior=prior, device=device, datatype=datatype)
self.ignore_labels = ignore_labels
self.weights = weights
self.reset_grid()
self.ConvLayer = torch.nn.Conv3d(num_classes, num_classes, filter_size, padding="same", groups=num_classes,
device=device, dtype=datatype, bias=False)
self.ConvLayer.weight.requires_grad = False
self.ConvLayer.weight[:, :, :, :, :] = weights.detach()[:, :, :, :, :]
self.ConvLayer.eval()
self.delete_time = delete_time
def reset_grid(self):
self.global_map = None
self.map_times = None
self.initial_pose = None
self.translation_discretized = np.zeros(3)
self.points_rotation = torch.eye(3, dtype=self.dtype, device=self.device)
self.points_translation = torch.zeros(3, dtype=self.dtype, device=self.device)
def inside_mask(self, min_bounds, max_bounds):
inside = np.all((self.global_map[:, :3] >= min_bounds) & (self.global_map[:, :3] < max_bounds), axis=1)
return inside
def get_local_map(self, min_bound=None, max_bound=None):
# Fetch local map from CPU (anything not seen is prior)
local_map = self.initialize_grid()
inside_mask = None
if min_bound is None:
min_bound = self.min_bound
if max_bound is None:
max_bound = self.max_bound
local_min_bound = min_bound + torch.from_numpy(self.voxel_translation).to(self.device)
local_max_bound = max_bound + torch.from_numpy(self.voxel_translation).to(self.device)
if self.global_map is not None:
inside_mask = self.inside_mask(local_min_bound.detach().cpu().numpy(), local_max_bound.detach().cpu().numpy())
allocated_map = torch.tensor(self.global_map[inside_mask], device=self.device, dtype=self.dtype)
grid_map = self.grid_ind(allocated_map, min_bound=local_min_bound, max_bound=local_max_bound)
grid_indices = grid_map[:, :3].to(torch.long)
local_map[grid_indices[:, 0], grid_indices[:, 1], grid_indices[:, 2], :] = allocated_map[:, 3:]
return local_map, local_min_bound, local_max_bound, inside_mask
# Uses saved weights instead of generating a filter
def update_map(self, semantic_preds):
semantic_preds = semantic_preds.to(self.dtype)
local_map, local_min_bound, local_max_bound, inside_mask = self.get_local_map()
# Rotate the point cloud and translate to global frame
global_pose = torch.from_numpy(self.global_pose).to(self.device)
semantic_preds[:, :3] = torch.matmul(global_pose[:3, :3], semantic_preds[:, :3].T).T + global_pose[:3, 3]
# Change to indices using our global frame bounds
grid_pc = self.grid_ind(semantic_preds, min_bound=local_min_bound, max_bound=local_max_bound)
# Update local map
update = torch.zeros_like(local_map, requires_grad=False)
continuous = False
N, C = semantic_preds.shape
if C == self.num_classes + 3:
continuous = True
update = self.add_to_update(update, grid_pc, continuous)
# Apply BKI filters
update = torch.unsqueeze(update.permute(3, 0, 1, 2), 0)
update = self.ConvLayer(update)
new_update = torch.squeeze(update).permute(1, 2, 3, 0)
# Find updated cells
local_map = local_map + new_update
updated_cells = (torch.mean(local_map, dim=3) > self.prior).view(-1)
updated_centroids = self.centroids[updated_cells, :] + torch.from_numpy(self.voxel_translation).to(self.device)
local_values = local_map.view(-1, self.num_classes)[updated_cells]
new_cells = torch.cat((updated_centroids, local_values), dim=1)
# Visited Times = 0
visited_times = torch.zeros(new_cells.shape[0], 1).detach().cpu().numpy()
# If empty
if self.global_map is None:
self.global_map = new_cells.detach().cpu().numpy()
self.map_times = visited_times
else:
# Replace local cells
outside_mask = ~ inside_mask
# Add new cells
self.global_map = np.vstack((self.global_map[outside_mask, :], new_cells.detach().cpu().numpy()))
self.map_times = np.vstack((self.map_times[outside_mask, :], visited_times))
# Garbage Collection
self.garbage_collection()
return self.global_map
def garbage_collection(self):
self.map_times += 1
# Remove cells with T > self.delete_time
recent_mask = self.map_times < self.delete_time
recent_mask = np.squeeze(recent_mask)
self.map_times = self.map_times[recent_mask, :]
self.global_map = self.global_map[recent_mask, :]
# Propagate map given a transformation matrix
def propagate(self, pose):
self.global_pose = pose
# Was just initialized
if self.initial_pose is None:
self.initial_pose = pose
# Relative transformation between origin and current point
relative_translation = pose[:3, 3] - self.initial_pose[:3, 3]
# To select voxels from memory, find the nearest voxel
voxel_sizes = self.voxel_sizes.detach().cpu().numpy()
self.voxel_translation = np.round(relative_translation / voxel_sizes) * voxel_sizes
self.nearest_voxel = self.initial_pose[:3, 3] + self.voxel_translation
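# Worked example (toy numbers, an assumption): with 0.2 m voxels and a
# relative translation of [0.45, 0, 0], the stored map is shifted by
# round(0.45 / 0.2) * 0.2 = 0.4 m, keeping global voxel centers aligned
# with the local grid.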
# Predict labels for points after propagating pose
def label_points(self, points):
points = torch.from_numpy(points).to(self.device)
global_pose = torch.from_numpy(self.global_pose).to(self.device)
points = torch.matmul(global_pose[:3, :3], points.T).T + global_pose[:3, 3]
labels = torch.zeros((points.shape[0], self.num_classes), dtype=torch.float32, device=self.device)
local_map, local_min_bound, local_max_bound, __ = self.get_local_map()
local_mask = torch.all((points < local_max_bound) & (points >= local_min_bound), dim=1)
local_points = points[local_mask]
grid_inds = torch.floor((local_points - local_min_bound) / self.voxel_sizes)
maxes = (self.grid_size - 1).view(1, 3)
clipped_inds = torch.clamp(grid_inds, torch.zeros_like(maxes), maxes).to(torch.long)
labels[local_mask, :] = local_map[clipped_inds[:, 0], clipped_inds[:, 1], clipped_inds[:, 2], :]
labels[~local_mask, :] = self.prior
# TODO: Add some sort of thresholding based on variance
# TODO: Add calculation of expectation, variance
predictions = torch.argmax(labels, dim=1)
predictions[~local_mask] = self.ignore_labels[0]
return predictions, local_mask
| 7,482 | 46.66242 | 142 | py |
NeuralBKI | NeuralBKI-main/Models/BKINet.py | import torch
# BKINet consists of two components:
# 1) A pre-trained semantic segmentation model
# 2) A pre-trained ConvBKI layer
# This module is intended for ROS integration
class BKINet(torch.nn.Module):
def __init__(self, grid_size, min_bound, max_bound, weights, filter_size, segmentation_net,
num_classes=21, prior=0.001, device="cpu", datatype=torch.float32):
super().__init__()
self.segmentation_net = segmentation_net
self.min_bound = min_bound.view(-1, 3).to(device)
self.max_bound = max_bound.view(-1, 3).to(device)
        self.grid_size = grid_size.to(device)
        self.dtype = datatype
        # voxel_sizes is needed by grid_ind but is not passed in; derive it
        # from the bounds and grid resolution (assumes a regular voxel grid)
        self.voxel_sizes = (self.max_bound - self.min_bound) / self.grid_size
def grid_ind(self, input_pc):
'''
Input:
input_xyz: N * (x, y, z, c) float32 array, point cloud
Output:
grid_inds: N' * (x, y, z, c) int32 array, point cloud mapped to voxels
'''
input_xyz = input_pc[:, :3]
labels = input_pc[:, 3].view(-1, 1)
        valid_input_mask = torch.all((input_xyz < self.max_bound) & (input_xyz >= self.min_bound), dim=1)
valid_xyz = input_xyz[valid_input_mask]
valid_labels = labels[valid_input_mask]
grid_inds = torch.floor((valid_xyz - self.min_bound) / self.voxel_sizes)
maxes = (self.grid_size - 1).view(1, 3)
clipped_inds = torch.clamp(grid_inds, torch.zeros_like(maxes), maxes)
return torch.hstack((clipped_inds, valid_labels))
| 1,442 | 34.195122 | 106 | py |
pivnet | pivnet-main/pivnet.py | from typing import List
import pickle, itertools
from numba import jit, i4, i8, f4, typeof
from numba.typed import List
from numba.experimental import jitclass
import numpy as np
from sklearn.preprocessing import StandardScaler
from collections import OrderedDict
from scipy.spatial import KDTree
import multiprocessing as mp
import torch
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
class SimpleDataset(Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def get_state_dict(path):
state_dict = torch.load(path)['state_dict']
# remove prefix 'net.' and remake state dict
return OrderedDict(
{key[4:]: val for key, val in state_dict.items()})
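# For example (illustrative key only): a checkpoint entry 'net.fc1.weight' is
# remapped to 'fc1.weight' so it can be loaded into the bare network module.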
def read_pickle(path):
with open(path, mode='rb') as f:
instance = pickle.load(f)
return instance
def to_pickle(path, instance):
with open(path, mode='wb') as f:
pickle.dump(instance, f)
def generate_pivots(
database: np.array, grid: int, k: int,
margin=0., n_threads=1):
""" generate pivots
database: np.array
database
    grid: int
        number of grid cells per dimension
    k: int
        number of nearest-neighbor distances to precompute for each pivot
margin: float
margin of data space boundary
n_threads: int
number of threads to calculate kNN of pivots
set s.t. grid ** dim % n_threads == 0
returns: Pivots
"""
dim = len(database[0])
# calculate ranges
min_values = database.min(axis=0) - margin
max_values = database.max(axis=0) + margin
# make grid
hist, edges = np.histogramdd(
database,
bins=grid,
range=list(zip(min_values, max_values)),
density=True)
# make pivots
edges_wo_max = np.array(edges)[:, :-1]
pivots = np.array(list(
itertools.product(*edges_wo_max)))
# add half of bin width into edges
for i in range(dim):
bin_width = (max_values[i] - min_values[i]) / grid
pivots[:, i] += bin_width / 2
# search knn of pivots
index = KDTree(database)
# knnd, _ = index.query(proxy_queries, k=k)
batch_size = len(pivots) // n_threads
with mp.Pool(n_threads) as pool:
async_results = []
for i in range(0, len(pivots), batch_size):
proxy_query_batch = pivots[i:i+batch_size]
async_results.append(pool.apply_async(
index.query, (proxy_query_batch, k)))
results = np.array(
[async_res.get()[0] for async_res in async_results]) \
.reshape(-1, k)
pivots = pivots.astype('float32')
knnd = results.astype('float32')
return Pivots(
dim, grid, min_values, max_values,
pivots, knnd)
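# A minimal usage sketch for generate_pivots (hypothetical data; pick
# n_threads so that grid ** dim is divisible by it, as noted above):
#
#   db = np.random.rand(10000, 2).astype('float32')
#   piv = generate_pivots(db, grid=10, k=5, margin=0.0, n_threads=4)
#   feat = piv.get_feature(db[0])   # length: dim + 1 + k = 8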
@jitclass
class Pivots:
dim: i4 # dimension
grid: i4 # #grid of each dimension
min_values: f4[:] # min values for each dimension
max_values: f4[:] # max values for each dimension
bin_width: f4[:] # bin width of histogram
cell_diag: f4 # diagonal length of each cell
pivots: f4[:, :] # pivots
knnd: f4[:, :] # knn distance of pivots
def __init__(self, dim: i4, grid: i4,
min_values: f4[:], max_values: f4[:],
pivots: f4[:, :], knnd: f4[:, :]) -> None:
"""
dim: int32
dimension of data
grid: int32
#grid of each dimension
min_values: np.array[float32]
min values for each dimension
max_values: np.array[float32]
max values for each dimension
pivots: np.array[np.array[float32]]
pivots
knnd: np.array[np.array[float32]]
knn distance of pivots
"""
self.dim = dim
self.grid = grid
self.min_values = min_values
self.max_values = max_values
self.bin_width = (max_values - min_values) / grid
self.cell_diag = np.sqrt((self.bin_width ** 2).sum())
self.pivots = pivots
self.knnd = knnd
def calc_index(self, query: f4[:]) -> i4:
"""
returns: int32
pivot index of a given query in pivots array
"""
index = 0
for i in range(self.dim):
index = index * self.grid + int(
(query[i] - self.min_values[i]) \
/ (self.max_values[i] - self.min_values[i]) \
* self.grid)
return index
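    # Worked example (illustrative): with dim=2, grid=4 and bounds [0, 1] in
    # each dimension, a query (0.30, 0.80) falls in per-dimension bins (1, 3),
    # giving the row-major flat index 1 * 4 + 3 = 7 -- the same ordering that
    # itertools.product uses when generate_pivots lays out the pivots.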
def calc_indices(self, queries: f4[:, :]) -> i4[:]:
"""
returns: np.array[int32]
pivot indices of given queries in pivots array
"""
indices = []
for data in queries:
index = self.calc_index(data)
indices.append(index)
return np.array(indices)
def get_feature(self, query: f4[:]) -> f4[:]:
"""
returns: np.array[float32]
0:dim query coordinates
dim normalized distance between query and nearest pivot
dim+1: pivot's knn distances
"""
index = self.calc_index(query)
pivot = self.pivots[index]
dist_from_pivot = np.linalg.norm(query - pivot)
return np.concatenate((
query,
np.array([dist_from_pivot]) / self.cell_diag,
self.knnd[index]
))
def get_kth_feature(self, query: f4[:], k: i4) -> f4[:]:
"""
returns: np.array[float32]
0:dim query coordinates
dim normalized distance between query and nearest pivot
dim+1 pivot's k-th nn distance
"""
index = self.calc_index(query)
pivot = self.pivots[index]
dist_from_pivot = np.linalg.norm(query - pivot)
return np.concatenate((
query,
np.array([dist_from_pivot]) / self.cell_diag,
np.array([self.knnd[index, k-1]])
))
def get_features(self, queries: f4[:, :]) -> f4[:, :]:
"""
returns: np.array[np.array[float32]]
features of given queries
"""
features = np.empty((
len(queries), len(self.knnd[0])+self.dim+1))
for i, query in enumerate(queries):
features[i] = self.get_feature(query)
return features
def get_kth_features(self, queries: f4[:, :], ks: i4[:]) -> f4[:, :]:
"""
returns: np.array[np.array[float32]]
features of given queries
"""
features = np.zeros((len(queries), self.dim+2))
for i, (query, k) in enumerate(zip(queries, ks)):
features[i] = self.get_kth_feature(query, k)
return features
def calc_knnd_upper_bound(self, query: f4[:]) -> f4[:]:
"""
        returns: np.array[float32]
upper bound of knn distances of query
"""
index = self.calc_index(query)
pivot = self.pivots[index]
pivot_knnd = self.knnd[index]
dist_from_pivot = np.linalg.norm(query - pivot)
upper_bound = pivot_knnd + dist_from_pivot
return upper_bound
    def calc_knnd_upper_bounds(self, queries: f4[:, :]) -> f4[:, :]:
        """
        returns: np.array[np.array[float32]]
            upper bounds of knn distances of queries
        """
        results = np.empty((len(queries), len(self.knnd[0])))
        for i, query in enumerate(queries):
            results[i] = self.calc_knnd_upper_bound(query)
        return results
class PivNet(nn.Module):
def __init__(self, n_units: List[int], pivots: Pivots,
query_scaler: StandardScaler, knnd_scaler: StandardScaler):
super().__init__()
self.dim = pivots.dim
self.k_max = n_units[-1]
self.pivots = pivots
self.query_scaler = query_scaler
self.knnd_mean = knnd_scaler.mean_[:self.k_max]
self.knnd_std = knnd_scaler.scale_[:self.k_max]
self.fc = nn.Sequential(
nn.Linear(n_units[0], n_units[1]),
nn.ReLU(),
nn.Linear(n_units[1], n_units[2]),
nn.ReLU(),
nn.Linear(n_units[2], n_units[3]),
nn.ReLU(),
nn.Linear(n_units[3], n_units[4]),
)
def forward(self, x: torch.tensor) -> torch.tensor:
feature = torch.from_numpy(
self.pivots.get_features(x.numpy())).float()
# scale query feature
feature[:, :self.dim] = torch.from_numpy(
self.query_scaler.transform(feature[:, :self.dim]))
# scale pivot's knnd
feature[:, self.dim+1:] = \
(feature[:, self.dim+1:] - self.knnd_mean) / self.knnd_std
pred = self.fc(feature)
return pred
def estimate(self, x: torch.tensor) -> torch.tensor:
# returns: unscaled knn distances
pred = self.forward(x)
pred = pred * self.knnd_std + self.knnd_mean
return pred
class PivNetItr(nn.Module):
def __init__(self, dim: int, k_max: int, n_units: List[int],
pivots: Pivots, query_scaler: StandardScaler,
dist_mean: float, dist_std: float):
super().__init__()
self.dim = dim
self.k_max = k_max
self.k_mean = np.arange(1, k_max+1).mean()
self.k_std = np.arange(1, k_max+1).std()
self.pivots = pivots
self.query_scaler = query_scaler
self.dist_mean = dist_mean
self.dist_std = dist_std
self.fc = nn.Sequential(
nn.Linear(n_units[0], n_units[1]),
nn.ReLU(),
nn.Linear(n_units[1], n_units[2]),
nn.ReLU(),
nn.Linear(n_units[2], n_units[3]),
nn.ReLU(),
nn.Linear(n_units[3], n_units[4]),
)
def forward(self, x: torch.tensor) -> torch.tensor:
"""
x: torch.tensor
each element consists of k and query
"""
# scale k
k = x[:, 0].numpy().astype('int32')
k_scaled = (x[:, :1] - self.k_mean) / self.k_std
queries = x[:, 1:]
feature = torch.from_numpy(
self.pivots.get_kth_features(queries.numpy(), k)).float()
# scale query feature
feature[:, :self.dim] = torch.from_numpy(
self.query_scaler.transform(feature[:, :self.dim]))
# scale pivot's k-th nnd
feature[:, self.dim+1] = \
(feature[:, self.dim+1] - self.dist_mean) / self.dist_std
feature = torch.cat([k_scaled, feature], dim=1)
pred = self.fc(feature)
return pred
def estimate(self, x):
# returns: unscaled knn distance
pred = self.forward(x)
pred = pred * self.dist_std + self.dist_mean
return pred
| 10,860 | 30.120344 | 73 | py |
tdqn | tdqn-master/tdqn/tdqn.py | import time
import math, random
import numpy as np
import matplotlib.pyplot as plt
from os.path import join as pjoin
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
import logger
import copy
from replay import *
from schedule import *
from models import TDQN
from env import *
import jericho
from jericho.template_action_generator import TemplateActionGenerator
import sentencepiece as spm
def configure_logger(log_dir):
logger.configure(log_dir, format_strs=['log'])
global tb
tb = logger.Logger(log_dir, [logger.make_output_format('tensorboard', log_dir),
logger.make_output_format('csv', log_dir),
logger.make_output_format('stdout', log_dir)])
global log
log = logger.log
class TDQN_Trainer(object):
def __init__(self, args):
configure_logger(args.output_dir)
log(args)
self.args = args
self.log_freq = args.log_freq
self.update_freq = args.update_freq_td
self.update_freq_tar = args.update_freq_tar
self.filename = 'tdqn'
self.sp = spm.SentencePieceProcessor()
self.sp.Load(args.spm_path)
self.binding = jericho.load_bindings(args.rom_path)
self.vocab_act, self.vocab_act_rev = self.load_vocab_act(args.rom_path)
vocab_size = len(self.sp)
vocab_size_act = len(self.vocab_act.keys())
self.template_generator = TemplateActionGenerator(self.binding)
self.template_size = len(self.template_generator.templates)
if args.replay_buffer_type == 'priority':
self.replay_buffer = PriorityReplayBuffer(int(args.replay_buffer_size))
elif args.replay_buffer_type == 'standard':
self.replay_buffer = ReplayBuffer(int(args.replay_buffer_size))
self.model = TDQN(args, self.template_size, vocab_size, vocab_size_act).cuda()
self.target_model = TDQN(args, self.template_size, vocab_size, vocab_size_act).cuda()
self.optimizer = optim.Adam(self.model.parameters(), lr=args.lr)
self.num_steps = args.steps
self.batch_size = args.batch_size
self.gamma = args.gamma
self.rho = args.rho
self.bce_loss = nn.BCELoss()
def load_vocab_act(self, rom_path):
#loading vocab directly from Jericho
env = FrotzEnv(rom_path)
vocab = {i+2: str(v) for i, v in enumerate(env.get_dictionary())}
vocab[0] = ' '
vocab[1] = '<s>'
vocab_rev = {v: idx for idx, v in vocab.items()}
env.close()
return vocab, vocab_rev
def state_rep_generator(self, state_description):
remove = ['=', '-', '\'', ':', '[', ']', 'eos', 'EOS', 'SOS', 'UNK', 'unk', 'sos', '<', '>']
for rm in remove:
state_description = state_description.replace(rm, '')
state_description = state_description.split('|')
ret = [self.sp.encode_as_ids('<s>' + s_desc + '</s>') for s_desc in state_description]
return pad_sequences(ret, maxlen=self.args.max_seq_len)
def plot(self, frame_idx, rewards, losses, completion_steps):
fig = plt.figure(figsize=(20, 5))
plt.subplot(131)
plt.title('frame %s. reward: %s' % (frame_idx, np.mean(rewards[-10:])))
plt.plot(rewards)
plt.subplot(132)
plt.title('frame %s. steps: %s' % (frame_idx, np.mean(completion_steps[-10:])))
plt.plot(completion_steps)
plt.subplot(133)
plt.title('loss-lstm-dqn')
plt.plot(losses)
# txt = "Gamma:" + str(self.gamma) + ", Num Frames:" + str(self.num_frames) + ", E Decay:" + str(epsilon_decay)
plt.figtext(0.5, 0.01, self.filename, wrap=True, horizontalalignment='center', fontsize=12)
# plt.show()
fig.savefig('plots/' + self.filename + '_' + str(frame_idx) + '.png')
def compute_td_loss(self):
state, action, reward, next_state, done, valid = self.replay_buffer.sample(self.batch_size, self.rho)
action = torch.LongTensor(action).cuda()
state = torch.LongTensor(state).permute(1, 0, 2).cuda()
next_state = torch.LongTensor(next_state).permute(1, 0, 2).detach().cuda()
template_targets = torch.stack([v[0] for v in valid]).cuda()
obj_targets = torch.stack([v[1] for v in valid]).cuda()
decode_steps = []
for t in action[:, 0]:
decode_steps.append(self.template_generator.templates[t.item()].count('OBJ'))
template = action[:, 0]
object1 = action[:, 1]
object2 = action[:, 2]
reward = torch.FloatTensor(reward).cuda()
done = torch.FloatTensor(1 * done).cuda()
o1_mask, o2_mask = [0] * self.batch_size, [0] * self.batch_size
for d, st in enumerate(decode_steps):
if st > 1:
o1_mask[d] = 1
o2_mask[d] = 1
elif st == 1:
o1_mask[d] = 1
o1_mask, o2_mask = torch.FloatTensor(o1_mask).cuda(), torch.FloatTensor(o2_mask).cuda()
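        # e.g. (illustrative templates): 'take OBJ' has one hole, so only the
        # first object head contributes to its TD loss; 'put OBJ in OBJ' has
        # two holes and both object heads are unmasked.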
self.model.flatten_parameters()
q_t, q_o1, q_o2 = self.model(state)
supervised_loss = self.bce_loss(F.softmax(q_t, dim=1), template_targets)+\
self.bce_loss(F.softmax(q_o1, dim=1), obj_targets)+\
self.bce_loss(F.softmax(q_o2, dim=1), obj_targets)
tb.logkv_mean('SupervisedLoss', supervised_loss.item())
self.target_model.flatten_parameters()
next_q_t, next_q_o1, next_q_o2 = self.target_model(next_state)
q_t = q_t.gather(1, template.unsqueeze(1)).squeeze(1)
q_o1 = q_o1.gather(1, object1.unsqueeze(1)).squeeze(1)
q_o2 = q_o2.gather(1, object2.unsqueeze(1)).squeeze(1)
next_q_t = next_q_t.max(1)[0]
next_q_o1 = next_q_o1.max(1)[0]
next_q_o2 = next_q_o2.max(1)[0]
td_loss = F.smooth_l1_loss(q_t, (reward + self.gamma * next_q_t).detach()) +\
F.smooth_l1_loss(q_o1 * o1_mask, o1_mask * (reward + self.gamma * next_q_o1).detach()) +\
F.smooth_l1_loss(q_o2 * o2_mask, o2_mask * (reward + self.gamma * next_q_o2).detach())
tb.logkv_mean('TDLoss', td_loss.item())
loss = td_loss + supervised_loss
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
self.optimizer.step()
return loss
def tmpl_to_str(self, template_idx, o1_id, o2_id):
template_str = self.template_generator.templates[template_idx]
holes = template_str.count('OBJ')
assert holes <= 2
if holes <= 0:
return template_str
elif holes == 1:
return template_str.replace('OBJ', self.vocab_act[o1_id])
else:
return template_str.replace('OBJ', self.vocab_act[o1_id], 1)\
.replace('OBJ', self.vocab_act[o2_id], 1)
def generate_targets_multilabel(self, valid_acts):
template_targets = torch.zeros([self.template_size])
obj_targets = torch.zeros([len(self.vocab_act.keys())])
for act in valid_acts:
template_targets[act.template_id] = 1
for obj_id in act.obj_ids:
obj_targets[obj_id] = 1
return template_targets, obj_targets
def train(self):
start = time.time()
env = JerichoEnv(self.args.rom_path, 0, self.vocab_act_rev,
self.args.env_step_limit)
env.create()
episode = 1
state_text, info = env.reset()
state_rep = self.state_rep_generator(state_text)
for frame_idx in range(1, self.num_steps + 1):
found_valid_action = False
while not found_valid_action:
templates, o1s, o2s, q_ts, q_o1s, q_o2s = self.model.poly_act(state_rep)
for template, o1, o2, q_t, q_o1, q_o2 in zip(templates, o1s, o2s, q_ts, q_o1s, q_o2s):
action = [template, o1, o2]
action_str = self.tmpl_to_str(template, o1, o2)
next_state_text, reward, done, info = env.step(action_str)
                    if info['action_valid']:
found_valid_action = True
break
if episode % 100 == 0:
log('Action: {} Q_t: {:.2f} Q_o1: {:.2f} Q_o2: {:.2f}'.format(action_str, q_t, q_o1, q_o2))
log('Obs: {}'.format(clean(next_state_text.split('|')[2])))
log('Reward {}: {}'.format(env.steps, reward))
valid_acts = info['valid']
template_targets, obj_targets = self.generate_targets_multilabel(valid_acts)
next_state_rep = self.state_rep_generator(next_state_text)
self.replay_buffer.push(state_rep, action, reward, next_state_rep,
done, (template_targets, obj_targets))
state_text = next_state_text
state_rep = next_state_rep
if done:
score = info['score']
if episode % 100 == 0:
log('Episode {} Score {}\n'.format(episode, score))
tb.logkv_mean('EpisodeScore', score)
state_text, info = env.reset()
state_rep = self.state_rep_generator(state_text)
episode += 1
if len(self.replay_buffer) > self.batch_size:
if frame_idx % self.update_freq == 0:
loss = self.compute_td_loss()
tb.logkv_mean('Loss', loss.item())
if frame_idx % self.update_freq_tar == 0:
self.target_model = copy.deepcopy(self.model)
if frame_idx % self.log_freq == 0:
tb.logkv('Step', frame_idx)
tb.logkv('FPS', int(frame_idx/(time.time()-start)))
tb.dumpkvs()
env.close()
parameters = {
'model': self.model,
'target': self.target_model,
'replay_buffer': self.replay_buffer
}
torch.save(parameters, pjoin(self.args.output_dir, self.filename + '_final.pt'))
def pad_sequences(sequences, maxlen=None, dtype='int32', value=0.):
'''
Partially borrowed from Keras
# Arguments
sequences: list of lists where each element is a sequence
maxlen: int, maximum length
dtype: type to cast the resulting sequence.
value: float, value to pad the sequences to the desired value.
# Returns
x: numpy array with dimensions (number_of_sequences, maxlen)
'''
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
# pre truncating
trunc = s[-maxlen:]
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
# post padding
x[idx, :len(trunc)] = trunc
return x
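# A minimal usage sketch for pad_sequences (illustrative shapes, not taken
# from the training pipeline):
#
#   >>> pad_sequences([[1, 2, 3], [4, 5]], maxlen=4)
#   array([[1, 2, 3, 0],
#          [4, 5, 0, 0]], dtype=int32)
#
# Longer sequences are pre-truncated (the last maxlen items are kept) and
# shorter ones are post-padded with `value`.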
| 11,644 | 37.816667 | 119 | py |
tdqn | tdqn-master/tdqn/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import random
class TDQN(nn.Module):
def __init__(self, args, template_size, vocab_size, vocab_size_act):
super(TDQN, self).__init__()
self.embeddings = nn.Embedding(vocab_size_act, args.embedding_size)
self.state_network = StateNetwork(args, vocab_size)
self.t_scorer = nn.Linear(args.hidden_size, template_size)
self.o1_scorer = nn.Linear(args.hidden_size, vocab_size_act)
self.o2_scorer = nn.Linear(args.hidden_size, vocab_size_act)
self.args = args
self.template_size = template_size
self.vocab_size_act = vocab_size_act
def forward(self, state):
x, h = self.state_network(state)
q_t = self.t_scorer(x)
q_o1 = self.o1_scorer(x)
q_o2 = self.o2_scorer(x)
return q_t, q_o1, q_o2
def act(self, state, epsilon):
with torch.no_grad():
state = torch.LongTensor(state).unsqueeze(0).permute(1, 0, 2).cuda()
q_t, q_o1, q_o2 = self.forward(state)
t, o1, o2 = F.softmax(q_t, dim=1).multinomial(num_samples=1).item(),\
F.softmax(q_o1, dim=1).multinomial(num_samples=1).item(),\
F.softmax(q_o2, dim=1).multinomial(num_samples=1).item()
q_t = q_t[0,t].item()
q_o1 = q_o1[0,o1].item()
q_o2 = q_o2[0,o2].item()
return t, o1, o2, q_t, q_o1, q_o2
def poly_act(self, state, n_samples=512, replacement=True):
''' Samples many times from the model, optionally with replacement. '''
with torch.no_grad():
state = torch.LongTensor(state).unsqueeze(0).permute(1, 0, 2).cuda()
q_t, q_o1, q_o2 = self.forward(state)
t, o1, o2 = F.softmax(q_t, dim=1).multinomial(n_samples, replacement)[0],\
F.softmax(q_o1, dim=1).multinomial(n_samples, replacement)[0],\
F.softmax(q_o2, dim=1).multinomial(n_samples, replacement)[0]
qv_t = torch.index_select(q_t, 1, t).squeeze().cpu().detach().numpy()
qv_o1 = torch.index_select(q_o1, 1, o1).squeeze().cpu().detach().numpy()
qv_o2 = torch.index_select(q_o2, 1, o2).squeeze().cpu().detach().numpy()
return t.cpu().numpy(), o1.cpu().numpy(), o2.cpu().numpy(), qv_t, qv_o1, qv_o2
def flatten_parameters(self):
self.state_network.flatten_parameters()
class StateNetwork(nn.Module):
def __init__(self, args, vocab_size):
super(StateNetwork, self).__init__()
self.args = args
self.enc_look = PackedEncoderRNN(vocab_size, args.hidden_size)
self.enc_inv = PackedEncoderRNN(vocab_size, args.hidden_size)
self.enc_ob = PackedEncoderRNN(vocab_size, args.hidden_size)
self.enc_preva = PackedEncoderRNN(vocab_size, args.hidden_size)
self.fcx = nn.Linear(args.hidden_size * 4, args.hidden_size)
self.fch = nn.Linear(args.hidden_size * 4, args.hidden_size)
def forward(self, obs):
x_l, h_l = self.enc_look(obs[0, :, :], self.enc_look.initHidden(self.args.batch_size))
x_i, h_i = self.enc_inv(obs[1, :, :], self.enc_inv.initHidden(self.args.batch_size))
x_o, h_o = self.enc_ob(obs[2, :, :], self.enc_ob.initHidden(self.args.batch_size))
x_p, h_p = self.enc_preva(obs[3, :, :], self.enc_preva.initHidden(self.args.batch_size))
x = F.relu(self.fcx(torch.cat((x_l, x_i, x_o, x_p), dim=1)))
h = F.relu(self.fch(torch.cat((h_l, h_i, h_o, h_p), dim=2)))
return x, h
def flatten_parameters(self):
self.enc_look.flatten_parameters()
self.enc_inv.flatten_parameters()
self.enc_ob.flatten_parameters()
self.enc_preva.flatten_parameters()
class PackedEncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(PackedEncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input, hidden=None):
embedded = self.embedding(input).permute(1,0,2) # T x Batch x EmbDim
if hidden is None:
hidden = self.initHidden(input.size(0))
# Pack the padded batch of sequences
lengths = torch.tensor([torch.nonzero(n)[-1] + 1 for n in input], dtype=torch.long).cuda()
packed = nn.utils.rnn.pack_padded_sequence(embedded, lengths, enforce_sorted=False)
output, hidden = self.gru(packed, hidden)
# Unpack the padded sequence
output, _ = nn.utils.rnn.pad_packed_sequence(output)
# Return only the last timestep of output for each sequence
idx = (lengths-1).view(-1, 1).expand(len(lengths), output.size(2)).unsqueeze(0)
output = output.gather(0, idx).squeeze(0)
return output, hidden
def initHidden(self, batch_size):
return torch.zeros(1, batch_size, self.hidden_size).cuda()
def flatten_parameters(self):
self.gru.flatten_parameters()
| 5,149 | 40.532258 | 98 | py |
tdqn | tdqn-master/drrn/drrn.py | import pickle
import logging
import traceback
import torch
import torch.nn as nn
import torch.nn.functional as F
from os.path import join as pjoin
from memory import ReplayMemory, Transition, State
from model import DRRN
from util import *
import logger
import sentencepiece as spm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DRRN_Agent:
def __init__(self, args):
self.gamma = args.gamma
self.batch_size = args.batch_size
self.sp = spm.SentencePieceProcessor()
self.sp.Load(args.spm_path)
self.network = DRRN(len(self.sp), args.embedding_dim, args.hidden_dim).to(device)
self.memory = ReplayMemory(args.memory_size)
self.save_path = args.output_dir
self.clip = args.clip
self.optimizer = torch.optim.Adam(self.network.parameters(),
lr=args.learning_rate)
def observe(self, state, act, rew, next_state, next_acts, done):
self.memory.push(state, act, rew, next_state, next_acts, done)
def build_state(self, obs, infos):
""" Returns a state representation built from various info sources. """
obs_ids = [self.sp.EncodeAsIds(o) for o in obs]
look_ids = [self.sp.EncodeAsIds(info['look']) for info in infos]
inv_ids = [self.sp.EncodeAsIds(info['inv']) for info in infos]
return [State(ob, lk, inv) for ob, lk, inv in zip(obs_ids, look_ids, inv_ids)]
def encode(self, obs_list):
""" Encode a list of observations """
return [self.sp.EncodeAsIds(o) for o in obs_list]
def act(self, states, poss_acts, sample=True):
""" Returns a string action from poss_acts. """
idxs, values = self.network.act(states, poss_acts, sample)
act_ids = [poss_acts[batch][idx] for batch, idx in enumerate(idxs)]
return act_ids, idxs, values
def update(self):
if len(self.memory) < self.batch_size:
return
transitions = self.memory.sample(self.batch_size)
batch = Transition(*zip(*transitions))
# Compute Q(s', a') for all a'
# TODO: Use a target network???
next_qvals = self.network(batch.next_state, batch.next_acts)
# Take the max over next q-values
next_qvals = torch.tensor([vals.max() for vals in next_qvals], device=device)
# Zero all the next_qvals that are done
next_qvals = next_qvals * (1-torch.tensor(batch.done, dtype=torch.float, device=device))
targets = torch.tensor(batch.reward, dtype=torch.float, device=device) + self.gamma * next_qvals
# Next compute Q(s, a)
# Nest each action in a list - so that it becomes the only admissible cmd
nested_acts = tuple([[a] for a in batch.act])
qvals = self.network(batch.state, nested_acts)
# Combine the qvals: Maybe just do a greedy max for generality
qvals = torch.cat(qvals)
# Compute Huber loss
loss = F.smooth_l1_loss(qvals, targets.detach())
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.network.parameters(), self.clip)
self.optimizer.step()
return loss.item()
def load(self):
try:
self.memory = pickle.load(open(pjoin(self.save_path, 'memory.pkl'), 'rb'))
self.network = torch.load(pjoin(self.save_path, 'model.pt'))
        except Exception as e:
            print("Error loading model.")
            logging.error(traceback.format_exc())
def save(self):
try:
pickle.dump(self.memory, open(pjoin(self.save_path, 'memory.pkl'), 'wb'))
torch.save(self.network, pjoin(self.save_path, 'model.pt'))
except Exception as e:
print("Error saving model.")
logging.error(traceback.format_exc())
| 3,809 | 36.722772 | 104 | py |
tdqn | tdqn-master/drrn/model.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import itertools
from util import pad_sequences
from memory import State
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DRRN(torch.nn.Module):
"""
Deep Reinforcement Relevance Network - He et al. '16
"""
def __init__(self, vocab_size, embedding_dim, hidden_dim):
super(DRRN, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.obs_encoder = nn.GRU(embedding_dim, hidden_dim)
self.look_encoder = nn.GRU(embedding_dim, hidden_dim)
self.inv_encoder = nn.GRU(embedding_dim, hidden_dim)
self.act_encoder = nn.GRU(embedding_dim, hidden_dim)
self.hidden = nn.Linear(4*hidden_dim, hidden_dim)
self.act_scorer = nn.Linear(hidden_dim, 1)
def packed_rnn(self, x, rnn):
""" Runs the provided rnn on the input x. Takes care of packing/unpacking.
x: list of unpadded input sequences
Returns a tensor of size: len(x) x hidden_dim
"""
lengths = torch.tensor([len(n) for n in x], dtype=torch.long, device=device)
# Sort this batch in descending order by seq length
lengths, idx_sort = torch.sort(lengths, dim=0, descending=True)
_, idx_unsort = torch.sort(idx_sort, dim=0)
idx_sort = torch.autograd.Variable(idx_sort)
idx_unsort = torch.autograd.Variable(idx_unsort)
padded_x = pad_sequences(x)
x_tt = torch.from_numpy(padded_x).type(torch.long).to(device)
x_tt = x_tt.index_select(0, idx_sort)
# Run the embedding layer
embed = self.embedding(x_tt).permute(1,0,2) # Time x Batch x EncDim
# Pack padded batch of sequences for RNN module
packed = nn.utils.rnn.pack_padded_sequence(embed, lengths)
# Run the RNN
out, _ = rnn(packed)
# Unpack
out, _ = nn.utils.rnn.pad_packed_sequence(out)
# Get the last step of each sequence
idx = (lengths-1).view(-1,1).expand(len(lengths), out.size(2)).unsqueeze(0)
out = out.gather(0, idx).squeeze(0)
# Unsort
out = out.index_select(0, idx_unsort)
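        # Shape walk-through (illustrative): for x = [[3, 1, 4], [2]] the
        # padded batch is 2 x 3; the gather at (lengths - 1) picks each
        # sequence's last real timestep, so out is 2 x hidden_dim, restored to
        # the original batch order by the unsort above.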
return out
def forward(self, state_batch, act_batch):
"""
Batched forward pass.
obs_id_batch: iterable of unpadded sequence ids
act_batch: iterable of lists of unpadded admissible command ids
Returns a tuple of tensors containing q-values for each item in the batch
"""
# Zip the state_batch into an easy access format
state = State(*zip(*state_batch))
# This is number of admissible commands in each element of the batch
act_sizes = [len(a) for a in act_batch]
# Combine next actions into one long list
act_batch = list(itertools.chain.from_iterable(act_batch))
act_out = self.packed_rnn(act_batch, self.act_encoder)
# Encode the various aspects of the state
obs_out = self.packed_rnn(state.obs, self.obs_encoder)
look_out = self.packed_rnn(state.description, self.look_encoder)
inv_out = self.packed_rnn(state.inventory, self.inv_encoder)
state_out = torch.cat((obs_out, look_out, inv_out), dim=1)
# Expand the state to match the batches of actions
state_out = torch.cat([state_out[i].repeat(j,1) for i,j in enumerate(act_sizes)], dim=0)
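        # e.g. (illustrative): with act_sizes = [2, 3], state_out rows are
        # repeated 2 and 3 times so each admissible command is paired with its
        # own copy of the state encoding; the final split below undoes this.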
z = torch.cat((state_out, act_out), dim=1) # Concat along hidden_dim
z = F.relu(self.hidden(z))
act_values = self.act_scorer(z).squeeze(-1)
# Split up the q-values by batch
return act_values.split(act_sizes)
def act(self, states, act_ids, sample=True):
""" Returns an action-string, optionally sampling from the distribution
of Q-Values.
"""
act_values = self.forward(states, act_ids)
if sample:
act_probs = [F.softmax(vals, dim=0) for vals in act_values]
act_idxs = [torch.multinomial(probs, num_samples=1).item() \
for probs in act_probs]
else:
act_idxs = [vals.argmax(dim=0).item() for vals in act_values]
return act_idxs, act_values
| 4,282 | 40.990196 | 96 | py |
tdqn | tdqn-master/drrn/train.py | import subprocess
import time
import os
import torch
import logger
import argparse
import yaml
import jericho
from os.path import basename, dirname
from drrn import DRRN_Agent
from vec_env import VecEnv
from env import JerichoEnv
from jericho.util import clean
def configure_logger(log_dir):
logger.configure(log_dir, format_strs=['log'])
global tb
tb = logger.Logger(log_dir, [logger.make_output_format('tensorboard', log_dir),
logger.make_output_format('csv', log_dir),
logger.make_output_format('stdout', log_dir)])
global log
log = logger.log
def evaluate(agent, env, nb_episodes=1):
with torch.no_grad():
total_score = 0
for ep in range(nb_episodes):
log("Starting evaluation episode {}".format(ep))
score = evaluate_episode(agent, env)
log("Evaluation episode {} ended with score {}\n\n".format(ep, score))
total_score += score
avg_score = total_score / nb_episodes
return avg_score
def evaluate_episode(agent, env):
step = 0
done = False
ob, info = env.reset()
state = agent.build_state([ob], [info])[0]
log('Obs{}: {} Inv: {} Desc: {}'.format(step, clean(ob), clean(info['inv']), clean(info['look'])))
while not done:
valid_acts = info['valid']
valid_ids = agent.encode(valid_acts)
_, action_idx, action_values = agent.act([state], [valid_ids], sample=False)
action_idx = action_idx[0]
action_values = action_values[0]
action_str = valid_acts[action_idx]
log('Action{}: {}, Q-Value {:.2f}'.format(step, action_str, action_values[action_idx].item()))
s = ''
for idx, (act, val) in enumerate(sorted(zip(valid_acts, action_values), key=lambda x: x[1], reverse=True), 1):
s += "{}){:.2f} {} ".format(idx, val.item(), act)
log('Q-Values: {}'.format(s))
ob, rew, done, info = env.step(action_str)
log("Reward{}: {}, Score {}, Done {}".format(step, rew, info['score'], done))
step += 1
log('Obs{}: {} Inv: {} Desc: {}'.format(step, clean(ob), clean(info['inv']), clean(info['look'])))
state = agent.build_state([ob], [info])[0]
return info['score']
def train(agent, eval_env, envs, max_steps, update_freq, eval_freq, checkpoint_freq, log_freq):
start = time.time()
obs, infos = envs.reset()
states = agent.build_state(obs, infos)
valid_ids = [agent.encode(info['valid']) for info in infos]
for step in range(1, max_steps+1):
action_ids, action_idxs, _ = agent.act(states, valid_ids)
action_strs = [info['valid'][idx] for info, idx in zip(infos, action_idxs)]
obs, rewards, dones, infos = envs.step(action_strs)
for done, info in zip(dones, infos):
if done:
tb.logkv_mean('EpisodeScore', info['score'])
next_states = agent.build_state(obs, infos)
next_valids = [agent.encode(info['valid']) for info in infos]
for state, act, rew, next_state, valids, done in \
zip(states, action_ids, rewards, next_states, next_valids, dones):
agent.observe(state, act, rew, next_state, valids, done)
states = next_states
valid_ids = next_valids
if step % log_freq == 0:
tb.logkv('Step', step)
tb.logkv("FPS", int((step*envs.num_envs)/(time.time()-start)))
tb.dumpkvs()
if step % update_freq == 0:
loss = agent.update()
if loss is not None:
tb.logkv_mean('Loss', loss)
if step % checkpoint_freq == 0:
agent.save()
if step % eval_freq == 0:
eval_score = evaluate(agent, eval_env)
tb.logkv('EvalScore', eval_score)
tb.dumpkvs()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', default='logs')
parser.add_argument('--spm_path', default='../spm_models/unigram_8k.model')
parser.add_argument('--rom_path', default='zork1.z5')
parser.add_argument('--env_step_limit', default=100, type=int)
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_envs', default=8, type=int)
parser.add_argument('--max_steps', default=100000, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--checkpoint_freq', default=5000, type=int)
parser.add_argument('--eval_freq', default=5000, type=int)
parser.add_argument('--log_freq', default=100, type=int)
parser.add_argument('--memory_size', default=500000, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--gamma', default=.9, type=float)
parser.add_argument('--learning_rate', default=0.0001, type=float)
parser.add_argument('--clip', default=5, type=float)
parser.add_argument('--embedding_dim', default=128, type=int)
parser.add_argument('--hidden_dim', default=128, type=int)
return parser.parse_args()
def start_redis():
print('Starting Redis')
subprocess.Popen(['redis-server', '--save', '\"\"', '--appendonly', 'no'])
time.sleep(1)
def main():
assert jericho.__version__ == '2.1.0', "This code is designed to be run with Jericho version 2.1.0."
args = parse_args()
print(args)
configure_logger(args.output_dir)
start_redis()
agent = DRRN_Agent(args)
env = JerichoEnv(args.rom_path, args.seed, args.env_step_limit)
envs = VecEnv(args.num_envs, env)
env.create() # Create the environment for evaluation
train(agent, env, envs, args.max_steps, args.update_freq, args.eval_freq,
args.checkpoint_freq, args.log_freq)
def interactive_run(env):
    ob, info = env.reset()
    reward, done = 0, False
    while True:
        print(clean(ob), 'Reward', reward, 'Done', done, 'Valid', info)
        ob, reward, done, info = env.step(input())
if __name__ == "__main__":
main()
| 5,988 | 38.926667 | 118 | py |
RioGNN | RioGNN-main/train.py | import os
import argparse
from time import localtime, strftime, time
from sklearn.model_selection import train_test_split
from utils.utils import *
from model.model import *
from model.layers import *
from model.graphsage import *
from RL.rl_model import *
"""
Training and testing RIO-GNN
Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks
Source: https://github.com/safe-graph/RioGNN
"""
parser = argparse.ArgumentParser()
# dataset and model dependent args
parser.add_argument('--data', type=str, default='amazon', help='The dataset name. [yelp, amazon, mimic]')
parser.add_argument('--log_path', default='log/', type=str, help="Path of results")
parser.add_argument('--model', type=str, default='RIO', help='The model name. [RIO, SAGE]')
parser.add_argument('--inter', type=str, default='GNN',
help='The inter-relation aggregator type. [Att, Weight, Mean, GNN]')
parser.add_argument('--batch_size', type=int, default=1024,
help='Batch size 1024 for yelp, 256 for amazon, X for mimic.')
# hyper-parameters
parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')
parser.add_argument('--lambda_1', type=float, default=2, help='Simi loss weight.')
parser.add_argument('--lambda_2', type=float, default=1e-3, help='Weight decay (L2 loss weight).')
parser.add_argument('--emb_size', type=int, default=64, help='Node embedding size at the last layer.')
parser.add_argument('--num_epochs', type=int, default=500, help='Number of epochs.')
parser.add_argument('--test_epochs', type=int, default=3, help='Epoch interval to run test set.')
parser.add_argument('--test_ratio', type=float, default=0.60, help='Test set size.')
parser.add_argument('--under_sample', type=int, default=1, help='Under-sampling scale.')
# other args
parser.add_argument('--use_cuda', default=False, action='store_true', help='Training with CUDA.')
parser.add_argument('--seed', type=int, default=72, help='Random seed.')
# RL args
parser.add_argument('--device', type=str, default="cpu", help='"cuda" if torch.cuda.is_available() else "cpu".')
parser.add_argument('--GAMMA', type=float, default=0.95, help='Actor discount factor.')
parser.add_argument('--LR', type=float, default=0.01, help='Actor learning rate.')
parser.add_argument('--stop_num', type=int, default=3, help='Deep switching or termination conditions.')
parser.add_argument('--ALPHA', type=int, default=10, help='Adjustment parameters for depth and width.')
if __name__ == '__main__':
print('\n+------------------------------------------------------------------------------------------+\n'
'* Training and testing RIO-GNN *\n'
'* Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks *\n'
'* Source: https://github.com/safe-graph/RioGNN *\n'
'\n+------------------------------------------------------------------------------------------+\n', flush=True
)
# load hyper-parameters
args = parser.parse_args()
# generate log folder
log_save_path = args.log_path + 'log_' + strftime("%m%d%H%M%S", localtime())
os.mkdir(log_save_path)
print("Log save path: ", log_save_path, flush=True)
# device
args.cuda = args.use_cuda and torch.cuda.is_available()
print("CUDA: " + str(args.cuda), flush=True)
# load graph, feature, and label
homo, relations, feat_data, labels, index = load_data(args.data)
print("Running on: " + str(args.data), flush=True)
print("The number of relations: " + str(len(relations)), flush=True)
# train_test split
np.random.seed(args.seed)
random.seed(args.seed)
idx_train, idx_test, y_train, y_test = train_test_split(index, labels, stratify=labels,
test_size=args.test_ratio, random_state=2, shuffle=True)
# split pos neg sets for under-sampling
train_pos, train_neg = pos_neg_split(idx_train, y_train)
# initialize model input
features = nn.Embedding(feat_data.shape[0], feat_data.shape[1])
feat_data = normalize(feat_data)
features.weight = nn.Parameter(torch.FloatTensor(feat_data), requires_grad=False)
if args.cuda:
features.cuda()
# initialize RL action space
width_rl = [args.ALPHA for r in range(len(relations))]
height_rl = [math.ceil(pow(len(max(relations[r].values(), key=len)), 1 / width_rl[r]))
for r in range(len(relations))]
print('Width of each relation tree: ' + str(width_rl), flush=True)
print('Height of each relation tree: ' + str(height_rl), flush=True)
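    # Worked example (illustrative numbers): if the densest node in relation r
    # has 1,024 neighbors and ALPHA = 10, then width_rl[r] = 10 and
    # height_rl[r] = ceil(1024 ** (1 / 10)) = 2.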
# build one-layer models
print('Model: {0}, Inter-AGG: {1}, emb_size: {2}.'.format(args.model, args.inter, args.emb_size))
if args.model == 'RIO':
adj_lists = relations
intra_aggs = [IntraAgg(features, feat_data.shape[1], cuda=args.cuda) for r in range(len(relations))]
inter1 = InterAgg(width_rl, height_rl, args.device, args.LR, args.GAMMA, args.stop_num,
features, feat_data.shape[1],
args.emb_size, adj_lists,
intra_aggs, inter=args.inter,
cuda=args.cuda)
gnn_model = OneLayerRio(2, inter1, args.lambda_1)
elif args.model == 'SAGE':
adj_lists = homo
agg1 = MeanAggregator(features, cuda=args.cuda)
enc1 = Encoder(features, feat_data.shape[1], args.emb_size, adj_lists, agg1, gcn=True, cuda=args.cuda)
# the vanilla GraphSAGE model as baseline
enc1.num_samples = 5
gnn_model = GraphSage(2, enc1)
if args.cuda:
gnn_model.cuda()
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, gnn_model.parameters()), lr=args.lr,
weight_decay=args.lambda_2)
gnn_auc_train = 0
start_all_time = time()
# train the model
for epoch in range(args.num_epochs):
print('\n+------------------------------------------------------------------------------------------+\n'
' Epoch {0} '
'\n+------------------------------------------------------------------------------------------+\n'.
format(epoch), flush=True
)
# randomly under-sampling negative nodes for each epoch
sampled_idx_train = undersample(train_pos, train_neg, scale=args.under_sample)
rd.shuffle(sampled_idx_train)
# send number of batches to model to let the RLModule know the training progress
num_batches = int(len(sampled_idx_train) / args.batch_size) + 1
if args.model == 'RIO':
inter1.batch_num = num_batches
inter1.auc = gnn_auc_train
        epoch_loss = 0.0
epoch_time = 0
# mini-batch training
for batch in range(num_batches):
start_time = time()
i_start = batch * args.batch_size
i_end = min((batch + 1) * args.batch_size, len(sampled_idx_train))
batch_nodes = sampled_idx_train[i_start:i_end]
batch_label = labels[np.array(batch_nodes)]
optimizer.zero_grad()
if args.cuda:
loss = gnn_model.loss(batch_nodes, Variable(torch.cuda.LongTensor(batch_label)))
else:
loss = gnn_model.loss(batch_nodes, Variable(torch.LongTensor(batch_label)))
loss.backward()
optimizer.step()
end_time = time()
epoch_time += end_time - start_time
            epoch_loss += loss.item()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~', flush=True)
        print('Loss: {0}, time: {1}s'.format(epoch_loss / num_batches, epoch_time), flush=True)
# testing the model for every $test_epoch$ epoch
if epoch % args.test_epochs == 0:
if args.model == 'SAGE':
test_sage(idx_test, y_test, gnn_model, args.batch_size)
else:
gnn_auc, label_auc, gnn_recall, label_recall = test_rio(idx_test, y_test, gnn_model, args.batch_size)
gnn_auc_train = test_rio_train(idx_train, y_train, gnn_model, args.batch_size)
            # termination: the RL threshold search has converged (RIO only)
            if args.model == 'RIO' and not inter1.RL:
                break
    # log the RL threshold search trajectory (only produced by the RIO model)
    if args.model == 'RIO':
        with open(log_save_path + '/thresholds_log.txt', 'w') as file:
            for l in inter1.rl_tree.thresholds_log:
                file.writelines(str(l) + '\n')
        with open(log_save_path + '/states_log.txt', 'w') as file:
            for l in inter1.rl_tree.states_log:
                file.writelines(str(l) + '\n')
# end
print('\n+------------------------------------------------------------------------------------------+\n')
end_all_time = time()
total_epoch_time = end_all_time - start_all_time
print('Total time spent: ' + str(total_epoch_time), flush=True)
print('Total epoch: ' + str(epoch), flush=True)
| 8,973 | 45.497409 | 120 | py |
RioGNN | RioGNN-main/RL/actor_critic.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
"""
Actor-Critic implementations
Paper: Actor-Critic Algorithms
Source: https://github.com/llSourcell/actor_critic
"""
# torch.backends.cudnn.enabled = False # Non-deterministic algorithm
class PGNetwork(nn.Module):
def __init__(self, state_dim, action_dim):
"""
Initialize PGNetwork.
:param state_dim: dimension of the state
:param action_dim: dimension of the action
"""
super(PGNetwork, self).__init__()
self.fc1 = nn.Linear(state_dim, 20)
self.fc2 = nn.Linear(20, action_dim)
def forward(self, x):
out = F.relu(self.fc1(x))
out = self.fc2(out)
return out
def initialize_weights(self):
        for m in self.modules():
            # self.modules() also yields the network itself, so only
            # initialize the linear layers
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
class Actor(object):
def __init__(self, state_dim, action_dim, device, LR):
# Dimensions of state space and action space
self.state_dim = state_dim
self.action_dim = action_dim
self.device = device
self.LR = LR
# init network parameters
self.network = PGNetwork(state_dim=self.state_dim, action_dim=self.action_dim).to(self.device)
self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.LR)
# init some parameters
self.time_step = 0
def choose_action(self, observation):
observation = torch.FloatTensor(observation).to(self.device)
network_output = self.network.forward(observation)
with torch.no_grad():
# prob_weights = F.softmax(network_output, dim=0).cuda().data.cpu().numpy()
prob_weights = F.softmax(network_output, dim=0).data.cpu().numpy()
# prob_weights = F.softmax(network_output, dim=0).detach().numpy()
action = np.random.choice(range(prob_weights.shape[0]),
p=prob_weights) # select action w.r.t the actions prob
return action
def learn(self, state, action, td_error):
self.time_step += 1
# Step 1: Forward propagation
softmax_input = self.network.forward(torch.FloatTensor(state).to(self.device)).unsqueeze(0)
action = torch.LongTensor([action]).to(self.device)
neg_log_prob = F.cross_entropy(input=softmax_input, target=action, reduction='none')
# Step 2: Backpropagation
# Here you need to maximize the value of the current strategy,
# so you need to maximize "neg_log_prob * tf_error", that is, minimize "-neg_log_prob * td_error"
loss_a = -neg_log_prob * td_error
self.optimizer.zero_grad()
loss_a.backward()
self.optimizer.step()
class QNetwork(nn.Module):
def __init__(self, state_dim, action_dim):
super(QNetwork, self).__init__()
self.fc1 = nn.Linear(state_dim, 20)
self.fc2 = nn.Linear(20, 1)
def forward(self, x):
out = F.relu(self.fc1(x))
out = self.fc2(out)
return out
def initialize_weights(self):
        for m in self.modules():
            # self.modules() also yields the network itself, so only
            # initialize the linear layers
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
class Critic(object):
def __init__(self, state_dim, action_dim, device, LR, GAMMA):
# Dimensions of state space and action space
self.state_dim = state_dim
self.action_dim = action_dim
self.device = device
self.LR = LR
self.GAMMA = GAMMA
# init network parameters
self.network = QNetwork(state_dim=self.state_dim, action_dim=self.action_dim).to(self.device)
self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.LR)
self.loss_func = nn.MSELoss()
def train_Q_network(self, state, reward, next_state):
s, s_ = torch.FloatTensor(state).to(self.device), torch.FloatTensor(next_state).to(self.device)
# Forward propagation
v = self.network.forward(s) # v(s)
v_ = self.network.forward(s_) # v(s')
# Backpropagation
loss_q = self.loss_func(reward + self.GAMMA * v_, v)
self.optimizer.zero_grad()
loss_q.backward()
self.optimizer.step()
with torch.no_grad():
td_error = reward + self.GAMMA * v_ - v
return td_error
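# A minimal interaction sketch (hypothetical dimensions; `env` stands for any
# object with a Gym-style reset()/step() interface):
#
#   actor = Actor(state_dim=4, action_dim=2, device='cpu', LR=0.01)
#   critic = Critic(state_dim=4, action_dim=2, device='cpu', LR=0.01, GAMMA=0.95)
#   state = env.reset()
#   action = actor.choose_action(state)
#   next_state, reward, done, _ = env.step(action)
#   td_error = critic.train_Q_network(state, reward, next_state)
#   actor.learn(state, action, td_error)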
| 4,388 | 32.761538 | 105 | py |
RioGNN | RioGNN-main/model/graphsage.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
from torch.autograd import Variable
import random
"""
GraphSAGE implementations
Paper: Inductive Representation Learning on Large Graphs
Source: https://github.com/williamleif/graphsage-simple/
"""
class GraphSage(nn.Module):
"""
Vanilla GraphSAGE Model
Code partially from https://github.com/williamleif/graphsage-simple/
"""
def __init__(self, num_classes, enc):
super(GraphSage, self).__init__()
self.enc = enc
self.xent = nn.CrossEntropyLoss()
self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
init.xavier_uniform_(self.weight)
def forward(self, nodes):
embeds = self.enc(nodes)
scores = self.weight.mm(embeds)
return scores.t()
def to_prob(self, nodes):
pos_scores = torch.sigmoid(self.forward(nodes))
return pos_scores
def loss(self, nodes, labels):
scores = self.forward(nodes)
return self.xent(scores, labels.squeeze())
class MeanAggregator(nn.Module):
"""
Aggregates a node's embeddings using mean of neighbors' embeddings
"""
def __init__(self, features, cuda=False, gcn=False):
"""
Initializes the aggregator for a specific graph.
features -- function mapping LongTensor of node ids to FloatTensor of feature values.
cuda -- whether to use GPU
gcn --- whether to perform concatenation GraphSAGE-style, or add self-loops GCN-style
"""
super(MeanAggregator, self).__init__()
self.features = features
self.cuda = cuda
self.gcn = gcn
def forward(self, nodes, to_neighs, num_sample=10):
"""
nodes --- list of nodes in a batch
to_neighs --- list of sets, each set is the set of neighbors for node in batch
num_sample --- number of neighbors to sample. No sampling if None.
"""
# Local pointers to functions (speed hack)
_set = set
        if num_sample is not None:
            _sample = random.sample
            # random.sample requires a sequence in newer Python versions,
            # so materialize the neighbor set first
            samp_neighs = [_set(_sample(list(to_neigh), num_sample))
                           if len(to_neigh) >= num_sample else to_neigh
                           for to_neigh in to_neighs]
else:
samp_neighs = to_neighs
if self.gcn:
samp_neighs = [samp_neigh.union(set([int(nodes[i])])) for i, samp_neigh in enumerate(samp_neighs)]
unique_nodes_list = list(set.union(*samp_neighs))
unique_nodes = {n: i for i, n in enumerate(unique_nodes_list)}
mask = Variable(torch.zeros(len(samp_neighs), len(unique_nodes)))
column_indices = [unique_nodes[n] for samp_neigh in samp_neighs for n in samp_neigh]
row_indices = [i for i in range(len(samp_neighs)) for j in range(len(samp_neighs[i]))]
mask[row_indices, column_indices] = 1
if self.cuda:
mask = mask.cuda()
num_neigh = mask.sum(1, keepdim=True)
mask = mask.div(num_neigh)
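        # e.g. (illustrative): the row of a node with 3 sampled neighbors
        # holds 1/3 in those neighbors' columns, so mask.mm(embed_matrix)
        # below is exactly the mean of their embeddings.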
if self.cuda:
embed_matrix = self.features(torch.LongTensor(unique_nodes_list).cuda())
else:
embed_matrix = self.features(torch.LongTensor(unique_nodes_list))
to_feats = mask.mm(embed_matrix)
return to_feats
class Encoder(nn.Module):
"""
Vanilla GraphSAGE Encoder Module
Encodes a node's using 'convolutional' GraphSage approach
"""
def __init__(self, features, feature_dim,
embed_dim, adj_lists, aggregator,
num_sample=10,
base_model=None, gcn=False, cuda=False,
feature_transform=False):
super(Encoder, self).__init__()
self.features = features
self.feat_dim = feature_dim
self.adj_lists = adj_lists
self.aggregator = aggregator
self.num_sample = num_sample
        if base_model is not None:
self.base_model = base_model
self.gcn = gcn
self.embed_dim = embed_dim
self.cuda = cuda
self.aggregator.cuda = cuda
self.weight = nn.Parameter(
torch.FloatTensor(embed_dim, self.feat_dim if self.gcn else 2 * self.feat_dim))
init.xavier_uniform_(self.weight)
def forward(self, nodes):
"""
Generates embeddings for a batch of nodes.
nodes -- list of nodes
"""
neigh_feats = self.aggregator.forward(nodes, [self.adj_lists[int(node)] for node in nodes],
self.num_sample)
        if isinstance(nodes, list):
            index = torch.LongTensor(nodes)
            if self.cuda:
                index = index.cuda()
        else:
            index = nodes
        if not self.gcn:
            self_feats = self.features(index)
combined = torch.cat((self_feats, neigh_feats), dim=1)
else:
combined = neigh_feats
combined = F.relu(self.weight.mm(combined.t()))
return combined | 4,341 | 27.946667 | 101 | py |
RioGNN | RioGNN-main/model/model.py | import torch
import torch.nn as nn
from torch.nn import init
from torch.autograd import Variable
"""
Rio-GNN Models
Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks
Source: https://github.com/safe-graph/RioGNN
"""
class OneLayerRio(nn.Module):
"""
The Rio-GNN model in one layer
"""
def __init__(self, num_classes, inter1, lambda_1):
"""
Initialize the Rio-GNN model
:param num_classes: number of classes (2 in our paper)
:param inter1: the inter-relation aggregator that output the final embedding
"""
super(OneLayerRio, self).__init__()
self.inter1 = inter1
self.xent = nn.CrossEntropyLoss()
# the parameter to transform the final embedding
self.weight = nn.Parameter(torch.FloatTensor(num_classes, inter1.embed_dim))
init.xavier_uniform_(self.weight)
self.lambda_1 = lambda_1
def forward(self, nodes, labels, train_flag=True):
embeds1, label_scores = self.inter1(nodes, labels, train_flag)
scores = self.weight.mm(embeds1)
return scores.t(), label_scores
def to_prob(self, nodes, labels, train_flag=True):
gnn_logits, label_logits = self.forward(nodes, labels, train_flag)
gnn_scores = torch.sigmoid(gnn_logits)
label_scores = torch.sigmoid(label_logits)
return gnn_scores, label_scores
def loss(self, nodes, labels, train_flag=True):
gnn_scores, label_scores = self.forward(nodes, labels, train_flag)
# Simi loss, Eq. (4) in the paper
label_loss = self.xent(label_scores, labels.squeeze())
# GNN loss, Eq. (10) in the paper
gnn_loss = self.xent(gnn_scores, labels.squeeze())
# the loss function of Rio-GNN, Eq. (11) in the paper
final_loss = gnn_loss + self.lambda_1 * label_loss
return final_loss
class TwoLayerRio(nn.Module):
"""
    The Rio-GNN model in two layers
"""
def __init__(self, num_classes, inter1, inter2, lambda_1, last_label_scores):
"""
Initialize the Rio-GNN model
:param num_classes: number of classes (2 in our paper)
:param inter1: the inter-relation aggregator that output the final embedding
"""
super(TwoLayerRio, self).__init__()
self.inter1 = inter1
self.inter2 = inter2
self.xent = nn.CrossEntropyLoss()
# the parameter to transform the final embedding
self.weight = nn.Parameter(torch.FloatTensor(num_classes, inter2.embed_dim))
init.xavier_uniform_(self.weight)
self.lambda_1 = lambda_1
self.last_label_scores = last_label_scores
def forward(self, nodes, labels, train_flag=True):
label_scores_one = self.last_label_scores
embeds2, label_scores_two = self.inter2(nodes, labels, train_flag)
scores2 = self.weight.mm(embeds2)
return scores2.t(), label_scores_one, label_scores_two
def to_prob(self, nodes, labels, train_flag=True):
gnn_logits2, label_logits_one, label_logits_two = self.forward(nodes, labels, train_flag)
gnn_scores2 = torch.sigmoid(gnn_logits2)
label_scores_one = torch.sigmoid(label_logits_one)
label_scores_two = torch.sigmoid(label_logits_two)
return gnn_scores2, label_scores_one, label_scores_two
def loss(self, nodes, labels, train_flag=True):
gnn_scores2, label_scores_one, label_scores_two = self.forward(nodes, labels, train_flag)
# Simi loss, Eq. (4) in the paper
label_loss_one = self.xent(label_scores_one, labels.squeeze())
label_loss_two = self.xent(label_scores_two, labels.squeeze())
# GNN loss, Eq. (10) in the paper
gnn_loss2 = self.xent(gnn_scores2, labels.squeeze())
# the loss function of Rio-GNN, Eq. (11) in the paper
final_loss = gnn_loss2 + self.lambda_1 * label_loss_one
#final_loss = gnn_loss2 + (label_loss_one + label_loss_two)
return final_loss | 3,611 | 34.067961 | 91 | py |
RioGNN | RioGNN-main/model/layers.py | import sys
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
from torch.autograd import Variable
from operator import itemgetter
import math
from RL.rl_model import *
"""
Rio-GNN Layers
Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks
Source: https://github.com/safe-graph/RioGNN
"""
class InterAgg(nn.Module):
def __init__(self, width_rl, height_rl, device, LR, GAMMA, stop_num,
features, feature_dim,
embed_dim, adj_lists, intra_aggs,
inter, cuda=True):
"""
Initialize the inter-relation aggregator
:param width_rl: width of each relation tree
:param height_rl: height of each relation tree
:param device: "cuda" / "cpu"
:param LR: Actor learning rate (hyper-parameters of AC)
:param GAMMA: Actor discount factor (hyper-parameters of AC)
:param stop_num: deep switching or termination conditions
:param features: the input node features or embeddings for all nodes
:param feature_dim: the input dimension
:param embed_dim: the output dimension
:param adj_lists: a list of adjacency lists for each single-relation graph
:param intra_aggs: the intra-relation aggregators used by each single-relation graph
:param inter: the aggregator type: 'Att', 'Weight', 'Mean', 'GNN'
:param cuda: whether to use GPU
"""
super(InterAgg, self).__init__()
self.features = features
self.dropout = 0.6
self.adj_lists = adj_lists
self.intra_aggs = intra_aggs
self.embed_dim = embed_dim
self.feat_dim = feature_dim
self.inter = inter
self.cuda = cuda
# initial filtering thresholds
self.thresholds = [0.5 for r in range(len(intra_aggs))]
# RL condition flag
self.RL = True
self.rl_tree = RLForest(width_rl, height_rl, device, LR, GAMMA, stop_num, len(intra_aggs))
# number of batches for current epoch, assigned during training
self.batch_num = 0
self.auc = 0
# the activation function used by attention mechanism
self.leakyrelu = nn.LeakyReLU(0.2)
# parameter used to transform node embeddings before inter-relation aggregation
self.weight = nn.Parameter(torch.FloatTensor(self.embed_dim, self.feat_dim))
init.xavier_uniform_(self.weight)
# weight parameter for each relation used by Rio-Weight
self.alpha = nn.Parameter(torch.FloatTensor(self.embed_dim, len(intra_aggs)))
init.xavier_uniform_(self.alpha)
# parameters used by attention layer
self.a = nn.Parameter(torch.FloatTensor(2 * self.embed_dim, 1))
init.xavier_uniform_(self.a)
# label predictor for similarity measure
self.label_clf = nn.Linear(self.feat_dim, 2)
# initialize the parameter logs
self.weights_log = []
def forward(self, nodes, labels, train_flag=True):
"""
:param nodes: a list of batch node ids
:param labels: a list of batch node labels, only used by the RLModule
:param train_flag: indicates whether in training or testing mode
:return combined: the embeddings of a batch of input node features
:return center_scores: the label-aware scores of batch nodes
"""
# extract 1-hop neighbor ids from adj lists of each single-relation graph
to_neighs = []
for adj_list in self.adj_lists:
to_neighs.append([set(adj_list[int(node)]) for node in nodes])
# find unique nodes and their neighbors used in current batch
unique_nodes = set.union(*(set.union(*to_neighs[r]) for r in range(len(self.intra_aggs))), set(nodes))
# calculate label-aware scores
if self.cuda:
batch_features = self.features(torch.cuda.LongTensor(list(unique_nodes)))
else:
batch_features = self.features(torch.LongTensor(list(unique_nodes)))
batch_scores = self.label_clf(batch_features)
id_mapping = {node_id: index for node_id, index in zip(unique_nodes, range(len(unique_nodes)))}
# the label-aware scores for current batch of nodes
center_scores = batch_scores[itemgetter(*nodes)(id_mapping), :]
# get neighbor node id list for each batch node and relation
r_list = [[list(to_neigh) for to_neigh in to_neighs[r]] for r in range(len(self.intra_aggs))]
# assign label-aware scores to neighbor nodes for each batch node and relation
r_scores = [[batch_scores[itemgetter(*to_neigh)(id_mapping), :].view(-1, 2) for to_neigh in r_list[r]]
for r in range(len(self.intra_aggs))]
# count the number of neighbors kept for aggregation for each batch node and relation
r_sample_num_list = [[math.ceil(len(neighs) * self.thresholds[r]) for neighs in r_list[r]]
for r in range(len(self.intra_aggs))]
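# e.g. with a threshold of 0.5, a batch node with 7 neighbors under relation r
# keeps ceil(7 * 0.5) = 4 of them for intra-relation aggregation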
# intra-aggregation steps for each relation
# Eq. (8) in the paper
r_feats, r_scores = tuple(
zip(*list(self.intra_aggs[r].forward(nodes, r_list[r], center_scores, r_scores[r], r_sample_num_list[r])
for r in range(len(self.intra_aggs)))))
# concat the intra-aggregated embeddings from each relation
neigh_feats = torch.cat(r_feats, dim=0)
# get features or embeddings for batch nodes
if self.cuda and isinstance(nodes, list):
index = torch.LongTensor(nodes).cuda()
else:
index = torch.LongTensor(nodes)
self_feats = self.features(index)
# number of nodes in a batch
n = len(nodes)
# inter-relation aggregation steps
# Eq. (9) in the paper
if self.inter == 'Att':
# 1) Rio-Att Inter-relation Aggregator
combined, attention = att_inter_agg(len(self.adj_lists), self.leakyrelu, self_feats, neigh_feats,
self.embed_dim,
self.weight, self.a, n, self.dropout, self.training, self.cuda)
elif self.inter == 'Weight':
# 2) Rio-Weight Inter-relation Aggregator
combined = weight_inter_agg(len(self.adj_lists), self_feats, neigh_feats, self.embed_dim, self.weight,
self.alpha, n, self.cuda)
gem_weights = F.softmax(torch.sum(self.alpha, dim=0), dim=0).tolist()
if train_flag:
print(f'Weights: {gem_weights}')
elif self.inter == 'Mean':
# 3) Rio-Mean Inter-relation Aggregator
combined = mean_inter_agg(len(self.adj_lists), self_feats, neigh_feats, self.embed_dim, self.weight, n,
self.cuda)
elif self.inter == 'GNN':
# 4) Rio-GNN Inter-relation Aggregator
combined = threshold_inter_agg(len(self.adj_lists), self_feats, neigh_feats, self.embed_dim, self.weight,
self.thresholds, n, self.cuda)
# the reinforcement learning module
if self.RL and train_flag:
thresholds, stop_flag = self.rl_tree.get_threshold(list(r_scores), labels, self.thresholds, self.batch_num,
self.auc)
self.thresholds = thresholds
self.RL = stop_flag
return combined, center_scores
class IntraAgg(nn.Module):
def __init__(self, features, feat_dim, cuda=False):
"""
Initialize the intra-relation aggregator
:param features: the input node features or embeddings for all nodes
:param feat_dim: the input dimension
:param cuda: whether to use GPU
"""
super(IntraAgg, self).__init__()
self.features = features
self.cuda = cuda
self.feat_dim = feat_dim
def forward(self, nodes, to_neighs_list, batch_scores, neigh_scores, sample_list):
"""
Code partially from https://github.com/williamleif/graphsage-simple/
:param nodes: list of nodes in a batch
:param to_neighs_list: neighbor node id list for each batch node in one relation
:param batch_scores: the label-aware scores of batch nodes
:param neigh_scores: the label-aware scores of the 1-hop neighbors of each batch node in one relation
:param sample_list: the number of neighbors kept for each batch node in one relation
:return to_feats: the aggregated embeddings of the batch nodes' neighbors in one relation
:return samp_scores: the average neighbor distances for each relation after filtering
"""
# filter neighbors under the given relation
samp_neighs, samp_scores = filter_neighs_ada_threshold(batch_scores, neigh_scores, to_neighs_list, sample_list)
# find the unique nodes among batch nodes and the filtered neighbors
unique_nodes_list = list(set.union(*samp_neighs))
unique_nodes = {n: i for i, n in enumerate(unique_nodes_list)}
# intra-relation aggregation only with sampled neighbors
mask = Variable(torch.zeros(len(samp_neighs), len(unique_nodes)))
column_indices = [unique_nodes[n] for samp_neigh in samp_neighs for n in samp_neigh]
row_indices = [i for i in range(len(samp_neighs)) for _ in range(len(samp_neighs[i]))]
mask[row_indices, column_indices] = 1
if self.cuda:
mask = mask.cuda()
num_neigh = mask.sum(1, keepdim=True)
mask = mask.div(num_neigh)
if self.cuda:
embed_matrix = self.features(torch.LongTensor(unique_nodes_list).cuda())
else:
embed_matrix = self.features(torch.LongTensor(unique_nodes_list))
to_feats = mask.mm(embed_matrix)
to_feats = F.relu(to_feats)
return to_feats, samp_scores
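# Hedged toy illustration of the mask-based aggregation above (ids are
# assumptions): two batch nodes with sampled neighbors {0, 1} and {1, 2} over
# unique nodes [0, 1, 2] yield the row-normalized mask
#   [[0.5, 0.5, 0.0],
#    [0.0, 0.5, 0.5]]
# so mask.mm(embed_matrix) averages each node's neighbor embeddings before ReLU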
def filter_neighs_ada_threshold(center_scores, neigh_scores, neighs_list, sample_list):
"""
Filter neighbors according label predictor result with adaptive thresholds
:param center_scores: the label-aware scores of batch nodes
:param neigh_scores: the label-aware scores of the 1-hop neighbors of each batch node in one relation
:param neighs_list: neighbor node id list for each batch node in one relation
:param sample_list: the number of neighbors kept for each batch node in one relation
:return samp_neighs: the neighbor indices and neighbor similarity scores
:return samp_scores: the average neighbor distances for each relation after filtering
"""
samp_neighs = []
samp_scores = []
for idx, center_score in enumerate(center_scores):
center_score = center_scores[idx][0]
neigh_score = neigh_scores[idx][:, 0].view(-1, 1)
center_score = center_score.repeat(neigh_score.size()[0], 1)
neighs_indices = neighs_list[idx]
num_sample = sample_list[idx]
# compute the L1-distance of batch nodes and their neighbors
# Eq. (2) in paper
score_diff = torch.abs(center_score - neigh_score).squeeze()
sorted_scores, sorted_indices = torch.sort(score_diff, dim=0, descending=False)
selected_indices = sorted_indices.tolist()
# top-p sampling according to distance ranking and thresholds
# Section 3.3.1 in paper
if len(neigh_scores[idx]) > num_sample + 1:
selected_neighs = [neighs_indices[n] for n in selected_indices[:num_sample]]
selected_scores = sorted_scores.tolist()[:num_sample]
else:
selected_neighs = neighs_indices
selected_scores = score_diff.tolist()
if isinstance(selected_scores, float):
selected_scores = [selected_scores]
samp_neighs.append(set(selected_neighs))
samp_scores.append(selected_scores)
return samp_neighs, samp_scores
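# Hedged toy example (scores and ids are assumptions, not from the original
# repo): one batch node with four neighbors, of which the two closest in
# label-score distance (Eq. (2)) survive the filtering.
def _example_filter_neighs():
    center = torch.tensor([[0.9, 0.1]])  # label-aware score of the batch node
    neighs = [torch.tensor([[0.8, 0.2], [0.1, 0.9], [0.85, 0.15], [0.3, 0.7]])]
    ids = [[10, 11, 12, 13]]
    kept, dists = filter_neighs_ada_threshold(center, neighs, ids, [2])
    # kept == [{10, 12}]: the neighbors with the smallest |0.9 - score|
    return kept, dists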
def mean_inter_agg(num_relations, self_feats, neigh_feats, embed_dim, weight, n, cuda):
"""
Mean inter-relation aggregator
:param num_relations: number of relations in the graph
:param self_feats: batch nodes features or embeddings
:param neigh_feats: intra-relation aggregated neighbor embeddings for each relation
:param embed_dim: the dimension of output embedding
:param weight: parameter used to transform node embeddings before inter-relation aggregation
:param n: number of nodes in a batch
:param cuda: whether to use GPU
:return: inter-relation aggregated node embeddings
"""
# transform batch node embedding and neighbor embedding in each relation with weight parameter
center_h = weight.mm(self_feats.t())
neigh_h = weight.mm(neigh_feats.t())
# initialize the final neighbor embedding
if cuda:
aggregated = torch.zeros(size=(embed_dim, n)).cuda()
else:
aggregated = torch.zeros(size=(embed_dim, n))
# sum neighbor embeddings together
for r in range(num_relations):
aggregated += neigh_h[:, r * n:(r + 1) * n]
# sum aggregated neighbor embedding and batch node embedding
# take the average of the center embedding and the relation embeddings
# (the hard-coded 4.0 divisor assumes three relations plus the center embedding)
combined = F.relu((center_h + aggregated) / 4.0)
return combined
def weight_inter_agg(num_relations, self_feats, neigh_feats, embed_dim, weight, alpha, n, cuda):
"""
Weight inter-relation aggregator
Reference: https://arxiv.org/abs/2002.12307
:param num_relations: number of relations in the graph
:param self_feats: batch nodes features or embeddings
:param neigh_feats: intra-relation aggregated neighbor embeddings for each relation
:param embed_dim: the dimension of output embedding
:param weight: parameter used to transform node embeddings before inter-relation aggregation
:param alpha: weight parameter for each relation used by Rio-Weight
:param n: number of nodes in a batch
:param cuda: whether to use GPU
:return: inter-relation aggregated node embeddings
"""
# transform batch node embedding and neighbor embedding in each relation with weight parameter
center_h = weight.mm(self_feats.t())
neigh_h = weight.mm(neigh_feats.t())
# compute relation weights using softmax
w = F.softmax(alpha, dim=1)
# initialize the final neighbor embedding
if cuda:
aggregated = torch.zeros(size=(embed_dim, n)).cuda()
else:
aggregated = torch.zeros(size=(embed_dim, n))
# add weighted neighbor embeddings in each relation together
for r in range(num_relations):
aggregated += torch.mul(w[:, r].unsqueeze(1).repeat(1, n), neigh_h[:, r * n:(r + 1) * n])
# sum aggregated neighbor embedding and batch node embedding
# feed them to activation function
combined = F.relu(center_h + aggregated)
return combined
def att_inter_agg(num_relations, att_layer, self_feats, neigh_feats, embed_dim, weight, a, n, dropout, training, cuda):
"""
Attention-based inter-relation aggregator
Reference: https://github.com/Diego999/pyGAT
:param num_relations: number of relations in the graph
:param att_layer: the activation function used by the attention layer
:param self_feats: batch nodes features or embeddings
:param neigh_feats: intra-relation aggregated neighbor embeddings for each relation
:param embed_dim: the dimension of output embedding
:param weight: parameter used to transform node embeddings before inter-relation aggregation
:param a: parameters used by attention layer
:param n: number of nodes in a batch
:param dropout: dropout for attention layer
:param training: a flag indicating whether in the training or testing mode
:param cuda: whether to use GPU
:return combined: inter-relation aggregated node embeddings
:return att: the attention weights for each relation
"""
# transform batch node embedding and neighbor embedding in each relation with weight parameter
center_h = self_feats.mm(weight.t())
neigh_h = neigh_feats.mm(weight.t())
# compute attention weights
combined = torch.cat((center_h.repeat(num_relations, 1), neigh_h), dim=1)
e = att_layer(combined.mm(a))
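# NOTE: the slicing on the next line assumes exactly three relations,
# since torch.cat along dim=1 needs slices with equal first dimensions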
attention = torch.cat((e[0:n, :], e[n:2 * n, :], e[2 * n:num_relations * n, :]), dim=1)
ori_attention = F.softmax(attention, dim=1)
attention = F.dropout(ori_attention, dropout, training=training)
# initialize the final neighbor embedding
if cuda:
aggregated = torch.zeros(size=(n, embed_dim)).cuda()
else:
aggregated = torch.zeros(size=(n, embed_dim))
# add neighbor embeddings in each relation together with attention weights
for r in range(num_relations):
aggregated += torch.mul(attention[:, r].unsqueeze(1).repeat(1, embed_dim), neigh_h[r * n:(r + 1) * n, :])
# sum aggregated neighbor embedding and batch node embedding
# feed them to activation function
combined = F.relu((center_h + aggregated).t())
# extract the attention weights
att = F.softmax(torch.sum(ori_attention, dim=0), dim=0)
return combined, att
def threshold_inter_agg(num_relations, self_feats, neigh_feats, embed_dim, weight, threshold, n, cuda):
"""
Rio-GNN inter-relation aggregator
Eq. (9) in the paper
:param num_relations: number of relations in the graph
:param self_feats: batch nodes features or embeddings
:param neigh_feats: intra-relation aggregated neighbor embeddings for each relation
:param embed_dim: the dimension of output embedding
:param weight: parameter used to transform node embeddings before inter-relation aggregation
:param threshold: the neighbor filtering thresholds used as aggregating weights
:param n: number of nodes in a batch
:param cuda: whether to use GPU
:return: inter-relation aggregated node embeddings
"""
# transform batch node embedding and neighbor embedding in each relation with weight parameter
center_h = weight.mm(self_feats.t())
neigh_h = weight.mm(neigh_feats.t())
if cuda:
# use thresholds as aggregating weights
w = torch.FloatTensor(threshold).repeat(weight.size(0), 1).cuda()
# initialize the final neighbor embedding
aggregated = torch.zeros(size=(embed_dim, n)).cuda()
else:
w = torch.FloatTensor(threshold).repeat(weight.size(0), 1)
aggregated = torch.zeros(size=(embed_dim, n))
# add weighted neighbor embeddings in each relation together
for r in range(num_relations):
aggregated += torch.mul(w[:, r].unsqueeze(1).repeat(1, n), neigh_h[:, r * n:(r + 1) * n])
# sum aggregated neighbor embedding and batch node embedding
# feed them to activation function
combined = F.relu(center_h + aggregated)
return combined
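# Hedged sketch (toy sizes are assumptions, not from the original repo):
# calling the Eq. (9) aggregator above with 2 relations, 3 batch nodes,
# feature dim 5 and embedding dim 4 on CPU.
def _example_threshold_inter_agg():
    n, feat_dim, embed_dim, num_rel = 3, 5, 4, 2
    self_feats = torch.randn(n, feat_dim)
    neigh_feats = torch.randn(num_rel * n, feat_dim)  # relation blocks stacked on dim 0
    weight = torch.randn(embed_dim, feat_dim)
    return threshold_inter_agg(num_rel, self_feats, neigh_feats, embed_dim,
                               weight, [0.5, 0.7], n, cuda=False)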
| 18,857 | 42.855814 | 119 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/eval_copy_detection.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pickle
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import models as torchvision_models
from torchvision import transforms as pth_transforms
from PIL import Image, ImageFile
import numpy as np
import utils
import vision_transformer as vits
from eval_knn import extract_features
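# NOTE: this import is shadowed by the local extract_features defined later in this file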
class CopydaysDataset():
def __init__(self, basedir):
self.basedir = basedir
self.block_names = (
['original', 'strong'] +
['jpegqual/%d' % i for i in
[3, 5, 8, 10, 15, 20, 30, 50, 75]] +
['crops/%d' % i for i in
[10, 15, 20, 30, 40, 50, 60, 70, 80]])
self.nblocks = len(self.block_names)
self.query_blocks = range(self.nblocks)
self.q_block_sizes = np.ones(self.nblocks, dtype=int) * 157
self.q_block_sizes[1] = 229
# search only among originals
self.database_blocks = [0]
def get_block(self, i):
dirname = self.basedir + '/' + self.block_names[i]
fnames = [dirname + '/' + fname
for fname in sorted(os.listdir(dirname))
if fname.endswith('.jpg')]
return fnames
def get_block_filenames(self, subdir_name):
dirname = self.basedir + '/' + subdir_name
return [fname
for fname in sorted(os.listdir(dirname))
if fname.endswith('.jpg')]
def eval_result(self, ids, distances):
j0 = 0
for i in range(self.nblocks):
j1 = j0 + self.q_block_sizes[i]
block_name = self.block_names[i]
I = ids[j0:j1]  # ranked result ids for the queries in this block
sum_AP = 0
if block_name != 'strong':
# 1:1 mapping of files to names
positives_per_query = [[i] for i in range(j1 - j0)]
else:
originals = self.get_block_filenames('original')
strongs = self.get_block_filenames('strong')
# check if prefixes match
positives_per_query = [
[j for j, bname in enumerate(originals)
if bname[:4] == qname[:4]]
for qname in strongs]
for qno, Iline in enumerate(I):
positives = positives_per_query[qno]
ranks = []
for rank, bno in enumerate(Iline):
if bno in positives:
ranks.append(rank)
sum_AP += score_ap_from_ranks_1(ranks, len(positives))
print("eval on %s mAP=%.3f" % (
block_name, sum_AP / (j1 - j0)))
j0 = j1
# from the Holidays evaluation package
def score_ap_from_ranks_1(ranks, nres):
""" Compute the average precision of one search.
ranks = ordered list of ranks of true positives
nres = total number of positives in dataset
"""
# accumulate trapezoids in PR-plot
ap = 0.0
# All have an x-size of:
recall_step = 1.0 / nres
for ntp, rank in enumerate(ranks):
# y-size on left side of trapezoid:
# ntp = nb of true positives so far
# rank = nb of retrieved items so far
if rank == 0:
precision_0 = 1.0
else:
precision_0 = ntp / float(rank)
# y-size on right side of trapezoid:
# ntp and rank are increased by one
precision_1 = (ntp + 1) / float(rank + 1)
ap += (precision_1 + precision_0) * recall_step / 2.0
return ap
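# Hedged worked example (added for illustration): with two positives retrieved
# at ranks 0 and 2, the trapezoid sums above give
# AP = 0.5 * (1 + 1) / 2 + 0.5 * (1/2 + 2/3) / 2 ~= 0.7917
def _example_score_ap():
    expected = 0.5 * (1.0 + 1.0) / 2.0 + 0.5 * (0.5 + 2.0 / 3.0) / 2.0
    assert abs(score_ap_from_ranks_1([0, 2], 2) - expected) < 1e-9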
class ImgListDataset(torch.utils.data.Dataset):
def __init__(self, img_list, transform=None):
self.samples = img_list
self.transform = transform
def __getitem__(self, i):
with open(self.samples[i], 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
return img, i
def __len__(self):
return len(self.samples)
def is_image_file(s):
ext = s.split(".")[-1]
if ext in ['jpg', 'jpeg', 'png', 'ppm', 'bmp', 'pgm', 'tif', 'tiff', 'webp']:
return True
return False
@torch.no_grad()
def extract_features(image_list, model, args):
transform = pth_transforms.Compose([
pth_transforms.Resize((args.imsize, args.imsize), interpolation=3),
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
tempdataset = ImgListDataset(image_list, transform=transform)
data_loader = torch.utils.data.DataLoader(tempdataset, batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers, drop_last=False,
sampler=torch.utils.data.DistributedSampler(tempdataset, shuffle=False))
features = None
for samples, index in utils.MetricLogger(delimiter=" ").log_every(data_loader, 10):
samples, index = samples.cuda(non_blocking=True), index.cuda(non_blocking=True)
feats = model.get_intermediate_layers(samples, n=1)[0].clone()
cls_output_token = feats[:, 0, :] # [CLS] token
# GeM with exponent 4 for output patch tokens
b, h, w, d = len(samples), int(samples.shape[-2] / model.patch_embed.patch_size), int(samples.shape[-1] / model.patch_embed.patch_size), feats.shape[-1]
feats = feats[:, 1:, :].reshape(b, h, w, d)
feats = feats.clamp(min=1e-6).permute(0, 3, 1, 2)
feats = nn.functional.avg_pool2d(feats.pow(4), (h, w)).pow(1. / 4).reshape(b, -1)
# concatenate [CLS] token and GeM pooled patch tokens
feats = torch.cat((cls_output_token, feats), dim=1)
# init storage feature matrix
if dist.get_rank() == 0 and features is None:
features = torch.zeros(len(data_loader.dataset), feats.shape[-1])
if args.use_cuda:
features = features.cuda(non_blocking=True)
# get indexes from all processes
y_all = torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device)
y_l = list(y_all.unbind(0))
y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True)
y_all_reduce.wait()
index_all = torch.cat(y_l)
# share features between processes
feats_all = torch.empty(dist.get_world_size(), feats.size(0), feats.size(1),
dtype=feats.dtype, device=feats.device)
output_l = list(feats_all.unbind(0))
output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True)
output_all_reduce.wait()
# update storage feature matrix
if dist.get_rank() == 0:
if args.use_cuda:
features.index_copy_(0, index_all, torch.cat(output_l))
else:
features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu())
return features # features is still None for every rank which is not 0 (main)
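# Hedged standalone sketch of the GeM pooling used above (shapes and the helper
# name are assumptions, not part of the original file): GeM_p(x) = mean(x^p)^(1/p),
# here with p = 4 over a 7x7 grid of patch tokens.
def _example_gem_pool(p=4.0):
    feats = torch.rand(2, 16, 7, 7).clamp(min=1e-6)  # (batch, dim, h, w)
    return nn.functional.avg_pool2d(feats.pow(p), (7, 7)).pow(1.0 / p).reshape(2, -1)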
if __name__ == '__main__':
parser = argparse.ArgumentParser('Copy detection on Copydays')
parser.add_argument('--data_path', default='/path/to/copydays/', type=str,
help="See https://lear.inrialpes.fr/~jegou/data.php#copydays")
parser.add_argument('--whitening_path', default='/path/to/whitening_data/', type=str,
help="""Path to directory with images used for computing the whitening operator.
In our paper, we use 20k random images from YFCC100M.""")
parser.add_argument('--distractors_path', default='/path/to/distractors/', type=str,
help="Path to directory with distractors images. In our paper, we use 10k random images from YFCC100M.")
parser.add_argument('--imsize', default=320, type=int, help='Image size (square image)')
parser.add_argument('--batch_size_per_gpu', default=16, type=int, help='Per-GPU batch-size')
parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
parser.add_argument('--use_cuda', default=True, type=utils.bool_flag)
parser.add_argument('--arch', default='vit_base', type=str, help='Architecture')
parser.add_argument('--patch_size', default=8, type=int, help='Patch resolution of the model.')
parser.add_argument("--checkpoint_key", default="teacher", type=str,
help='Key to use in the checkpoint (example: "teacher")')
parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
distributed training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
args = parser.parse_args()
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
cudnn.benchmark = True
# ============ building network ... ============
if "vit" in args.arch:
model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
else:
print(f"Architecture {args.arch} non supported")
sys.exit(1)
if args.use_cuda:
model.cuda()
model.eval()
utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
dataset = CopydaysDataset(args.data_path)
# ============ Extract features ... ============
# extract features for queries
queries = []
for q in dataset.query_blocks:
queries.append(extract_features(dataset.get_block(q), model, args))
if utils.get_rank() == 0:
queries = torch.cat(queries)
print(f"Extraction of queries features done. Shape: {queries.shape}")
# extract features for database
database = []
for b in dataset.database_blocks:
database.append(extract_features(dataset.get_block(b), model, args))
# extract features for distractors
if os.path.isdir(args.distractors_path):
print("Using distractors...")
list_distractors = [os.path.join(args.distractors_path, s) for s in os.listdir(args.distractors_path) if is_image_file(s)]
database.append(extract_features(list_distractors, model, args))
if utils.get_rank() == 0:
database = torch.cat(database)
print(f"Extraction of database and distractors features done. Shape: {database.shape}")
# ============ Whitening ... ============
if os.path.isdir(args.whitening_path):
print(f"Extracting features on images from {args.whitening_path} for learning the whitening operator.")
list_whit = [os.path.join(args.whitening_path, s) for s in os.listdir(args.whitening_path) if is_image_file(s)]
features_for_whitening = extract_features(list_whit, model, args)
if utils.get_rank() == 0:
# center
mean_feature = torch.mean(features_for_whitening, dim=0)
database -= mean_feature
queries -= mean_feature
pca = utils.PCA(dim=database.shape[-1], whit=0.5)
# compute covariance
cov = torch.mm(features_for_whitening.T, features_for_whitening) / features_for_whitening.shape[0]
pca.train_pca(cov.cpu().numpy())
database = pca.apply(database)
queries = pca.apply(queries)
# ============ Copy detection ... ============
if utils.get_rank() == 0:
# l2 normalize the features
database = nn.functional.normalize(database, dim=1, p=2)
queries = nn.functional.normalize(queries, dim=1, p=2)
# similarity
similarity = torch.mm(queries, database.T)
distances, indices = similarity.topk(20, largest=True, sorted=True)
# evaluate
dataset.eval_result(indices, distances)
dist.barrier()
| 12,631 | 40.827815 | 160 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/eval_linear.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import json
from pathlib import Path
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
def eval_linear(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
cudnn.benchmark = True
# ============ building network ... ============
# if the network is a Vision Transformer (i.e. vit_tiny, vit_small, vit_base)
if args.arch in vits.__dict__.keys():
model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
embed_dim = model.embed_dim * (args.n_last_blocks + int(args.avgpool_patchtokens))
# if the network is a XCiT
elif "xcit" in args.arch:
model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)
embed_dim = model.embed_dim
# otherwise, we check if the architecture is in torchvision models
elif args.arch in torchvision_models.__dict__.keys():
model = torchvision_models.__dict__[args.arch]()
embed_dim = model.fc.weight.shape[1]
model.fc = nn.Identity()
else:
print(f"Unknow architecture: {args.arch}")
sys.exit(1)
model.cuda()
model.eval()
# load weights to evaluate
utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
print(f"Model {args.arch} built.")
linear_classifier = LinearClassifier(embed_dim, num_labels=args.num_labels)
linear_classifier = linear_classifier.cuda()
linear_classifier = nn.parallel.DistributedDataParallel(linear_classifier, device_ids=[args.gpu])
# ============ preparing data ... ============
val_transform = pth_transforms.Compose([
pth_transforms.Resize(256, interpolation=3),
pth_transforms.CenterCrop(224),
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
dataset_val = datasets.ImageFolder(os.path.join(args.data_path, "val"), transform=val_transform)
val_loader = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=True,
)
if args.evaluate:
utils.load_pretrained_linear_weights(linear_classifier, args.arch, args.patch_size)
test_stats = validate_network(val_loader, model, linear_classifier, args.n_last_blocks, args.avgpool_patchtokens)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
return
train_transform = pth_transforms.Compose([
pth_transforms.RandomResizedCrop(224),
pth_transforms.RandomHorizontalFlip(),
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
dataset_train = datasets.ImageFolder(os.path.join(args.data_path, "train"), transform=train_transform)
sampler = torch.utils.data.distributed.DistributedSampler(dataset_train)
train_loader = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=True,
)
print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
# set optimizer
optimizer = torch.optim.SGD(
linear_classifier.parameters(),
args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256., # linear scaling rule
momentum=0.9,
weight_decay=0, # we do not apply weight decay
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0)
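# worked example of the linear scaling rule above (illustrative numbers):
# with --lr 0.001, batch_size_per_gpu 128 and 8 GPUs, the SGD learning rate
# becomes 0.001 * (128 * 8) / 256 = 0.004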
# Optionally resume from a checkpoint
to_restore = {"epoch": 0, "best_acc": 0.}
utils.restart_from_checkpoint(
os.path.join(args.output_dir, "checkpoint.pth.tar"),
run_variables=to_restore,
state_dict=linear_classifier,
optimizer=optimizer,
scheduler=scheduler,
)
start_epoch = to_restore["epoch"]
best_acc = to_restore["best_acc"]
for epoch in range(start_epoch, args.epochs):
train_loader.sampler.set_epoch(epoch)
train_stats = train(model, linear_classifier, optimizer, train_loader, epoch, args.n_last_blocks, args.avgpool_patchtokens)
scheduler.step()
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch}
if epoch % args.val_freq == 0 or epoch == args.epochs - 1:
test_stats = validate_network(val_loader, model, linear_classifier, args.n_last_blocks, args.avgpool_patchtokens)
print(f"Accuracy at epoch {epoch} of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
best_acc = max(best_acc, test_stats["acc1"])
print(f'Max accuracy so far: {best_acc:.2f}%')
log_stats = {**{k: v for k, v in log_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()}}
if utils.is_main_process():
with (Path(args.output_dir) / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
save_dict = {
"epoch": epoch + 1,
"state_dict": linear_classifier.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"best_acc": best_acc,
}
torch.save(save_dict, os.path.join(args.output_dir, "checkpoint.pth.tar"))
print("Training of the supervised linear classifier on frozen features completed.\n"
"Top-1 test accuracy: {acc:.1f}".format(acc=best_acc))
def train(model, linear_classifier, optimizer, loader, epoch, n, avgpool):
linear_classifier.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
for (inp, target) in metric_logger.log_every(loader, 20, header):
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# forward
with torch.no_grad():
if "vit" in args.arch:
intermediate_output = model.get_intermediate_layers(inp, n)
output = torch.cat([x[:, 0] for x in intermediate_output], dim=-1)
if avgpool:
output = torch.cat((output.unsqueeze(-1), torch.mean(intermediate_output[-1][:, 1:], dim=1).unsqueeze(-1)), dim=-1)
output = output.reshape(output.shape[0], -1)
else:
output = model(inp)
output = linear_classifier(output)
# compute cross entropy loss
loss = nn.CrossEntropyLoss()(output, target)
# compute the gradients
optimizer.zero_grad()
loss.backward()
# step
optimizer.step()
# log
torch.cuda.synchronize()
metric_logger.update(loss=loss.item())
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def validate_network(val_loader, model, linear_classifier, n, avgpool):
linear_classifier.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
for inp, target in metric_logger.log_every(val_loader, 20, header):
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# forward
with torch.no_grad():
if "vit" in args.arch:
intermediate_output = model.get_intermediate_layers(inp, n)
output = torch.cat([x[:, 0] for x in intermediate_output], dim=-1)
if avgpool:
output = torch.cat((output.unsqueeze(-1), torch.mean(intermediate_output[-1][:, 1:], dim=1).unsqueeze(-1)), dim=-1)
output = output.reshape(output.shape[0], -1)
else:
output = model(inp)
output = linear_classifier(output)
loss = nn.CrossEntropyLoss()(output, target)
if linear_classifier.module.num_labels >= 5:
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
else:
acc1, = utils.accuracy(output, target, topk=(1,))
batch_size = inp.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
if linear_classifier.module.num_labels >= 5:
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
if linear_classifier.module.num_labels >= 5:
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
else:
print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
class LinearClassifier(nn.Module):
"""Linear layer to train on top of frozen features"""
def __init__(self, dim, num_labels=1000):
super(LinearClassifier, self).__init__()
self.num_labels = num_labels
self.linear = nn.Linear(dim, num_labels)
self.linear.weight.data.normal_(mean=0.0, std=0.01)
self.linear.bias.data.zero_()
def forward(self, x):
# flatten
x = x.view(x.size(0), -1)
# linear layer
return self.linear(x)
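# Hedged usage sketch (the 384-d width and batch size are assumptions): probing
# a batch of frozen backbone features with a 10-way linear head.
def _example_linear_probe():
    head = LinearClassifier(dim=384, num_labels=10)
    feats = torch.randn(8, 384)  # frozen features from the backbone
    return head(feats)           # logits of shape (8, 10)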
if __name__ == '__main__':
parser = argparse.ArgumentParser('Evaluation with linear classification on ImageNet')
parser.add_argument('--n_last_blocks', default=4, type=int, help="""Concatenate [CLS] tokens
for the `n` last blocks. We use `n=4` when evaluating ViT-Small and `n=1` with ViT-Base.""")
parser.add_argument('--avgpool_patchtokens', default=False, type=utils.bool_flag,
help="""Whether ot not to concatenate the global average pooled features to the [CLS] token.
We typically set this to False for ViT-Small and to True with ViT-Base.""")
parser.add_argument('--arch', default='vit_small', type=str, help='Architecture')
parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
parser.add_argument("--checkpoint_key", default="teacher", type=str, help='Key to use in the checkpoint (example: "teacher")')
parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.')
parser.add_argument("--lr", default=0.001, type=float, help="""Learning rate at the beginning of
training (highest LR used during training). The learning rate is linearly scaled
with the batch size, and specified here for a reference batch size of 256.
We recommend tweaking the LR depending on the checkpoint evaluated.""")
parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size')
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
distributed training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
parser.add_argument('--data_path', default='/path/to/imagenet/', type=str)
parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
parser.add_argument('--val_freq', default=1, type=int, help="Epoch frequency for validation.")
parser.add_argument('--output_dir', default=".", help='Path to save logs and checkpoints')
parser.add_argument('--num_labels', default=1000, type=int, help='Number of labels for linear classifier')
parser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
args = parser.parse_args()
eval_linear(args)
| 13,256 | 46.010638 | 135 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/eval_image_retrieval.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pickle
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import models as torchvision_models
from torchvision import transforms as pth_transforms
from PIL import Image, ImageFile
import numpy as np
import utils
import vision_transformer as vits
from eval_knn import extract_features
class OxfordParisDataset(torch.utils.data.Dataset):
def __init__(self, dir_main, dataset, split, transform=None, imsize=None):
if dataset not in ['roxford5k', 'rparis6k']:
raise ValueError('Unknown dataset: {}!'.format(dataset))
# loading imlist, qimlist, and gnd, in cfg as a dict
gnd_fname = os.path.join(dir_main, dataset, 'gnd_{}.pkl'.format(dataset))
with open(gnd_fname, 'rb') as f:
cfg = pickle.load(f)
cfg['gnd_fname'] = gnd_fname
cfg['ext'] = '.jpg'
cfg['qext'] = '.jpg'
cfg['dir_data'] = os.path.join(dir_main, dataset)
cfg['dir_images'] = os.path.join(cfg['dir_data'], 'jpg')
cfg['n'] = len(cfg['imlist'])
cfg['nq'] = len(cfg['qimlist'])
cfg['im_fname'] = config_imname
cfg['qim_fname'] = config_qimname
cfg['dataset'] = dataset
self.cfg = cfg
self.samples = cfg["qimlist"] if split == "query" else cfg["imlist"]
self.transform = transform
self.imsize = imsize
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
path = os.path.join(self.cfg["dir_images"], self.samples[index] + ".jpg")
ImageFile.LOAD_TRUNCATED_IMAGES = True
with open(path, 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.imsize is not None:
img.thumbnail((self.imsize, self.imsize), Image.ANTIALIAS)
if self.transform is not None:
img = self.transform(img)
return img, index
def config_imname(cfg, i):
return os.path.join(cfg['dir_images'], cfg['imlist'][i] + cfg['ext'])
def config_qimname(cfg, i):
return os.path.join(cfg['dir_images'], cfg['qimlist'][i] + cfg['qext'])
if __name__ == '__main__':
parser = argparse.ArgumentParser('Image Retrieval on revisited Paris and Oxford')
parser.add_argument('--data_path', default='/path/to/revisited_paris_oxford/', type=str)
parser.add_argument('--dataset', default='roxford5k', type=str, choices=['roxford5k', 'rparis6k'])
parser.add_argument('--multiscale', default=False, type=utils.bool_flag)
parser.add_argument('--imsize', default=224, type=int, help='Image size')
parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
parser.add_argument('--use_cuda', default=True, type=utils.bool_flag)
parser.add_argument('--arch', default='vit_small', type=str, help='Architecture')
parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
parser.add_argument("--checkpoint_key", default="teacher", type=str,
help='Key to use in the checkpoint (example: "teacher")')
parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
distributed training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
args = parser.parse_args()
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
cudnn.benchmark = True
# ============ preparing data ... ============
transform = pth_transforms.Compose([
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
dataset_train = OxfordParisDataset(args.data_path, args.dataset, split="train", transform=transform, imsize=args.imsize)
dataset_query = OxfordParisDataset(args.data_path, args.dataset, split="query", transform=transform, imsize=args.imsize)
sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler,
batch_size=1,
num_workers=args.num_workers,
pin_memory=True,
drop_last=False,
)
data_loader_query = torch.utils.data.DataLoader(
dataset_query,
batch_size=1,
num_workers=args.num_workers,
pin_memory=True,
drop_last=False,
)
print(f"train: {len(dataset_train)} imgs / query: {len(dataset_query)} imgs")
# ============ building network ... ============
if "vit" in args.arch:
model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
elif "xcit" in args.arch:
model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)
elif args.arch in torchvision_models.__dict__.keys():
model = torchvision_models.__dict__[args.arch](num_classes=0)
else:
print(f"Architecture {args.arch} non supported")
sys.exit(1)
if args.use_cuda:
model.cuda()
model.eval()
# load pretrained weights
if os.path.isfile(args.pretrained_weights):
state_dict = torch.load(args.pretrained_weights, map_location="cpu")
if args.checkpoint_key is not None and args.checkpoint_key in state_dict:
print(f"Take key {args.checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[args.checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(args.pretrained_weights, msg))
elif args.arch == "vit_small" and args.patch_size == 16:
print("Since no pretrained weights have been provided, we load pretrained DINO weights on Google Landmark v2.")
model.load_state_dict(torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/dino_vitsmall16_googlelandmark_pretrain/dino_vitsmall16_googlelandmark_pretrain.pth"))
else:
print("Warning: We use random weights.")
############################################################################
# Step 1: extract features
train_features = extract_features(model, data_loader_train, args.use_cuda, multiscale=args.multiscale)
query_features = extract_features(model, data_loader_query, args.use_cuda, multiscale=args.multiscale)
if utils.get_rank() == 0: # only rank 0 will work from now on
# normalize features
train_features = nn.functional.normalize(train_features, dim=1, p=2)
query_features = nn.functional.normalize(query_features, dim=1, p=2)
############################################################################
# Step 2: similarity
sim = torch.mm(train_features, query_features.T)
ranks = torch.argsort(-sim, dim=0).cpu().numpy()
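# ranks[:, q] now lists database image indices from most to least similar
# to query q, which is the layout utils.compute_map below expects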
############################################################################
# Step 3: evaluate
gnd = dataset_train.cfg['gnd']
# evaluate ranks
ks = [1, 5, 10]
# search for easy & hard
gnd_t = []
for i in range(len(gnd)):
g = {}
g['ok'] = np.concatenate([gnd[i]['easy'], gnd[i]['hard']])
g['junk'] = np.concatenate([gnd[i]['junk']])
gnd_t.append(g)
mapM, apsM, mprM, prsM = utils.compute_map(ranks, gnd_t, ks)
# search for hard
gnd_t = []
for i in range(len(gnd)):
g = {}
g['ok'] = np.concatenate([gnd[i]['hard']])
g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['easy']])
gnd_t.append(g)
mapH, apsH, mprH, prsH = utils.compute_map(ranks, gnd_t, ks)
print('>> {}: mAP M: {}, H: {}'.format(args.dataset, np.around(mapM*100, decimals=2), np.around(mapH*100, decimals=2)))
print('>> {}: mP@k{} M: {}, H: {}'.format(args.dataset, np.array(ks), np.around(mprM*100, decimals=2), np.around(mprH*100, decimals=2)))
dist.barrier()
| 9,288 | 44.985149 | 192 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/hubconf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
dependencies = ["torch", "torchvision"]
def dino_vits16(pretrained=True, **kwargs):
"""
ViT-Small/16x16 pre-trained with DINO.
Achieves 74.5% top-1 accuracy on ImageNet with k-NN classification.
"""
model = vits.__dict__["vit_small"](patch_size=16, num_classes=0, **kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth",
map_location="cpu",
)
model.load_state_dict(state_dict, strict=True)
return model
def dino_vits8(pretrained=True, **kwargs):
"""
ViT-Small/8x8 pre-trained with DINO.
Achieves 78.3% top-1 accuracy on ImageNet with k-NN classification.
"""
model = vits.__dict__["vit_small"](patch_size=8, num_classes=0, **kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth",
map_location="cpu",
)
model.load_state_dict(state_dict, strict=True)
return model
def dino_vitb16(pretrained=True, **kwargs):
"""
ViT-Base/16x16 pre-trained with DINO.
Achieves 76.1% top-1 accuracy on ImageNet with k-NN classification.
"""
model = vits.__dict__["vit_base"](patch_size=16, num_classes=0, **kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth",
map_location="cpu",
)
model.load_state_dict(state_dict, strict=True)
return model
def dino_vitb8(pretrained=True, **kwargs):
"""
ViT-Base/8x8 pre-trained with DINO.
Achieves 77.4% top-1 accuracy on ImageNet with k-NN classification.
"""
model = vits.__dict__["vit_base"](patch_size=8, num_classes=0, **kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth",
map_location="cpu",
)
model.load_state_dict(state_dict, strict=True)
return model
def dino_resnet50(pretrained=True, **kwargs):
"""
ResNet-50 pre-trained with DINO.
Achieves 75.3% top-1 accuracy on ImageNet linear evaluation benchmark (requires to train `fc`).
"""
model = resnet50(pretrained=False, **kwargs)
model.fc = torch.nn.Identity()
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/dino/dino_resnet50_pretrain/dino_resnet50_pretrain.pth",
map_location="cpu",
)
model.load_state_dict(state_dict, strict=False)
return model
def dino_xcit_small_12_p16(pretrained=True, **kwargs):
"""
XCiT-Small-12/16 pre-trained with DINO.
"""
model = torch.hub.load('facebookresearch/xcit:main', "xcit_small_12_p16", num_classes=0, **kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/dino/dino_xcit_small_12_p16_pretrain/dino_xcit_small_12_p16_pretrain.pth",
map_location="cpu",
)
model.load_state_dict(state_dict, strict=True)
return model
def dino_xcit_small_12_p8(pretrained=True, **kwargs):
"""
XCiT-Small-12/8 pre-trained with DINO.
"""
model = torch.hub.load('facebookresearch/xcit:main', "xcit_small_12_p8", num_classes=0, **kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/dino/dino_xcit_small_12_p8_pretrain/dino_xcit_small_12_p8_pretrain.pth",
map_location="cpu",
)
model.load_state_dict(state_dict, strict=True)
return model
def dino_xcit_medium_24_p16(pretrained=True, **kwargs):
"""
XCiT-Medium-24/16 pre-trained with DINO.
"""
model = torch.hub.load('facebookresearch/xcit:main', "xcit_medium_24_p16", num_classes=0, **kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/dino/dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth",
map_location="cpu",
)
model.load_state_dict(state_dict, strict=True)
return model
def dino_xcit_medium_24_p8(pretrained=True, **kwargs):
"""
XCiT-Medium-24/8 pre-trained with DINO.
"""
model = torch.hub.load('facebookresearch/xcit:main', "xcit_medium_24_p8", num_classes=0, **kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/dino/dino_xcit_medium_24_p8_pretrain/dino_xcit_medium_24_p8_pretrain.pth",
map_location="cpu",
)
model.load_state_dict(state_dict, strict=True)
return model
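# Hedged usage sketch (not part of the original file): once this hubconf.py is
# reachable on the hub path, any entrypoint above can be loaded as, e.g.,
#   model = torch.hub.load('facebookresearch/dino:main', 'dino_vits16')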
| 5,653 | 36.197368 | 124 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/visualize_attention.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import cv2
import random
import colorsys
import requests
from io import BytesIO
import skimage.io
from skimage.measure import find_contours
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms as pth_transforms
import numpy as np
from PIL import Image
import utils
import vision_transformer as vits
def apply_mask(image, mask, color, alpha=0.5):
for c in range(3):
image[:, :, c] = image[:, :, c] * (1 - alpha * mask) + alpha * mask * color[c] * 255
return image
def random_colors(N, bright=True):
"""
Generate random colors.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def display_instances(image, mask, fname="test", figsize=(5, 5), blur=False, contour=True, alpha=0.5):
fig = plt.figure(figsize=figsize, frameon=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax = plt.gca()
N = 1
mask = mask[None, :, :]
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
margin = 0
ax.set_ylim(height + margin, -margin)
ax.set_xlim(-margin, width + margin)
ax.axis('off')
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
_mask = mask[i]
if blur:
_mask = cv2.blur(_mask,(10,10))
# Mask
masked_image = apply_mask(masked_image, _mask, color, alpha)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
if contour:
padded_mask = np.zeros((_mask.shape[0] + 2, _mask.shape[1] + 2))
padded_mask[1:-1, 1:-1] = _mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
ax.imshow(masked_image.astype(np.uint8), aspect='auto')
fig.savefig(fname)
print(f"{fname} saved.")
return
if __name__ == '__main__':
parser = argparse.ArgumentParser('Visualize Self-Attention maps')
parser.add_argument('--arch', default='vit_small', type=str,
choices=['vit_tiny', 'vit_small', 'vit_base'], help='Architecture (support only ViT atm).')
parser.add_argument('--patch_size', default=8, type=int, help='Patch resolution of the model.')
parser.add_argument('--pretrained_weights', default='', type=str,
help="Path to pretrained weights to load.")
parser.add_argument("--checkpoint_key", default="teacher", type=str,
help='Key to use in the checkpoint (example: "teacher")')
parser.add_argument("--image_path", default=None, type=str, help="Path of the image to load.")
parser.add_argument("--image_size", default=(480, 480), type=int, nargs="+", help="Resize image.")
parser.add_argument('--output_dir', default='.', help='Path where to save visualizations.')
parser.add_argument("--threshold", type=float, default=None, help="""We visualize masks
obtained by thresholding the self-attention maps to keep xx% of the mass.""")
args = parser.parse_args()
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# build model
model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
for p in model.parameters():
p.requires_grad = False
model.eval()
model.to(device)
if os.path.isfile(args.pretrained_weights):
state_dict = torch.load(args.pretrained_weights, map_location="cpu")
if args.checkpoint_key is not None and args.checkpoint_key in state_dict:
print(f"Take key {args.checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[args.checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(args.pretrained_weights, msg))
else:
print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.")
url = None
if args.arch == "vit_small" and args.patch_size == 16:
url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
elif args.arch == "vit_small" and args.patch_size == 8:
url = "dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth" # model used for visualizations in our paper
elif args.arch == "vit_base" and args.patch_size == 16:
url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
elif args.arch == "vit_base" and args.patch_size == 8:
url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
if url is not None:
print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.")
state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)
model.load_state_dict(state_dict, strict=True)
else:
print("There is no reference weights available for this model => We use random weights.")
# open image
if args.image_path is None:
# user has not specified any image - we use our own image
print("Please use the `--image_path` argument to indicate the path of the image you wish to visualize.")
print("Since no image path have been provided, we take the first image in our paper.")
response = requests.get("https://dl.fbaipublicfiles.com/dino/img.png")
img = Image.open(BytesIO(response.content))
img = img.convert('RGB')
elif os.path.isfile(args.image_path):
with open(args.image_path, 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
else:
print(f"Provided image path {args.image_path} is non valid.")
sys.exit(1)
transform = pth_transforms.Compose([
pth_transforms.Resize(args.image_size),
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
img = transform(img)
# make the image divisible by the patch size
w, h = img.shape[1] - img.shape[1] % args.patch_size, img.shape[2] - img.shape[2] % args.patch_size
img = img[:, :w, :h].unsqueeze(0)
w_featmap = img.shape[-2] // args.patch_size
h_featmap = img.shape[-1] // args.patch_size
attentions = model.get_last_selfattention(img.to(device))
    nh = attentions.shape[1]  # number of heads
# we keep only the output patch attention
attentions = attentions[0, :, 0, 1:].reshape(nh, -1)
if args.threshold is not None:
# we keep only a certain percentage of the mass
val, idx = torch.sort(attentions)
val /= torch.sum(val, dim=1, keepdim=True)
cumval = torch.cumsum(val, dim=1)
th_attn = cumval > (1 - args.threshold)
idx2 = torch.argsort(idx)
for head in range(nh):
th_attn[head] = th_attn[head][idx2[head]]
th_attn = th_attn.reshape(nh, w_featmap, h_featmap).float()
# interpolate
th_attn = nn.functional.interpolate(th_attn.unsqueeze(0), scale_factor=args.patch_size, mode="nearest")[0].cpu().numpy()
attentions = attentions.reshape(nh, w_featmap, h_featmap)
attentions = nn.functional.interpolate(attentions.unsqueeze(0), scale_factor=args.patch_size, mode="nearest")[0].cpu().numpy()
# save attentions heatmaps
os.makedirs(args.output_dir, exist_ok=True)
torchvision.utils.save_image(torchvision.utils.make_grid(img, normalize=True, scale_each=True), os.path.join(args.output_dir, "img.png"))
for j in range(nh):
fname = os.path.join(args.output_dir, "attn-head" + str(j) + ".png")
plt.imsave(fname=fname, arr=attentions[j], format='png')
print(f"{fname} saved.")
if args.threshold is not None:
image = skimage.io.imread(os.path.join(args.output_dir, "img.png"))
for j in range(nh):
display_instances(image, th_attn[j], fname=os.path.join(args.output_dir, "mask_th" + str(args.threshold) + "_head" + str(j) +".png"), blur=False)
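    # Example (minimal sketch, defined but not executed here): the mass-thresholding
    # trick used above, shown standalone. Per head, sort the attention weights, keep
    # the smallest set of patches whose normalized cumulative sum exceeds
    # (1 - threshold), then scatter the mask back to the original patch order with
    # the inverse argsort. The _demo_* helper and its head/patch counts are
    # illustrative assumptions, not part of the original script.
    def _demo_threshold_attention_mass(threshold=0.6):
        nh, npatch = 2, 16
        attn = torch.rand(nh, npatch)
        val, idx = torch.sort(attn)                      # ascending per head
        val = val / torch.sum(val, dim=1, keepdim=True)
        cumval = torch.cumsum(val, dim=1)
        th_attn = cumval > (1 - threshold)               # marks the top-mass patches
        idx2 = torch.argsort(idx)                        # inverse permutation per head
        for head in range(nh):
            th_attn[head] = th_attn[head][idx2[head]]
        return th_attn                                   # bool mask in original order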
| 9,389 | 42.878505 | 157 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Misc functions.
Mostly copy-paste from torchvision references or other public repos like DETR:
https://github.com/facebookresearch/detr/blob/master/util/misc.py
"""
import os
import sys
import time
import math
import random
import argparse
import datetime
import warnings
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
class GaussianBlur(object):
"""
Apply Gaussian Blur to the PIL image.
"""
def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):
self.prob = p
self.radius_min = radius_min
self.radius_max = radius_max
def __call__(self, img):
do_it = random.random() <= self.prob
if not do_it:
return img
return img.filter(
ImageFilter.GaussianBlur(
radius=random.uniform(self.radius_min, self.radius_max)
)
)
class Solarization(object):
"""
Apply Solarization to the PIL image.
"""
def __init__(self, p):
self.p = p
def __call__(self, img):
if random.random() < self.p:
return ImageOps.solarize(img)
else:
return img
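# Example (minimal sketch): composing the two transforms above into a DINO-style
# augmentation for one crop. The crop size, scale range and probabilities below
# are illustrative placeholders, not the values used for training.
def _demo_augmentation_pipeline():
    from PIL import Image
    from torchvision import transforms
    pipeline = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(0.4, 1.0)),
        GaussianBlur(p=0.1),
        Solarization(p=0.2),
        transforms.ToTensor(),
    ])
    img = Image.new("RGB", (256, 256), color=(128, 64, 32))  # dummy image
    return pipeline(img)  # float tensor of shape [3, 224, 224]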
def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size):
if os.path.isfile(pretrained_weights):
state_dict = torch.load(pretrained_weights, map_location="cpu")
if checkpoint_key is not None and checkpoint_key in state_dict:
print(f"Take key {checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
else:
print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.")
url = None
if model_name == "vit_small" and patch_size == 16:
url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
elif model_name == "vit_small" and patch_size == 8:
url = "dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth"
elif model_name == "vit_base" and patch_size == 16:
url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
elif model_name == "vit_base" and patch_size == 8:
url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
elif model_name == "xcit_small_12_p16":
url = "dino_xcit_small_12_p16_pretrain/dino_xcit_small_12_p16_pretrain.pth"
elif model_name == "xcit_small_12_p8":
url = "dino_xcit_small_12_p8_pretrain/dino_xcit_small_12_p8_pretrain.pth"
elif model_name == "xcit_medium_24_p16":
url = "dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth"
elif model_name == "xcit_medium_24_p8":
url = "dino_xcit_medium_24_p8_pretrain/dino_xcit_medium_24_p8_pretrain.pth"
elif model_name == "resnet50":
url = "dino_resnet50_pretrain/dino_resnet50_pretrain.pth"
if url is not None:
print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.")
state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)
model.load_state_dict(state_dict, strict=True)
else:
print("There is no reference weights available for this model => We use random weights.")
def load_pretrained_linear_weights(linear_classifier, model_name, patch_size):
url = None
if model_name == "vit_small" and patch_size == 16:
url = "dino_deitsmall16_pretrain/dino_deitsmall16_linearweights.pth"
elif model_name == "vit_small" and patch_size == 8:
url = "dino_deitsmall8_pretrain/dino_deitsmall8_linearweights.pth"
elif model_name == "vit_base" and patch_size == 16:
url = "dino_vitbase16_pretrain/dino_vitbase16_linearweights.pth"
elif model_name == "vit_base" and patch_size == 8:
url = "dino_vitbase8_pretrain/dino_vitbase8_linearweights.pth"
elif model_name == "resnet50":
url = "dino_resnet50_pretrain/dino_resnet50_linearweights.pth"
if url is not None:
print("We load the reference pretrained linear weights.")
state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)["state_dict"]
linear_classifier.load_state_dict(state_dict, strict=True)
else:
print("We use random linear weights.")
def clip_gradients(model, clip):
norms = []
for name, p in model.named_parameters():
if p.grad is not None:
param_norm = p.grad.data.norm(2)
norms.append(param_norm.item())
clip_coef = clip / (param_norm + 1e-6)
if clip_coef < 1:
p.grad.data.mul_(clip_coef)
return norms
def cancel_gradients_last_layer(epoch, model, freeze_last_layer):
if epoch >= freeze_last_layer:
return
for n, p in model.named_parameters():
if "last_layer" in n:
p.grad = None
def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs):
"""
Re-start from checkpoint
"""
if not os.path.isfile(ckp_path):
return
print("Found checkpoint at {}".format(ckp_path))
# open checkpoint file
checkpoint = torch.load(ckp_path, map_location="cpu")
# key is what to look for in the checkpoint file
# value is the object to load
# example: {'state_dict': model}
for key, value in kwargs.items():
if key in checkpoint and value is not None:
try:
msg = value.load_state_dict(checkpoint[key], strict=False)
print("=> loaded '{}' from checkpoint '{}' with msg {}".format(key, ckp_path, msg))
except TypeError:
try:
msg = value.load_state_dict(checkpoint[key])
print("=> loaded '{}' from checkpoint: '{}'".format(key, ckp_path))
except ValueError:
print("=> failed to load '{}' from checkpoint: '{}'".format(key, ckp_path))
else:
print("=> key '{}' not found in checkpoint: '{}'".format(key, ckp_path))
    # reload variables important for the run
if run_variables is not None:
for var_name in run_variables:
if var_name in checkpoint:
run_variables[var_name] = checkpoint[var_name]
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters)))
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
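# Example (minimal sketch): a warmup + cosine schedule for a hypothetical run
# (100 epochs, 500 iterations per epoch); the training loop indexes this array
# with the global iteration counter.
def _demo_cosine_scheduler():
    lr_schedule = cosine_scheduler(base_value=5e-4, final_value=1e-6,
                                   epochs=100, niter_per_ep=500, warmup_epochs=10)
    assert len(lr_schedule) == 100 * 500
    # linear ramp 0 -> 5e-4 over the first 10 * 500 iterations, cosine decay after
    return lr_schedule[0], lr_schedule[10 * 500], lr_schedule[-1]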
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
FALSY_STRINGS = {"off", "false", "0"}
TRUTHY_STRINGS = {"on", "true", "1"}
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag")
def fix_random_seeds(seed=31):
"""
Fix random seeds.
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.6f} ({global_avg:.6f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
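# Example (minimal sketch): the window median tracks recent values while
# global_avg reflects the whole series, so a single outlier barely moves the median.
def _demo_smoothed_value():
    meter = SmoothedValue(window_size=5)
    for v in [1.0, 2.0, 3.0, 4.0, 100.0]:
        meter.update(v)
    return meter.median, meter.global_avg  # 3.0 and 22.0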
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.6f}')
data_time = SmoothedValue(fmt='{avg:.6f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.6f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
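# Example (minimal sketch): logging a dummy loop on CPU; `log_every` yields the
# items unchanged and prints progress (with ETA) every `print_freq` iterations.
def _demo_metric_logger():
    logger = MetricLogger(delimiter="  ")
    for step in logger.log_every(range(20), print_freq=10, header="demo:"):
        logger.update(loss=1.0 / (step + 1))
    return str(logger)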
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(args):
# launched with torch.distributed.launch
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
# launched with submitit on a slurm cluster
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
# launched naively with `python main_dino.py`
# we manually add MASTER_ADDR and MASTER_PORT to env variables
elif torch.cuda.is_available():
print('Will run the code on one GPU.')
args.rank, args.gpu, args.world_size = 0, 0, 1
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
else:
print('Does not support training without GPU.')
sys.exit(1)
dist.init_process_group(
backend="nccl",
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
torch.cuda.set_device(args.gpu)
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
dist.barrier()
setup_for_distributed(args.rank == 0)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
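# Example (minimal sketch): top-1 / top-5 accuracy on random logits for a
# 10-class problem; the returned values are percentages of the batch.
def _demo_accuracy():
    logits = torch.randn(32, 10)
    target = torch.randint(0, 10, (32,))
    top1, top5 = accuracy(logits, target, topk=(1, 5))
    return top1.item(), top5.item()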
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
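# Example (minimal sketch): initializing a weight tensor with a truncated
# normal; every sample is guaranteed to lie inside the clamp range [a, b].
def _demo_trunc_normal():
    w = torch.empty(256, 256)
    trunc_normal_(w, mean=0.0, std=0.02, a=-0.04, b=0.04)
    assert -0.04 <= w.min().item() and w.max().item() <= 0.04
    return w.std().item()  # slightly below 0.02 because of the truncation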
class LARS(torch.optim.Optimizer):
"""
Almost copy-paste from https://github.com/facebookresearch/barlowtwins/blob/main/main.py
"""
def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, eta=0.001,
weight_decay_filter=None, lars_adaptation_filter=None):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum,
eta=eta, weight_decay_filter=weight_decay_filter,
lars_adaptation_filter=lars_adaptation_filter)
super().__init__(params, defaults)
@torch.no_grad()
def step(self):
for g in self.param_groups:
for p in g['params']:
dp = p.grad
if dp is None:
continue
if p.ndim != 1:
dp = dp.add(p, alpha=g['weight_decay'])
if p.ndim != 1:
param_norm = torch.norm(p)
update_norm = torch.norm(dp)
one = torch.ones_like(param_norm)
q = torch.where(param_norm > 0.,
torch.where(update_norm > 0,
(g['eta'] * param_norm / update_norm), one), one)
dp = dp.mul(q)
param_state = self.state[p]
if 'mu' not in param_state:
param_state['mu'] = torch.zeros_like(p)
mu = param_state['mu']
mu.mul_(g['momentum']).add_(dp)
p.add_(mu, alpha=-g['lr'])
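# Example (minimal sketch): one LARS step on a toy 2-D parameter. For non-1-D
# tensors the update is rescaled by eta * ||w|| / ||grad|| before the momentum
# buffer is applied (layer-wise adaptation).
def _demo_lars_step():
    w = nn.Parameter(torch.randn(4, 4))
    opt = LARS([w], lr=0.1, weight_decay=1e-4)
    loss = (w ** 2).sum()
    loss.backward()
    opt.step()
    return w.detach().norm().item()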
class MultiCropWrapper(nn.Module):
"""
Perform forward pass separately on each resolution input.
The inputs corresponding to a single resolution are clubbed and single
forward is run on the same resolution inputs. Hence we do several
forward passes = number of different resolutions used. We then
concatenate all the output features and run the head forward on these
concatenated features.
"""
def __init__(self, backbone, head):
super(MultiCropWrapper, self).__init__()
# disable layers dedicated to ImageNet labels classification
backbone.fc, backbone.head = nn.Identity(), nn.Identity()
self.backbone = backbone
self.head = head
def forward(self, x):
# convert to list
if not isinstance(x, list):
x = [x]
idx_crops = torch.cumsum(torch.unique_consecutive(
torch.tensor([inp.shape[-1] for inp in x]),
return_counts=True,
)[1], 0)
start_idx, output = 0, torch.empty(0).to(x[0].device)
for end_idx in idx_crops:
_out = self.backbone(torch.cat(x[start_idx: end_idx]))
# The output is a tuple with XCiT model. See:
# https://github.com/facebookresearch/xcit/blob/master/xcit.py#L404-L405
if isinstance(_out, tuple):
_out = _out[0]
# accumulate outputs
output = torch.cat((output, _out))
start_idx = end_idx
# Run the head forward on the concatenated features.
return self.head(output)
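# Example (minimal sketch): crops are grouped by resolution, so two 32x32
# "global" and four 16x16 "local" crops trigger exactly two backbone forward
# passes. The toy backbone and the sizes below are illustrative assumptions.
def _demo_multi_crop_wrapper():
    class _ToyBackbone(nn.Module):
        def forward(self, x):
            return x.mean(dim=(2, 3))  # [B, C] global average pooling
    wrapper = MultiCropWrapper(_ToyBackbone(), head=nn.Linear(3, 8))
    crops = ([torch.randn(2, 3, 32, 32) for _ in range(2)]
             + [torch.randn(2, 3, 16, 16) for _ in range(4)])
    out = wrapper(crops)
    return out.shape  # torch.Size([12, 8]): 6 crops x batch size 2 rows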
def get_params_groups(model):
regularized = []
not_regularized = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
# we do not regularize biases nor Norm parameters
if name.endswith(".bias") or len(param.shape) == 1:
not_regularized.append(param)
else:
regularized.append(param)
return [{'params': regularized}, {'params': not_regularized, 'weight_decay': 0.}]
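# Example (minimal sketch): biases and 1-D (normalization) parameters land in
# the weight_decay=0 group, weight matrices in the regularized group.
def _demo_params_groups():
    model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
    regularized, not_regularized = get_params_groups(model)
    # Linear.weight is regularized; Linear.bias and both LayerNorm params are not
    return len(regularized["params"]), len(not_regularized["params"])  # 1, 3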
def has_batchnorms(model):
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
for name, module in model.named_modules():
if isinstance(module, bn_types):
return True
return False
class PCA():
"""
Class to compute and apply PCA.
"""
def __init__(self, dim=256, whit=0.5):
self.dim = dim
self.whit = whit
self.mean = None
def train_pca(self, cov):
"""
Takes a covariance matrix (np.ndarray) as input.
"""
d, v = np.linalg.eigh(cov)
eps = d.max() * 1e-5
n_0 = (d < eps).sum()
if n_0 > 0:
d[d < eps] = eps
# total energy
totenergy = d.sum()
# sort eigenvectors with eigenvalues order
idx = np.argsort(d)[::-1][:self.dim]
d = d[idx]
v = v[:, idx]
print("keeping %.2f %% of the energy" % (d.sum() / totenergy * 100.0))
# for the whitening
d = np.diag(1. / d**self.whit)
# principal components
self.dvt = np.dot(d, v.T)
def apply(self, x):
# input is from numpy
if isinstance(x, np.ndarray):
if self.mean is not None:
x -= self.mean
return np.dot(self.dvt, x.T).T
# input is from torch and is on GPU
if x.is_cuda:
if self.mean is not None:
x -= torch.cuda.FloatTensor(self.mean)
return torch.mm(torch.cuda.FloatTensor(self.dvt), x.transpose(0, 1)).transpose(0, 1)
# input if from torch, on CPU
if self.mean is not None:
x -= torch.FloatTensor(self.mean)
return torch.mm(torch.FloatTensor(self.dvt), x.transpose(0, 1)).transpose(0, 1)
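# Example (minimal sketch): fitting whitened PCA on random features and
# projecting them. `train_pca` expects a ready-made covariance matrix, and
# `mean` is set manually here so that `apply` centers the data first.
def _demo_pca():
    feats = np.random.randn(1000, 64)
    pca = PCA(dim=16, whit=0.5)
    pca.mean = feats.mean(axis=0)
    pca.train_pca(np.cov((feats - pca.mean).T))
    projected = pca.apply(feats.copy())
    return projected.shape  # (1000, 16)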
def compute_ap(ranks, nres):
"""
Computes average precision for given ranked indexes.
Arguments
---------
    ranks : zero-based ranks of positive images
nres : number of positive images
Returns
-------
ap : average precision
"""
# number of images ranked by the system
nimgranks = len(ranks)
# accumulate trapezoids in PR-plot
ap = 0
recall_step = 1. / nres
for j in np.arange(nimgranks):
rank = ranks[j]
if rank == 0:
precision_0 = 1.
else:
precision_0 = float(j) / rank
precision_1 = float(j + 1) / (rank + 1)
ap += (precision_0 + precision_1) * recall_step / 2.
return ap
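# Worked example (minimal sketch): three positives retrieved at zero-based
# ranks 0, 1 and 4 with nres=3 relevant images in total gives
# AP = (1 + 1 + (2/4 + 3/5)/2) / 3 = 0.85.
def _demo_compute_ap():
    return compute_ap(np.array([0, 1, 4]), nres=3)  # 0.85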
def compute_map(ranks, gnd, kappas=[]):
"""
Computes the mAP for a given set of returned results.
Usage:
map = compute_map (ranks, gnd)
        computes mean average precision (map) only
map, aps, pr, prs = compute_map (ranks, gnd, kappas)
computes mean average precision (map), average precision (aps) for each query
computes mean precision at kappas (pr), precision at kappas (prs) for each query
Notes:
1) ranks starts from 0, ranks.shape = db_size X #queries
    2) The junk results (e.g., the query itself) should be declared in the gnd struct array
3) If there are no positive images for some query, that query is excluded from the evaluation
"""
map = 0.
nq = len(gnd) # number of queries
aps = np.zeros(nq)
pr = np.zeros(len(kappas))
prs = np.zeros((nq, len(kappas)))
nempty = 0
for i in np.arange(nq):
qgnd = np.array(gnd[i]['ok'])
# no positive images, skip from the average
if qgnd.shape[0] == 0:
aps[i] = float('nan')
prs[i, :] = float('nan')
nempty += 1
continue
try:
qgndj = np.array(gnd[i]['junk'])
except:
qgndj = np.empty(0)
# sorted positions of positive and junk images (0 based)
pos = np.arange(ranks.shape[0])[np.in1d(ranks[:,i], qgnd)]
junk = np.arange(ranks.shape[0])[np.in1d(ranks[:,i], qgndj)]
        k = 0
        ij = 0
if len(junk):
# decrease positions of positives based on the number of
# junk images appearing before them
ip = 0
while (ip < len(pos)):
while (ij < len(junk) and pos[ip] > junk[ij]):
k += 1
ij += 1
pos[ip] = pos[ip] - k
ip += 1
# compute ap
ap = compute_ap(pos, len(qgnd))
map = map + ap
aps[i] = ap
# compute precision @ k
pos += 1 # get it to 1-based
for j in np.arange(len(kappas)):
            kq = min(max(pos), kappas[j])
prs[i, j] = (pos <= kq).sum() / kq
pr = pr + prs[i, :]
map = map / (nq - nempty)
pr = pr / (nq - nempty)
return map, aps, pr, prs
def multi_scale(samples, model):
v = None
for s in [1, 1/2**(1/2), 1/2]: # we use 3 different scales
if s == 1:
inp = samples.clone()
else:
inp = nn.functional.interpolate(samples, scale_factor=s, mode='bilinear', align_corners=False)
feats = model(inp).clone()
if v is None:
v = feats
else:
v += feats
v /= 3
v /= v.norm()
return v
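# Example (minimal sketch): multi-scale features with a toy pooling model;
# the three scaled forward passes are averaged and then L2-normalized.
def _demo_multi_scale():
    model = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten())  # [B, C] features
    samples = torch.randn(2, 3, 64, 64)
    v = multi_scale(samples, model)
    return v.shape, v.norm().item()  # torch.Size([2, 3]) and ~1.0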
| 28,039 | 32.783133 | 119 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/video_generation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import sys
import argparse
import cv2
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms as pth_transforms
import numpy as np
from PIL import Image
import utils
import vision_transformer as vits
FOURCC = {
"mp4": cv2.VideoWriter_fourcc(*"MP4V"),
"avi": cv2.VideoWriter_fourcc(*"XVID"),
}
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
class VideoGenerator:
def __init__(self, args):
self.args = args
# self.model = None
# Don't need to load model if you only want a video
if not self.args.video_only:
self.model = self.__load_model()
def run(self):
if self.args.input_path is None:
print(f"Provided input path {self.args.input_path} is non valid.")
sys.exit(1)
else:
if self.args.video_only:
self._generate_video_from_images(
self.args.input_path, self.args.output_path
)
else:
# If input path exists
if os.path.exists(self.args.input_path):
# If input is a video file
if os.path.isfile(self.args.input_path):
frames_folder = os.path.join(self.args.output_path, "frames")
attention_folder = os.path.join(
self.args.output_path, "attention"
)
os.makedirs(frames_folder, exist_ok=True)
os.makedirs(attention_folder, exist_ok=True)
self._extract_frames_from_video(
self.args.input_path, frames_folder
)
self._inference(
frames_folder,
attention_folder,
)
self._generate_video_from_images(
attention_folder, self.args.output_path
)
# If input is a folder of already extracted frames
if os.path.isdir(self.args.input_path):
attention_folder = os.path.join(
self.args.output_path, "attention"
)
os.makedirs(attention_folder, exist_ok=True)
self._inference(self.args.input_path, attention_folder)
self._generate_video_from_images(
attention_folder, self.args.output_path
)
                # If input path doesn't exist
                else:
                    print(f"Provided input path {self.args.input_path} doesn't exist.")
sys.exit(1)
def _extract_frames_from_video(self, inp: str, out: str):
vidcap = cv2.VideoCapture(inp)
self.args.fps = vidcap.get(cv2.CAP_PROP_FPS)
print(f"Video: {inp} ({self.args.fps} fps)")
print(f"Extracting frames to {out}")
success, image = vidcap.read()
count = 0
while success:
cv2.imwrite(
os.path.join(out, f"frame-{count:04}.jpg"),
image,
)
success, image = vidcap.read()
count += 1
def _generate_video_from_images(self, inp: str, out: str):
img_array = []
attention_images_list = sorted(glob.glob(os.path.join(inp, "attn-*.jpg")))
# Get size of the first image
with open(attention_images_list[0], "rb") as f:
img = Image.open(f)
img = img.convert("RGB")
size = (img.width, img.height)
img_array.append(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
print(f"Generating video {size} to {out}")
for filename in tqdm(attention_images_list[1:]):
with open(filename, "rb") as f:
img = Image.open(f)
img = img.convert("RGB")
img_array.append(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
        writer = cv2.VideoWriter(
            os.path.join(out, "video." + self.args.video_format),
            FOURCC[self.args.video_format],
            self.args.fps,
            size,
        )
        for i in range(len(img_array)):
            writer.write(img_array[i])
        writer.release()
print("Done")
def _inference(self, inp: str, out: str):
print(f"Generating attention images to {out}")
for img_path in tqdm(sorted(glob.glob(os.path.join(inp, "*.jpg")))):
with open(img_path, "rb") as f:
img = Image.open(f)
img = img.convert("RGB")
if self.args.resize is not None:
transform = pth_transforms.Compose(
[
pth_transforms.ToTensor(),
pth_transforms.Resize(self.args.resize),
pth_transforms.Normalize(
(0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
),
]
)
else:
transform = pth_transforms.Compose(
[
pth_transforms.ToTensor(),
pth_transforms.Normalize(
(0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
),
]
)
img = transform(img)
# make the image divisible by the patch size
w, h = (
img.shape[1] - img.shape[1] % self.args.patch_size,
img.shape[2] - img.shape[2] % self.args.patch_size,
)
img = img[:, :w, :h].unsqueeze(0)
w_featmap = img.shape[-2] // self.args.patch_size
h_featmap = img.shape[-1] // self.args.patch_size
attentions = self.model.get_last_selfattention(img.to(DEVICE))
            nh = attentions.shape[1]  # number of heads
# we keep only the output patch attention
attentions = attentions[0, :, 0, 1:].reshape(nh, -1)
# we keep only a certain percentage of the mass
val, idx = torch.sort(attentions)
val /= torch.sum(val, dim=1, keepdim=True)
cumval = torch.cumsum(val, dim=1)
th_attn = cumval > (1 - self.args.threshold)
idx2 = torch.argsort(idx)
for head in range(nh):
th_attn[head] = th_attn[head][idx2[head]]
th_attn = th_attn.reshape(nh, w_featmap, h_featmap).float()
# interpolate
th_attn = (
nn.functional.interpolate(
th_attn.unsqueeze(0),
scale_factor=self.args.patch_size,
mode="nearest",
)[0]
.cpu()
.numpy()
)
attentions = attentions.reshape(nh, w_featmap, h_featmap)
attentions = (
nn.functional.interpolate(
attentions.unsqueeze(0),
scale_factor=self.args.patch_size,
mode="nearest",
)[0]
.cpu()
.numpy()
)
# save attentions heatmaps
fname = os.path.join(out, "attn-" + os.path.basename(img_path))
plt.imsave(
fname=fname,
arr=sum(
attentions[i] * 1 / attentions.shape[0]
for i in range(attentions.shape[0])
),
cmap="inferno",
format="jpg",
)
def __load_model(self):
# build model
model = vits.__dict__[self.args.arch](
patch_size=self.args.patch_size, num_classes=0
)
for p in model.parameters():
p.requires_grad = False
model.eval()
model.to(DEVICE)
if os.path.isfile(self.args.pretrained_weights):
state_dict = torch.load(self.args.pretrained_weights, map_location="cpu")
if (
self.args.checkpoint_key is not None
and self.args.checkpoint_key in state_dict
):
print(
f"Take key {self.args.checkpoint_key} in provided checkpoint dict"
)
state_dict = state_dict[self.args.checkpoint_key]
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
print(
"Pretrained weights found at {} and loaded with msg: {}".format(
self.args.pretrained_weights, msg
)
)
else:
print(
"Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate."
)
url = None
if self.args.arch == "vit_small" and self.args.patch_size == 16:
url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
elif self.args.arch == "vit_small" and self.args.patch_size == 8:
url = "dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth" # model used for visualizations in our paper
elif self.args.arch == "vit_base" and self.args.patch_size == 16:
url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
elif self.args.arch == "vit_base" and self.args.patch_size == 8:
url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
if url is not None:
print(
"Since no pretrained weights have been provided, we load the reference pretrained DINO weights."
)
state_dict = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/dino/" + url
)
model.load_state_dict(state_dict, strict=True)
else:
                print(
                    "There are no reference weights available for this model => We use random weights."
                )
return model
def parse_args():
parser = argparse.ArgumentParser("Generation self-attention video")
parser.add_argument(
"--arch",
default="vit_small",
type=str,
choices=["vit_tiny", "vit_small", "vit_base"],
help="Architecture (support only ViT atm).",
)
parser.add_argument(
"--patch_size", default=8, type=int, help="Patch resolution of the self.model."
)
parser.add_argument(
"--pretrained_weights",
default="",
type=str,
help="Path to pretrained weights to load.",
)
parser.add_argument(
"--checkpoint_key",
default="teacher",
type=str,
help='Key to use in the checkpoint (example: "teacher")',
)
parser.add_argument(
"--input_path",
required=True,
type=str,
help="""Path to a video file if you want to extract frames
or to a folder of images already extracted by yourself.
or to a folder of attention images.""",
)
parser.add_argument(
"--output_path",
default="./",
type=str,
help="""Path to store a folder of frames and / or a folder of attention images.
and / or a final video. Default to current directory.""",
)
parser.add_argument(
"--threshold",
type=float,
default=0.6,
help="""We visualize masks
obtained by thresholding the self-attention maps to keep xx percent of the mass.""",
)
parser.add_argument(
"--resize",
default=None,
type=int,
nargs="+",
help="""Apply a resize transformation to input image(s). Use if OOM error.
Usage (single or W H): --resize 512, --resize 720 1280""",
)
parser.add_argument(
"--video_only",
action="store_true",
help="""Use this flag if you only want to generate a video and not all attention images.
If used, --input_path must be set to the folder of attention images. Ex: ./attention/""",
)
parser.add_argument(
"--fps",
default=30.0,
type=float,
help="FPS of input / output video. Automatically set if you extract frames from a video.",
)
parser.add_argument(
"--video_format",
default="mp4",
type=str,
choices=["mp4", "avi"],
help="Format of generated video (mp4 or avi).",
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
vg = VideoGenerator(args)
vg.run()
| 13,669 | 35.068602 | 135 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/vision_transformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
import warnings
from functools import partial
import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
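# Example (minimal sketch): stochastic depth at train time. Roughly drop_prob
# of the samples are zeroed and the survivors are rescaled by 1/keep_prob, so
# the expected output matches eval mode.
def _demo_drop_path():
    x = torch.ones(1000, 8)
    out = drop_path(x, drop_prob=0.2, training=True)
    kept = (out.abs().sum(dim=1) > 0).float().mean().item()
    return kept  # close to 0.8; surviving rows hold 1 / 0.8 = 1.25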
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
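# Example (minimal sketch): the core of `interpolate_pos_encoding` standalone.
# A 14x14 grid of positional embeddings (e.g. a 224px input with 16px patches)
# is resized to 20x20 with bicubic interpolation so a 320px input can reuse
# the pretrained weights. The sizes below are illustrative assumptions.
def _demo_interpolate_pos_embed():
    dim, n_old, n_new = 768, 14, 20
    patch_pos_embed = torch.randn(1, n_old * n_old, dim)
    resized = nn.functional.interpolate(
        patch_pos_embed.reshape(1, n_old, n_old, dim).permute(0, 3, 1, 2),
        size=(n_new, n_new), mode='bicubic')
    return resized.permute(0, 2, 3, 1).reshape(1, -1, dim).shape  # [1, 400, 768]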
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
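# Example (minimal sketch): projecting backbone features through the head; the
# bottleneck output is L2-normalized before the weight-normalized last layer.
# `out_dim=1024` is a small placeholder (the DINO default is 65536).
def _demo_dino_head():
    head = DINOHead(in_dim=384, out_dim=1024)
    feats = torch.randn(4, 384)
    return head(feats).shape  # torch.Size([4, 1024])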
| 12,706 | 37.389728 | 124 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/main_dino4k.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import datetime
import time
import math
import json
from pathlib import Path
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
from torch.utils.data.dataset import Dataset
import utils
import vision_transformer4k as vits
from vision_transformer4k import DINOHead
from einops import rearrange, repeat, reduce
torchvision_archs = sorted(name for name in torchvision_models.__dict__
if name.islower() and not name.startswith("__")
and callable(torchvision_models.__dict__[name]))
def get_args_parser():
parser = argparse.ArgumentParser('DINO4K', add_help=False)
# Model parameters
    parser.add_argument('--arch', default='vit4k_xs', type=str,
choices=['vit4k_xs', 'vit_tiny', 'vit_small', 'vit_base', 'xcit', 'deit_tiny', 'deit_small'] \
+ torchvision_archs + torch.hub.list("facebookresearch/xcit:main"),
help="""Name of architecture to train. For quick experiments with ViTs,
we recommend using vit_tiny or vit_small.""")
parser.add_argument('--patch_size', default=16, type=int, help="""Size in pixels
of input square patches - default 16 (for 16x16 patches). Using smaller
values leads to better performance but requires more memory. Applies only
for ViTs (vit_tiny, vit_small and vit_base). If <16, we recommend disabling
mixed precision training (--use_fp16 false) to avoid unstabilities.""")
parser.add_argument('--out_dim', default=65536, type=int, help="""Dimensionality of
the DINO head output. For complex and large datasets large values (like 65k) work well.""")
parser.add_argument('--norm_last_layer', default=True, type=utils.bool_flag,
help="""Whether or not to weight normalize the last layer of the DINO head.
Not normalizing leads to better performance but can make the training unstable.
        In our experiments, we typically set this parameter to False with vit_small and True with vit_base.""")
parser.add_argument('--momentum_teacher', default=0.996, type=float, help="""Base EMA
parameter for teacher update. The value is increased to 1 during training with cosine schedule.
We recommend setting a higher value with small batches: for example use 0.9995 with batch size of 256.""")
parser.add_argument('--use_bn_in_head', default=False, type=utils.bool_flag,
help="Whether to use batch normalizations in projection head (Default: False)")
# Temperature teacher parameters
parser.add_argument('--warmup_teacher_temp', default=0.04, type=float,
help="""Initial value for the teacher temperature: 0.04 works well in most cases.
Try decreasing it if the training loss does not decrease.""")
parser.add_argument('--teacher_temp', default=0.04, type=float, help="""Final value (after linear warmup)
of the teacher temperature. For most experiments, anything above 0.07 is unstable. We recommend
starting with the default value of 0.04 and increase this slightly if needed.""")
parser.add_argument('--warmup_teacher_temp_epochs', default=0, type=int,
        help='Number of warmup epochs for the teacher temperature (Default: 0).')
# Training/Optimization parameters
parser.add_argument('--use_fp16', type=utils.bool_flag, default=True, help="""Whether or not
to use half precision for training. Improves training time and memory requirements,
but can provoke instability and slight decay of performance. We recommend disabling
mixed precision if the loss is unstable, if reducing the patch size or if training with bigger ViTs.""")
parser.add_argument('--weight_decay', type=float, default=0.04, help="""Initial value of the
weight decay. With ViT, a smaller value at the beginning of training works well.""")
parser.add_argument('--weight_decay_end', type=float, default=0.4, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--clip_grad', type=float, default=3.0, help="""Maximal parameter
gradient norm if using gradient clipping. Clipping with norm .3 ~ 1.0 can
help optimization for larger ViT architectures. 0 for disabling.""")
parser.add_argument('--batch_size_per_gpu', default=64, type=int,
help='Per-GPU batch-size : number of distinct images loaded on one GPU.')
parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.')
parser.add_argument('--freeze_last_layer', default=1, type=int, help="""Number of epochs
during which we keep the output layer fixed. Typically doing so during
the first epoch helps training. Try increasing this value if the loss does not decrease.""")
parser.add_argument("--lr", default=0.0005, type=float, help="""Learning rate at the end of
linear warmup (highest LR used during training). The learning rate is linearly scaled
with the batch size, and specified here for a reference batch size of 256.""")
parser.add_argument("--warmup_epochs", default=10, type=int,
help="Number of epochs for the linear learning-rate warm up.")
parser.add_argument('--min_lr', type=float, default=1e-6, help="""Target LR at the
end of optimization. We use a cosine LR schedule with linear warmup.""")
parser.add_argument('--optimizer', default='adamw', type=str,
choices=['adamw', 'sgd', 'lars'], help="""Type of optimizer. We recommend using adamw with ViTs.""")
parser.add_argument('--drop_path_rate', type=float, default=0.1, help="stochastic depth rate")
# Multi-crop parameters
parser.add_argument('--global_crops_scale', type=float, nargs='+', default=(0.4, 1.),
help="""Scale range of the cropped image before resizing, relatively to the origin image.
Used for large global view cropping. When disabling multi-crop (--local_crops_number 0), we
        recommend using a wider range of scale ("--global_crops_scale 0.14 1." for example)""")
parser.add_argument('--local_crops_number', type=int, default=8, help="""Number of small
local views to generate. Set this parameter to 0 to disable multi-crop training.
        When disabling multi-crop we recommend using "--global_crops_scale 0.14 1." """)
parser.add_argument('--local_crops_scale', type=float, nargs='+', default=(0.05, 0.4),
help="""Scale range of the cropped image before resizing, relatively to the origin image.
Used for small local view cropping of multi-crop.""")
# Misc
parser.add_argument('--data_path', default='/path/to/imagenet/train/', type=str,
help='Please specify path to the ImageNet training data.')
parser.add_argument('--output_dir', default=".", type=str, help='Path to save logs and checkpoints.')
parser.add_argument('--saveckp_freq', default=20, type=int, help='Save checkpoint every x epochs.')
parser.add_argument('--seed', default=0, type=int, help='Random seed.')
parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
distributed training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
return parser
def train_dino(args):
utils.init_distributed_mode(args)
utils.fix_random_seeds(args.seed)
print("git:\n {}\n".format(utils.get_sha()))
print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
cudnn.benchmark = True
# ============ preparing data ... ============
transform = DataAugmentationDINO4K(
args.local_crops_number
)
# Using custom dataset for our [256 x 384] tensors
dataset = SeqDataset(dataroot=args.data_path, transform=transform)
sampler = torch.utils.data.DistributedSampler(dataset, shuffle=True)
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=True,
drop_last=True,
)
print(f"Data loaded: there are {len(dataset)} images.")
# ============ building student and teacher networks ... ============
    # we changed the name DeiT-S for ViT-S to avoid confusion
args.arch = args.arch.replace("deit", "vit")
# if the network is a Vision Transformer (i.e. vit_tiny, vit_small, vit_base)
if args.arch in vits.__dict__.keys():
student = vits.__dict__[args.arch](
patch_size=args.patch_size,
drop_path_rate=args.drop_path_rate, # stochastic depth
)
teacher = vits.__dict__[args.arch](patch_size=args.patch_size)
embed_dim = student.embed_dim
# if the network is a XCiT
elif args.arch in torch.hub.list("facebookresearch/xcit:main"):
student = torch.hub.load('facebookresearch/xcit:main', args.arch,
pretrained=False, drop_path_rate=args.drop_path_rate)
teacher = torch.hub.load('facebookresearch/xcit:main', args.arch, pretrained=False)
embed_dim = student.embed_dim
# otherwise, we check if the architecture is in torchvision models
elif args.arch in torchvision_models.__dict__.keys():
student = torchvision_models.__dict__[args.arch]()
teacher = torchvision_models.__dict__[args.arch]()
embed_dim = student.fc.weight.shape[1]
    else:
        print(f"Unknown architecture: {args.arch}")
        sys.exit(1)
# multi-crop wrapper handles forward with inputs of different resolutions
student = utils.MultiCropWrapper(student, DINOHead(
embed_dim,
args.out_dim,
use_bn=args.use_bn_in_head,
norm_last_layer=args.norm_last_layer,
))
teacher = utils.MultiCropWrapper(
teacher,
DINOHead(embed_dim, args.out_dim, args.use_bn_in_head),
)
# move networks to gpu
student, teacher = student.cuda(), teacher.cuda()
# synchronize batch norms (if any)
if utils.has_batchnorms(student):
student = nn.SyncBatchNorm.convert_sync_batchnorm(student)
teacher = nn.SyncBatchNorm.convert_sync_batchnorm(teacher)
# we need DDP wrapper to have synchro batch norms working...
teacher = nn.parallel.DistributedDataParallel(teacher, device_ids=[args.gpu])
teacher_without_ddp = teacher.module
else:
# teacher_without_ddp and teacher are the same thing
teacher_without_ddp = teacher
student = nn.parallel.DistributedDataParallel(student, device_ids=[args.gpu], find_unused_parameters=True)
# teacher and student start with the same weights
teacher_without_ddp.load_state_dict(student.module.state_dict())
# there is no backpropagation through the teacher, so no need for gradients
for p in teacher.parameters():
p.requires_grad = False
print(f"Student and Teacher are built: they are both {args.arch} network.")
# ============ preparing loss ... ============
dino_loss = DINOLoss(
args.out_dim,
args.local_crops_number + 2, # total number of crops = 2 global crops + local_crops_number
args.warmup_teacher_temp,
args.teacher_temp,
args.warmup_teacher_temp_epochs,
args.epochs,
).cuda()
# ============ preparing optimizer ... ============
params_groups = utils.get_params_groups(student)
if args.optimizer == "adamw":
optimizer = torch.optim.AdamW(params_groups) # to use with ViTs
elif args.optimizer == "sgd":
optimizer = torch.optim.SGD(params_groups, lr=0, momentum=0.9) # lr is set by scheduler
elif args.optimizer == "lars":
optimizer = utils.LARS(params_groups) # to use with convnet and large batches
# for mixed precision training
fp16_scaler = None
if args.use_fp16:
fp16_scaler = torch.cuda.amp.GradScaler()
# ============ init schedulers ... ============
lr_schedule = utils.cosine_scheduler(
args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256., # linear scaling rule
args.min_lr,
args.epochs, len(data_loader),
warmup_epochs=args.warmup_epochs,
)
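    # e.g., with 8 GPUs and batch_size_per_gpu=64 (assumed values), a base lr
    # of 5e-4 scales to 5e-4 * (64 * 8) / 256 = 1e-3 at the end of warmup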
wd_schedule = utils.cosine_scheduler(
args.weight_decay,
args.weight_decay_end,
args.epochs, len(data_loader),
)
# momentum parameter is increased to 1. during training with a cosine schedule
momentum_schedule = utils.cosine_scheduler(args.momentum_teacher, 1,
args.epochs, len(data_loader))
print(f"Loss, optimizer and schedulers ready.")
# ============ optionally resume training ... ============
to_restore = {"epoch": 0}
utils.restart_from_checkpoint(
os.path.join(args.output_dir, "checkpoint.pth"),
run_variables=to_restore,
student=student,
teacher=teacher,
optimizer=optimizer,
fp16_scaler=fp16_scaler,
dino_loss=dino_loss,
)
start_epoch = to_restore["epoch"]
start_time = time.time()
print("Starting DINO training !")
for epoch in range(start_epoch, args.epochs):
data_loader.sampler.set_epoch(epoch)
# ============ training one epoch of DINO ... ============
train_stats = train_one_epoch(student, teacher, teacher_without_ddp, dino_loss,
data_loader, optimizer, lr_schedule, wd_schedule, momentum_schedule,
epoch, fp16_scaler, args)
# ============ writing logs ... ============
save_dict = {
'student': student.state_dict(),
'teacher': teacher.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch + 1,
'args': args,
'dino_loss': dino_loss.state_dict(),
}
if fp16_scaler is not None:
save_dict['fp16_scaler'] = fp16_scaler.state_dict()
utils.save_on_master(save_dict, os.path.join(args.output_dir, 'checkpoint.pth'))
if args.saveckp_freq and epoch % args.saveckp_freq == 0:
utils.save_on_master(save_dict, os.path.join(args.output_dir, f'checkpoint{epoch:04}.pth'))
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch}
if utils.is_main_process():
with (Path(args.output_dir) / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
def train_one_epoch(student, teacher, teacher_without_ddp, dino_loss, data_loader,
optimizer, lr_schedule, wd_schedule, momentum_schedule,epoch,
fp16_scaler, args):
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Epoch: [{}/{}]'.format(epoch, args.epochs)
for it, (images, _) in enumerate(metric_logger.log_every(data_loader, 10, header)):
# update weight decay and learning rate according to their schedule
it = len(data_loader) * epoch + it # global training iteration
for i, param_group in enumerate(optimizer.param_groups):
param_group["lr"] = lr_schedule[it]
if i == 0: # only the first group is regularized
param_group["weight_decay"] = wd_schedule[it]
# move images to gpu
images = [im.cuda(non_blocking=True) for im in images]
# teacher and student forward passes + compute dino loss
with torch.cuda.amp.autocast(fp16_scaler is not None):
teacher_output = teacher(images[:2]) # only the 2 global views pass through the teacher
student_output = student(images)
loss = dino_loss(student_output, teacher_output, epoch)
            print(f'dino_loss: {loss}')
if not math.isfinite(loss.item()):
print("Loss is {}, stopping training".format(loss.item()), force=True)
sys.exit(1)
# student update
optimizer.zero_grad()
param_norms = None
if fp16_scaler is None:
loss.backward()
if args.clip_grad:
param_norms = utils.clip_gradients(student, args.clip_grad)
utils.cancel_gradients_last_layer(epoch, student,
args.freeze_last_layer)
optimizer.step()
else:
fp16_scaler.scale(loss).backward()
if args.clip_grad:
fp16_scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
param_norms = utils.clip_gradients(student, args.clip_grad)
utils.cancel_gradients_last_layer(epoch, student,
args.freeze_last_layer)
fp16_scaler.step(optimizer)
fp16_scaler.update()
# EMA update for the teacher
with torch.no_grad():
m = momentum_schedule[it] # momentum parameter
for param_q, param_k in zip(student.module.parameters(), teacher_without_ddp.parameters()):
param_k.data.mul_(m).add_((1 - m) * param_q.detach().data)
# logging
torch.cuda.synchronize()
metric_logger.update(loss=loss.item())
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
metric_logger.update(wd=optimizer.param_groups[0]["weight_decay"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
class DINOLoss(nn.Module):
def __init__(self, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
warmup_teacher_temp_epochs, nepochs, student_temp=0.1,
center_momentum=0.9):
super().__init__()
self.student_temp = student_temp
self.center_momentum = center_momentum
self.ncrops = ncrops
self.register_buffer("center", torch.zeros(1, out_dim))
# we apply a warm up for the teacher temperature because
        # too high a temperature makes the training unstable at the beginning
self.teacher_temp_schedule = np.concatenate((
np.linspace(warmup_teacher_temp,
teacher_temp, warmup_teacher_temp_epochs),
np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp
))
def forward(self, student_output, teacher_output, epoch):
"""
Cross-entropy between softmax outputs of the teacher and student networks.
"""
student_out = student_output / self.student_temp
student_out = student_out.chunk(self.ncrops)
# teacher centering and sharpening
temp = self.teacher_temp_schedule[epoch]
teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1)
teacher_out = teacher_out.detach().chunk(2)
total_loss = 0
n_loss_terms = 0
for iq, q in enumerate(teacher_out):
for v in range(len(student_out)):
if v == iq:
# we skip cases where student and teacher operate on the same view
continue
loss = torch.sum(-q * F.log_softmax(student_out[v], dim=-1), dim=-1)
total_loss += loss.mean()
n_loss_terms += 1
total_loss /= n_loss_terms
self.update_center(teacher_output)
return total_loss
@torch.no_grad()
def update_center(self, teacher_output):
"""
Update center used for teacher output.
"""
batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
dist.all_reduce(batch_center)
batch_center = batch_center / (len(teacher_output) * dist.get_world_size())
# ema update
self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)
### Custom dataset implemented to load [256-Length x 384-Dim] tensors, which correspond to extracted ViT-16 features for a 4K x 4K patch
class SeqDataset(Dataset):
def __init__(self, dataroot, transform):
seq_list = os.listdir(dataroot)
self.seq_list = [os.path.join(dataroot, fname) for fname in seq_list]
self.transform = transform
def __getitem__(self, index):
seq = torch.load(self.seq_list[index])
label = torch.zeros(1,1)
return self.transform(seq), label
def __len__(self):
return len(self.seq_list)
### Modified data augmentation for DINO at 4K x 4K resolution, performing local / global crops on features in the image grid
class DataAugmentationDINO4K(object):
def __init__(self, local_crops_number):
flip = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
])
# first global crop
self.global_transfo1 = transforms.Compose([
transforms.RandomCrop(14),
transforms.RandomHorizontalFlip(p=0.5),
])
# second global crop
self.global_transfo2 = transforms.Compose([
transforms.RandomCrop(14),
transforms.RandomHorizontalFlip(p=0.5),
])
# transformation for the local small crops
self.local_crops_number = local_crops_number
self.local_transfo = transforms.Compose([
transforms.RandomCrop(6),
transforms.RandomHorizontalFlip(p=0.5),
])
def __call__(self, image):
crops = []
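        # image is a [256 x 384] sequence of ViT-16 patch features; unfold the
        # 256 tokens into a 16 x 16 spatial grid with channels first, giving a
        # [384 x 16 x 16] "image" the torchvision crops/flips can operate on.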
image = image.unfold(0, 16, 16).transpose(0,1)
crops.append(self.global_transfo1(image))
crops.append(self.global_transfo2(image))
for _ in range(self.local_crops_number):
crops.append(self.local_transfo(image))
return crops
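# Minimal sketch (illustrative only; this helper is not part of the original
# training pipeline): applying the 4K augmentation to a dummy [256 x 384]
# feature tensor yields two global [384 x 14 x 14] crops followed by
# `local_crops_number` local [384 x 6 x 6] crops.
def _demo_dino4k_augmentation(local_crops_number=8):
    aug = DataAugmentationDINO4K(local_crops_number)
    dummy = torch.randn(256, 384)  # 16 x 16 grid of 384-dim ViT-16 features
    crops = aug(dummy)
    assert crops[0].shape == (384, 14, 14)  # global crop
    assert crops[-1].shape == (384, 6, 6)   # local crop
    return crops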
if __name__ == '__main__':
parser = argparse.ArgumentParser('DINO4K', parents=[get_args_parser()])
args = parser.parse_args()
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
train_dino(args)
| 23,147 | 47.225 | 136 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/eval_knn.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
def extract_feature_pipeline(args):
# ============ preparing data ... ============
transform = pth_transforms.Compose([
pth_transforms.Resize(256, interpolation=3),
pth_transforms.CenterCrop(224),
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
dataset_train = ReturnIndexDataset(os.path.join(args.data_path, "train"), transform=transform)
dataset_val = ReturnIndexDataset(os.path.join(args.data_path, "val"), transform=transform)
sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=True,
drop_last=False,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=True,
drop_last=False,
)
print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
# ============ building network ... ============
if "vit" in args.arch:
model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
elif "xcit" in args.arch:
model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)
elif args.arch in torchvision_models.__dict__.keys():
model = torchvision_models.__dict__[args.arch](num_classes=0)
model.fc = nn.Identity()
else:
print(f"Architecture {args.arch} non supported")
sys.exit(1)
model.cuda()
utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
model.eval()
# ============ extract features ... ============
print("Extracting features for train set...")
train_features = extract_features(model, data_loader_train, args.use_cuda)
print("Extracting features for val set...")
test_features = extract_features(model, data_loader_val, args.use_cuda)
if utils.get_rank() == 0:
train_features = nn.functional.normalize(train_features, dim=1, p=2)
test_features = nn.functional.normalize(test_features, dim=1, p=2)
train_labels = torch.tensor([s[-1] for s in dataset_train.samples]).long()
test_labels = torch.tensor([s[-1] for s in dataset_val.samples]).long()
# save features and labels
if args.dump_features and dist.get_rank() == 0:
torch.save(train_features.cpu(), os.path.join(args.dump_features, "trainfeat.pth"))
torch.save(test_features.cpu(), os.path.join(args.dump_features, "testfeat.pth"))
torch.save(train_labels.cpu(), os.path.join(args.dump_features, "trainlabels.pth"))
torch.save(test_labels.cpu(), os.path.join(args.dump_features, "testlabels.pth"))
return train_features, test_features, train_labels, test_labels
@torch.no_grad()
def extract_features(model, data_loader, use_cuda=True, multiscale=False):
metric_logger = utils.MetricLogger(delimiter=" ")
features = None
for samples, index in metric_logger.log_every(data_loader, 10):
samples = samples.cuda(non_blocking=True)
index = index.cuda(non_blocking=True)
if multiscale:
feats = utils.multi_scale(samples, model)
else:
feats = model(samples).clone()
# init storage feature matrix
if dist.get_rank() == 0 and features is None:
features = torch.zeros(len(data_loader.dataset), feats.shape[-1])
if use_cuda:
features = features.cuda(non_blocking=True)
print(f"Storing features into tensor of shape {features.shape}")
# get indexes from all processes
y_all = torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device)
y_l = list(y_all.unbind(0))
y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True)
y_all_reduce.wait()
index_all = torch.cat(y_l)
# share features between processes
feats_all = torch.empty(
dist.get_world_size(),
feats.size(0),
feats.size(1),
dtype=feats.dtype,
device=feats.device,
)
output_l = list(feats_all.unbind(0))
output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True)
output_all_reduce.wait()
# update storage feature matrix
if dist.get_rank() == 0:
if use_cuda:
features.index_copy_(0, index_all, torch.cat(output_l))
else:
features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu())
return features
@torch.no_grad()
def knn_classifier(train_features, train_labels, test_features, test_labels, k, T, num_classes=1000):
top1, top5, total = 0.0, 0.0, 0
train_features = train_features.t()
num_test_images, num_chunks = test_labels.shape[0], 100
imgs_per_chunk = num_test_images // num_chunks
retrieval_one_hot = torch.zeros(k, num_classes).to(train_features.device)
for idx in range(0, num_test_images, imgs_per_chunk):
# get the features for test images
features = test_features[
idx : min((idx + imgs_per_chunk), num_test_images), :
]
targets = test_labels[idx : min((idx + imgs_per_chunk), num_test_images)]
batch_size = targets.shape[0]
# calculate the dot product and compute top-k neighbors
similarity = torch.mm(features, train_features)
distances, indices = similarity.topk(k, largest=True, sorted=True)
candidates = train_labels.view(1, -1).expand(batch_size, -1)
retrieved_neighbors = torch.gather(candidates, 1, indices)
retrieval_one_hot.resize_(batch_size * k, num_classes).zero_()
retrieval_one_hot.scatter_(1, retrieved_neighbors.view(-1, 1), 1)
distances_transform = distances.clone().div_(T).exp_()
probs = torch.sum(
torch.mul(
retrieval_one_hot.view(batch_size, -1, num_classes),
distances_transform.view(batch_size, -1, 1),
),
1,
)
_, predictions = probs.sort(1, True)
# find the predictions that match the target
correct = predictions.eq(targets.data.view(-1, 1))
top1 = top1 + correct.narrow(1, 0, 1).sum().item()
top5 = top5 + correct.narrow(1, 0, min(5, k)).sum().item() # top5 does not make sense if k < 5
total += targets.size(0)
top1 = top1 * 100.0 / total
top5 = top5 * 100.0 / total
return top1, top5
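# Minimal toy run of the weighted k-NN voting above (illustrative only; the
# sizes and class count below are made up, and at least 100 test samples are
# needed because the classifier splits the test set into 100 chunks):
def _demo_knn_classifier():
    torch.manual_seed(0)
    train_features = nn.functional.normalize(torch.randn(500, 32), dim=1, p=2)
    train_labels = torch.randint(0, 10, (500,))
    test_features = nn.functional.normalize(torch.randn(200, 32), dim=1, p=2)
    test_labels = torch.randint(0, 10, (200,))
    return knn_classifier(train_features, train_labels, test_features,
                          test_labels, k=20, T=0.07, num_classes=10)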
class ReturnIndexDataset(datasets.ImageFolder):
def __getitem__(self, idx):
img, lab = super(ReturnIndexDataset, self).__getitem__(idx)
return img, idx
if __name__ == '__main__':
parser = argparse.ArgumentParser('Evaluation with weighted k-NN on ImageNet')
parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size')
parser.add_argument('--nb_knn', default=[10, 20, 100, 200], nargs='+', type=int,
        help='Number of NN to use. 20 usually works best.')
parser.add_argument('--temperature', default=0.07, type=float,
help='Temperature used in the voting coefficient')
parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
parser.add_argument('--use_cuda', default=True, type=utils.bool_flag,
help="Should we store the features on GPU? We recommend setting this to False if you encounter OOM")
parser.add_argument('--arch', default='vit_small', type=str, help='Architecture')
parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
parser.add_argument("--checkpoint_key", default="teacher", type=str,
help='Key to use in the checkpoint (example: "teacher")')
parser.add_argument('--dump_features', default=None,
help='Path where to save computed features, empty for no saving')
parser.add_argument('--load_features', default=None, help="""If the features have
already been computed, where to find them.""")
parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
distributed training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
parser.add_argument('--data_path', default='/path/to/imagenet/', type=str)
args = parser.parse_args()
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
cudnn.benchmark = True
if args.load_features:
train_features = torch.load(os.path.join(args.load_features, "trainfeat.pth"))
test_features = torch.load(os.path.join(args.load_features, "testfeat.pth"))
train_labels = torch.load(os.path.join(args.load_features, "trainlabels.pth"))
test_labels = torch.load(os.path.join(args.load_features, "testlabels.pth"))
else:
# need to extract features !
train_features, test_features, train_labels, test_labels = extract_feature_pipeline(args)
if utils.get_rank() == 0:
if args.use_cuda:
train_features = train_features.cuda()
test_features = test_features.cuda()
train_labels = train_labels.cuda()
test_labels = test_labels.cuda()
print("Features are ready!\nStart the k-NN classification.")
for k in args.nb_knn:
top1, top5 = knn_classifier(train_features, train_labels,
test_features, test_labels, k, args.temperature)
print(f"{k}-NN classifier result: Top1: {top1}, Top5: {top5}")
dist.barrier()
| 11,128 | 44.798354 | 117 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/vision_transformer4k.py | import argparse
import os
import sys
import datetime
import time
import math
import json
from pathlib import Path
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
from vision_transformer import DINOHead
import warnings
from functools import partial
#from utils import trunc_normal_
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
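        # qkv has shape [3, B, num_heads, N, head_dim] after reshape + permute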
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class VisionTransformer4K(nn.Module):
""" Vision Transformer 4K """
def __init__(self, num_classes=0, img_size=[224], input_embed_dim=384, output_embed_dim = 192,
depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, num_prototypes=64, **kwargs):
super().__init__()
embed_dim = output_embed_dim
self.num_features = self.embed_dim = embed_dim
self.phi = nn.Sequential(*[nn.Linear(input_embed_dim, output_embed_dim), nn.GELU(), nn.Dropout(p=drop_rate)])
num_patches = int(img_size[0] // 16)**2
print("# of Patches:", num_patches)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // 1
h0 = h // 1
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
#print('preparing tokens (after crop)', x.shape)
self.mpp_feature = x
B, embed_dim, w, h = x.shape
x = x.flatten(2, 3).transpose(1,2)
x = self.phi(x)
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit4k_xs(patch_size=16, **kwargs):
model = VisionTransformer4K(
patch_size=patch_size, input_embed_dim=384, output_embed_dim=192,
depth=6, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
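# Minimal smoke test (illustrative only; this helper is not part of the
# original file). A global crop from the DINO-4K pipeline is a [384 x 14 x 14]
# grid of ViT-16 features, and the [CLS] output of vit4k_xs is 192-dim:
def _demo_vit4k_xs():
    model = vit4k_xs()
    x = torch.randn(2, 384, 14, 14)  # batch of 2 global crops
    out = model(x)
    assert out.shape == (2, 192)
    print("vit4k_xs trainable parameters:", count_parameters(model))
    return out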
| 10,220 | 35.503571 | 123 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/main_dino.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import datetime
import time
import math
import json
from pathlib import Path
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
from vision_transformer import DINOHead
torchvision_archs = sorted(name for name in torchvision_models.__dict__
if name.islower() and not name.startswith("__")
and callable(torchvision_models.__dict__[name]))
def get_args_parser():
parser = argparse.ArgumentParser('DINO', add_help=False)
# Model parameters
parser.add_argument('--arch', default='vit_small', type=str,
choices=['vit_tiny', 'vit_small', 'vit_base', 'xcit', 'deit_tiny', 'deit_small'] \
+ torchvision_archs + torch.hub.list("facebookresearch/xcit:main"),
help="""Name of architecture to train. For quick experiments with ViTs,
we recommend using vit_tiny or vit_small.""")
parser.add_argument('--patch_size', default=16, type=int, help="""Size in pixels
of input square patches - default 16 (for 16x16 patches). Using smaller
values leads to better performance but requires more memory. Applies only
for ViTs (vit_tiny, vit_small and vit_base). If <16, we recommend disabling
    mixed precision training (--use_fp16 false) to avoid instabilities.""")
parser.add_argument('--out_dim', default=65536, type=int, help="""Dimensionality of
the DINO head output. For complex and large datasets large values (like 65k) work well.""")
parser.add_argument('--norm_last_layer', default=True, type=utils.bool_flag,
help="""Whether or not to weight normalize the last layer of the DINO head.
Not normalizing leads to better performance but can make the training unstable.
    In our experiments, we typically set this parameter to False with vit_small and True with vit_base.""")
parser.add_argument('--momentum_teacher', default=0.996, type=float, help="""Base EMA
parameter for teacher update. The value is increased to 1 during training with cosine schedule.
We recommend setting a higher value with small batches: for example use 0.9995 with batch size of 256.""")
parser.add_argument('--use_bn_in_head', default=False, type=utils.bool_flag,
help="Whether to use batch normalizations in projection head (Default: False)")
# Temperature teacher parameters
parser.add_argument('--warmup_teacher_temp', default=0.04, type=float,
help="""Initial value for the teacher temperature: 0.04 works well in most cases.
Try decreasing it if the training loss does not decrease.""")
parser.add_argument('--teacher_temp', default=0.04, type=float, help="""Final value (after linear warmup)
of the teacher temperature. For most experiments, anything above 0.07 is unstable. We recommend
starting with the default value of 0.04 and increase this slightly if needed.""")
parser.add_argument('--warmup_teacher_temp_epochs', default=0, type=int,
help='Number of warmup epochs for the teacher temperature (Default: 30).')
# Training/Optimization parameters
parser.add_argument('--use_fp16', type=utils.bool_flag, default=True, help="""Whether or not
to use half precision for training. Improves training time and memory requirements,
but can provoke instability and slight decay of performance. We recommend disabling
mixed precision if the loss is unstable, if reducing the patch size or if training with bigger ViTs.""")
parser.add_argument('--weight_decay', type=float, default=0.04, help="""Initial value of the
weight decay. With ViT, a smaller value at the beginning of training works well.""")
parser.add_argument('--weight_decay_end', type=float, default=0.4, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--clip_grad', type=float, default=3.0, help="""Maximal parameter
gradient norm if using gradient clipping. Clipping with norm .3 ~ 1.0 can
help optimization for larger ViT architectures. 0 for disabling.""")
parser.add_argument('--batch_size_per_gpu', default=64, type=int,
help='Per-GPU batch-size : number of distinct images loaded on one GPU.')
parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.')
parser.add_argument('--freeze_last_layer', default=1, type=int, help="""Number of epochs
during which we keep the output layer fixed. Typically doing so during
the first epoch helps training. Try increasing this value if the loss does not decrease.""")
parser.add_argument("--lr", default=0.0005, type=float, help="""Learning rate at the end of
linear warmup (highest LR used during training). The learning rate is linearly scaled
with the batch size, and specified here for a reference batch size of 256.""")
parser.add_argument("--warmup_epochs", default=10, type=int,
help="Number of epochs for the linear learning-rate warm up.")
parser.add_argument('--min_lr', type=float, default=1e-6, help="""Target LR at the
end of optimization. We use a cosine LR schedule with linear warmup.""")
parser.add_argument('--optimizer', default='adamw', type=str,
choices=['adamw', 'sgd', 'lars'], help="""Type of optimizer. We recommend using adamw with ViTs.""")
parser.add_argument('--drop_path_rate', type=float, default=0.1, help="stochastic depth rate")
# Multi-crop parameters
parser.add_argument('--global_crops_scale', type=float, nargs='+', default=(0.4, 1.),
help="""Scale range of the cropped image before resizing, relatively to the origin image.
Used for large global view cropping. When disabling multi-crop (--local_crops_number 0), we
    recommend using a wider range of scale ("--global_crops_scale 0.14 1." for example)""")
parser.add_argument('--local_crops_number', type=int, default=8, help="""Number of small
local views to generate. Set this parameter to 0 to disable multi-crop training.
    When disabling multi-crop we recommend using "--global_crops_scale 0.14 1." """)
parser.add_argument('--local_crops_scale', type=float, nargs='+', default=(0.05, 0.4),
help="""Scale range of the cropped image before resizing, relatively to the origin image.
Used for small local view cropping of multi-crop.""")
# Misc
parser.add_argument('--data_path', default='/path/to/imagenet/train/', type=str,
help='Please specify path to the ImageNet training data.')
parser.add_argument('--output_dir', default=".", type=str, help='Path to save logs and checkpoints.')
parser.add_argument('--saveckp_freq', default=20, type=int, help='Save checkpoint every x epochs.')
parser.add_argument('--seed', default=0, type=int, help='Random seed.')
parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
distributed training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
return parser
def train_dino(args):
utils.init_distributed_mode(args)
utils.fix_random_seeds(args.seed)
print("git:\n {}\n".format(utils.get_sha()))
print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
cudnn.benchmark = True
# ============ preparing data ... ============
transform = DataAugmentationDINO(
args.global_crops_scale,
args.local_crops_scale,
args.local_crops_number,
)
dataset = datasets.ImageFolder(args.data_path, transform=transform)
sampler = torch.utils.data.DistributedSampler(dataset, shuffle=True)
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=True,
drop_last=True,
)
print(f"Data loaded: there are {len(dataset)} images.")
# ============ building student and teacher networks ... ============
    # we changed the name DeiT-S to ViT-S to avoid confusion
args.arch = args.arch.replace("deit", "vit")
# if the network is a Vision Transformer (i.e. vit_tiny, vit_small, vit_base)
if args.arch in vits.__dict__.keys():
student = vits.__dict__[args.arch](
patch_size=args.patch_size,
drop_path_rate=args.drop_path_rate, # stochastic depth
)
teacher = vits.__dict__[args.arch](patch_size=args.patch_size)
embed_dim = student.embed_dim
# if the network is a XCiT
elif args.arch in torch.hub.list("facebookresearch/xcit:main"):
student = torch.hub.load('facebookresearch/xcit:main', args.arch,
pretrained=False, drop_path_rate=args.drop_path_rate)
teacher = torch.hub.load('facebookresearch/xcit:main', args.arch, pretrained=False)
embed_dim = student.embed_dim
# otherwise, we check if the architecture is in torchvision models
elif args.arch in torchvision_models.__dict__.keys():
student = torchvision_models.__dict__[args.arch]()
teacher = torchvision_models.__dict__[args.arch]()
embed_dim = student.fc.weight.shape[1]
    else:
        print(f"Unknown architecture: {args.arch}")
        sys.exit(1)  # no supported model was built, so we cannot continue
# multi-crop wrapper handles forward with inputs of different resolutions
student = utils.MultiCropWrapper(student, DINOHead(
embed_dim,
args.out_dim,
use_bn=args.use_bn_in_head,
norm_last_layer=args.norm_last_layer,
))
teacher = utils.MultiCropWrapper(
teacher,
DINOHead(embed_dim, args.out_dim, args.use_bn_in_head),
)
# move networks to gpu
student, teacher = student.cuda(), teacher.cuda()
# synchronize batch norms (if any)
if utils.has_batchnorms(student):
student = nn.SyncBatchNorm.convert_sync_batchnorm(student)
teacher = nn.SyncBatchNorm.convert_sync_batchnorm(teacher)
# we need DDP wrapper to have synchro batch norms working...
teacher = nn.parallel.DistributedDataParallel(teacher, device_ids=[args.gpu])
teacher_without_ddp = teacher.module
else:
# teacher_without_ddp and teacher are the same thing
teacher_without_ddp = teacher
student = nn.parallel.DistributedDataParallel(student, device_ids=[args.gpu])
# teacher and student start with the same weights
teacher_without_ddp.load_state_dict(student.module.state_dict())
# there is no backpropagation through the teacher, so no need for gradients
for p in teacher.parameters():
p.requires_grad = False
print(f"Student and Teacher are built: they are both {args.arch} network.")
# ============ preparing loss ... ============
dino_loss = DINOLoss(
args.out_dim,
args.local_crops_number + 2, # total number of crops = 2 global crops + local_crops_number
args.warmup_teacher_temp,
args.teacher_temp,
args.warmup_teacher_temp_epochs,
args.epochs,
).cuda()
# ============ preparing optimizer ... ============
params_groups = utils.get_params_groups(student)
if args.optimizer == "adamw":
optimizer = torch.optim.AdamW(params_groups) # to use with ViTs
elif args.optimizer == "sgd":
optimizer = torch.optim.SGD(params_groups, lr=0, momentum=0.9) # lr is set by scheduler
elif args.optimizer == "lars":
optimizer = utils.LARS(params_groups) # to use with convnet and large batches
# for mixed precision training
fp16_scaler = None
if args.use_fp16:
fp16_scaler = torch.cuda.amp.GradScaler()
# ============ init schedulers ... ============
lr_schedule = utils.cosine_scheduler(
args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256., # linear scaling rule
args.min_lr,
args.epochs, len(data_loader),
warmup_epochs=args.warmup_epochs,
)
wd_schedule = utils.cosine_scheduler(
args.weight_decay,
args.weight_decay_end,
args.epochs, len(data_loader),
)
# momentum parameter is increased to 1. during training with a cosine schedule
momentum_schedule = utils.cosine_scheduler(args.momentum_teacher, 1,
args.epochs, len(data_loader))
print(f"Loss, optimizer and schedulers ready.")
# ============ optionally resume training ... ============
to_restore = {"epoch": 0}
utils.restart_from_checkpoint(
os.path.join(args.output_dir, "checkpoint.pth"),
run_variables=to_restore,
student=student,
teacher=teacher,
optimizer=optimizer,
fp16_scaler=fp16_scaler,
dino_loss=dino_loss,
)
start_epoch = to_restore["epoch"]
start_time = time.time()
print("Starting DINO training !")
for epoch in range(start_epoch, args.epochs):
data_loader.sampler.set_epoch(epoch)
# ============ training one epoch of DINO ... ============
train_stats = train_one_epoch(student, teacher, teacher_without_ddp, dino_loss,
data_loader, optimizer, lr_schedule, wd_schedule, momentum_schedule,
epoch, fp16_scaler, args)
# ============ writing logs ... ============
save_dict = {
'student': student.state_dict(),
'teacher': teacher.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch + 1,
'args': args,
'dino_loss': dino_loss.state_dict(),
}
if fp16_scaler is not None:
save_dict['fp16_scaler'] = fp16_scaler.state_dict()
utils.save_on_master(save_dict, os.path.join(args.output_dir, 'checkpoint.pth'))
if args.saveckp_freq and epoch % args.saveckp_freq == 0:
utils.save_on_master(save_dict, os.path.join(args.output_dir, f'checkpoint{epoch:04}.pth'))
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch}
if utils.is_main_process():
with (Path(args.output_dir) / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
def train_one_epoch(student, teacher, teacher_without_ddp, dino_loss, data_loader,
optimizer, lr_schedule, wd_schedule, momentum_schedule,epoch,
fp16_scaler, args):
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Epoch: [{}/{}]'.format(epoch, args.epochs)
for it, (images, _) in enumerate(metric_logger.log_every(data_loader, 10, header)):
# update weight decay and learning rate according to their schedule
it = len(data_loader) * epoch + it # global training iteration
for i, param_group in enumerate(optimizer.param_groups):
param_group["lr"] = lr_schedule[it]
if i == 0: # only the first group is regularized
param_group["weight_decay"] = wd_schedule[it]
# move images to gpu
images = [im.cuda(non_blocking=True) for im in images]
# teacher and student forward passes + compute dino loss
with torch.cuda.amp.autocast(fp16_scaler is not None):
teacher_output = teacher(images[:2]) # only the 2 global views pass through the teacher
student_output = student(images)
loss = dino_loss(student_output, teacher_output, epoch)
if not math.isfinite(loss.item()):
print("Loss is {}, stopping training".format(loss.item()), force=True)
sys.exit(1)
# student update
optimizer.zero_grad()
param_norms = None
if fp16_scaler is None:
loss.backward()
if args.clip_grad:
param_norms = utils.clip_gradients(student, args.clip_grad)
utils.cancel_gradients_last_layer(epoch, student,
args.freeze_last_layer)
optimizer.step()
else:
fp16_scaler.scale(loss).backward()
if args.clip_grad:
fp16_scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
param_norms = utils.clip_gradients(student, args.clip_grad)
utils.cancel_gradients_last_layer(epoch, student,
args.freeze_last_layer)
fp16_scaler.step(optimizer)
fp16_scaler.update()
# EMA update for the teacher
with torch.no_grad():
m = momentum_schedule[it] # momentum parameter
for param_q, param_k in zip(student.module.parameters(), teacher_without_ddp.parameters()):
param_k.data.mul_(m).add_((1 - m) * param_q.detach().data)
# logging
torch.cuda.synchronize()
metric_logger.update(loss=loss.item())
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
metric_logger.update(wd=optimizer.param_groups[0]["weight_decay"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
class DINOLoss(nn.Module):
def __init__(self, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
warmup_teacher_temp_epochs, nepochs, student_temp=0.1,
center_momentum=0.9):
super().__init__()
self.student_temp = student_temp
self.center_momentum = center_momentum
self.ncrops = ncrops
self.register_buffer("center", torch.zeros(1, out_dim))
# we apply a warm up for the teacher temperature because
        # too high a temperature makes the training unstable at the beginning
self.teacher_temp_schedule = np.concatenate((
np.linspace(warmup_teacher_temp,
teacher_temp, warmup_teacher_temp_epochs),
np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp
))
def forward(self, student_output, teacher_output, epoch):
"""
Cross-entropy between softmax outputs of the teacher and student networks.
"""
student_out = student_output / self.student_temp
student_out = student_out.chunk(self.ncrops)
# teacher centering and sharpening
temp = self.teacher_temp_schedule[epoch]
teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1)
teacher_out = teacher_out.detach().chunk(2)
total_loss = 0
n_loss_terms = 0
for iq, q in enumerate(teacher_out):
for v in range(len(student_out)):
if v == iq:
# we skip cases where student and teacher operate on the same view
continue
loss = torch.sum(-q * F.log_softmax(student_out[v], dim=-1), dim=-1)
total_loss += loss.mean()
n_loss_terms += 1
total_loss /= n_loss_terms
self.update_center(teacher_output)
return total_loss
@torch.no_grad()
def update_center(self, teacher_output):
"""
Update center used for teacher output.
"""
batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
dist.all_reduce(batch_center)
batch_center = batch_center / (len(teacher_output) * dist.get_world_size())
# ema update
self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)
class DataAugmentationDINO(object):
def __init__(self, global_crops_scale, local_crops_scale, local_crops_number):
flip_and_color_jitter = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply(
[transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)],
p=0.8
),
transforms.RandomGrayscale(p=0.2),
])
normalize = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
# first global crop
self.global_transfo1 = transforms.Compose([
transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),
flip_and_color_jitter,
utils.GaussianBlur(1.0),
normalize,
])
# second global crop
self.global_transfo2 = transforms.Compose([
transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),
flip_and_color_jitter,
utils.GaussianBlur(0.1),
utils.Solarization(0.2),
normalize,
])
# transformation for the local small crops
self.local_crops_number = local_crops_number
self.local_transfo = transforms.Compose([
transforms.RandomResizedCrop(96, scale=local_crops_scale, interpolation=Image.BICUBIC),
flip_and_color_jitter,
utils.GaussianBlur(p=0.5),
normalize,
])
def __call__(self, image):
crops = []
crops.append(self.global_transfo1(image))
crops.append(self.global_transfo2(image))
for _ in range(self.local_crops_number):
crops.append(self.local_transfo(image))
return crops
if __name__ == '__main__':
parser = argparse.ArgumentParser('DINO', parents=[get_args_parser()])
args = parser.parse_args()
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
train_dino(args)
| 22,945 | 47.614407 | 114 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/eval_video_segmentation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Some parts are taken from https://github.com/Liusifei/UVC
"""
import os
import copy
import glob
import queue
from urllib.request import urlopen
import argparse
import numpy as np
from tqdm import tqdm
import cv2
import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
from torchvision import transforms
import utils
import vision_transformer as vits
@torch.no_grad()
def eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette):
"""
Evaluate tracking on a video given first frame & segmentation
"""
video_folder = os.path.join(args.output_dir, video_dir.split('/')[-1])
os.makedirs(video_folder, exist_ok=True)
    # The queue stores the n preceding frames
que = queue.Queue(args.n_last_frames)
# first frame
frame1, ori_h, ori_w = read_frame(frame_list[0])
# extract first frame feature
frame1_feat = extract_feature(model, frame1).T # dim x h*w
# saving first segmentation
out_path = os.path.join(video_folder, "00000.png")
imwrite_indexed(out_path, seg_ori, color_palette)
mask_neighborhood = None
for cnt in tqdm(range(1, len(frame_list))):
frame_tar = read_frame(frame_list[cnt])[0]
# we use the first segmentation and the n previous ones
used_frame_feats = [frame1_feat] + [pair[0] for pair in list(que.queue)]
used_segs = [first_seg] + [pair[1] for pair in list(que.queue)]
frame_tar_avg, feat_tar, mask_neighborhood = label_propagation(args, model, frame_tar, used_frame_feats, used_segs, mask_neighborhood)
        # pop out the oldest frame if necessary
if que.qsize() == args.n_last_frames:
que.get()
# push current results into queue
seg = copy.deepcopy(frame_tar_avg)
que.put([feat_tar, seg])
# upsampling & argmax
frame_tar_avg = F.interpolate(frame_tar_avg, scale_factor=args.patch_size, mode='bilinear', align_corners=False, recompute_scale_factor=False)[0]
frame_tar_avg = norm_mask(frame_tar_avg)
_, frame_tar_seg = torch.max(frame_tar_avg, dim=0)
# saving to disk
frame_tar_seg = np.array(frame_tar_seg.squeeze().cpu(), dtype=np.uint8)
frame_tar_seg = np.array(Image.fromarray(frame_tar_seg).resize((ori_w, ori_h), 0))
frame_nm = frame_list[cnt].split('/')[-1].replace(".jpg", ".png")
imwrite_indexed(os.path.join(video_folder, frame_nm), frame_tar_seg, color_palette)
def restrict_neighborhood(h, w):
    # We restrict the set of source nodes considered to a spatial neighborhood of the query node (i.e. "local attention")
mask = torch.zeros(h, w, h, w)
for i in range(h):
for j in range(w):
for p in range(2 * args.size_mask_neighborhood + 1):
for q in range(2 * args.size_mask_neighborhood + 1):
if i - args.size_mask_neighborhood + p < 0 or i - args.size_mask_neighborhood + p >= h:
continue
if j - args.size_mask_neighborhood + q < 0 or j - args.size_mask_neighborhood + q >= w:
continue
mask[i, j, i - args.size_mask_neighborhood + p, j - args.size_mask_neighborhood + q] = 1
mask = mask.reshape(h * w, h * w)
return mask.cuda(non_blocking=True)
def norm_mask(mask):
c, h, w = mask.size()
for cnt in range(c):
mask_cnt = mask[cnt,:,:]
if(mask_cnt.max() > 0):
mask_cnt = (mask_cnt - mask_cnt.min())
mask_cnt = mask_cnt/mask_cnt.max()
mask[cnt,:,:] = mask_cnt
return mask
def label_propagation(args, model, frame_tar, list_frame_feats, list_segs, mask_neighborhood=None):
"""
propagate segs of frames in list_frames to frame_tar
"""
## we only need to extract feature of the target frame
feat_tar, h, w = extract_feature(model, frame_tar, return_h_w=True)
return_feat_tar = feat_tar.T # dim x h*w
ncontext = len(list_frame_feats)
feat_sources = torch.stack(list_frame_feats) # nmb_context x dim x h*w
feat_tar = F.normalize(feat_tar, dim=1, p=2)
feat_sources = F.normalize(feat_sources, dim=1, p=2)
feat_tar = feat_tar.unsqueeze(0).repeat(ncontext, 1, 1)
aff = torch.exp(torch.bmm(feat_tar, feat_sources) / 0.1) # nmb_context x h*w (tar: query) x h*w (source: keys)
if args.size_mask_neighborhood > 0:
if mask_neighborhood is None:
mask_neighborhood = restrict_neighborhood(h, w)
mask_neighborhood = mask_neighborhood.unsqueeze(0).repeat(ncontext, 1, 1)
aff *= mask_neighborhood
aff = aff.transpose(2, 1).reshape(-1, h * w) # nmb_context*h*w (source: keys) x h*w (tar: queries)
tk_val, _ = torch.topk(aff, dim=0, k=args.topk)
tk_val_min, _ = torch.min(tk_val, dim=0)
aff[aff < tk_val_min] = 0
aff = aff / torch.sum(aff, keepdim=True, axis=0)
list_segs = [s.cuda() for s in list_segs]
segs = torch.cat(list_segs)
nmb_context, C, h, w = segs.shape
segs = segs.reshape(nmb_context, C, -1).transpose(2, 1).reshape(-1, C).T # C x nmb_context*h*w
seg_tar = torch.mm(segs, aff)
seg_tar = seg_tar.reshape(1, C, h, w)
return seg_tar, return_feat_tar, mask_neighborhood
def extract_feature(model, frame, return_h_w=False):
"""Extract one frame feature everytime."""
out = model.get_intermediate_layers(frame.unsqueeze(0).cuda(), n=1)[0]
out = out[:, 1:, :] # we discard the [CLS] token
h, w = int(frame.shape[1] / model.patch_embed.patch_size), int(frame.shape[2] / model.patch_embed.patch_size)
dim = out.shape[-1]
out = out[0].reshape(h, w, dim)
out = out.reshape(-1, dim)
if return_h_w:
return out, h, w
return out
def imwrite_indexed(filename, array, color_palette):
""" Save indexed png for DAVIS."""
if np.atleast_3d(array).shape[2] != 1:
raise Exception("Saving indexed PNGs requires 2D array.")
im = Image.fromarray(array)
im.putpalette(color_palette.ravel())
im.save(filename, format='PNG')
def to_one_hot(y_tensor, n_dims=None):
"""
Take integer y (tensor or variable) with n dims &
convert it to 1-hot representation with n+1 dims.
"""
if(n_dims is None):
n_dims = int(y_tensor.max()+ 1)
_,h,w = y_tensor.size()
y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)
n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1
y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1)
y_one_hot = y_one_hot.view(h,w,n_dims)
return y_one_hot.permute(2, 0, 1).unsqueeze(0)
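# e.g., to_one_hot(torch.tensor([[[0, 1], [2, 0]]])) returns a tensor of shape
# [1, 3, 2, 2]: one binary channel per label value.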
def read_frame_list(video_dir):
frame_list = [img for img in glob.glob(os.path.join(video_dir,"*.jpg"))]
frame_list = sorted(frame_list)
return frame_list
def read_frame(frame_dir, scale_size=[480]):
"""
read a single frame & preprocess
"""
img = cv2.imread(frame_dir)
ori_h, ori_w, _ = img.shape
if len(scale_size) == 1:
if(ori_h > ori_w):
tw = scale_size[0]
th = (tw * ori_h) / ori_w
th = int((th // 64) * 64)
else:
th = scale_size[0]
tw = (th * ori_w) / ori_h
tw = int((tw // 64) * 64)
else:
th, tw = scale_size
img = cv2.resize(img, (tw, th))
img = img.astype(np.float32)
img = img / 255.0
img = img[:, :, ::-1]
img = np.transpose(img.copy(), (2, 0, 1))
img = torch.from_numpy(img).float()
img = color_normalize(img)
return img, ori_h, ori_w
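# e.g., with the default scale_size=[480], an 854x480 frame is resized to
# 832x480: the shorter side is fixed at 480 and the longer side is rounded
# down to a multiple of 64.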
def read_seg(seg_dir, factor, scale_size=[480]):
seg = Image.open(seg_dir)
_w, _h = seg.size # note PIL.Image.Image's size is (w, h)
if len(scale_size) == 1:
if(_w > _h):
_th = scale_size[0]
_tw = (_th * _w) / _h
_tw = int((_tw // 64) * 64)
else:
_tw = scale_size[0]
_th = (_tw * _h) / _w
_th = int((_th // 64) * 64)
else:
_th = scale_size[1]
_tw = scale_size[0]
small_seg = np.array(seg.resize((_tw // factor, _th // factor), 0))
small_seg = torch.from_numpy(small_seg.copy()).contiguous().float().unsqueeze(0)
return to_one_hot(small_seg), np.asarray(seg)
def color_normalize(x, mean=[0.485, 0.456, 0.406], std=[0.228, 0.224, 0.225]):
for t, m, s in zip(x, mean, std):
t.sub_(m)
t.div_(s)
return x
if __name__ == '__main__':
parser = argparse.ArgumentParser('Evaluation with video object segmentation on DAVIS 2017')
parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
parser.add_argument('--arch', default='vit_small', type=str,
choices=['vit_tiny', 'vit_small', 'vit_base'], help='Architecture (support only ViT atm).')
parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
parser.add_argument("--checkpoint_key", default="teacher", type=str, help='Key to use in the checkpoint (example: "teacher")')
parser.add_argument('--output_dir', default=".", help='Path where to save segmentations')
parser.add_argument('--data_path', default='/path/to/davis/', type=str)
parser.add_argument("--n_last_frames", type=int, default=7, help="number of preceeding frames")
parser.add_argument("--size_mask_neighborhood", default=12, type=int,
help="We restrict the set of source nodes considered to a spatial neighborhood of the query node")
parser.add_argument("--topk", type=int, default=5, help="accumulate label from top k neighbors")
parser.add_argument("--bs", type=int, default=6, help="Batch size, try to reduce if OOM")
args = parser.parse_args()
print("git:\n {}\n".format(utils.get_sha()))
print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
# building network
model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
model.cuda()
utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
for param in model.parameters():
param.requires_grad = False
model.eval()
color_palette = []
for line in urlopen("https://raw.githubusercontent.com/Liusifei/UVC/master/libs/data/palette.txt"):
color_palette.append([int(i) for i in line.decode("utf-8").split('\n')[0].split(" ")])
color_palette = np.asarray(color_palette, dtype=np.uint8).reshape(-1,3)
video_list = open(os.path.join(args.data_path, "ImageSets/2017/val.txt")).readlines()
for i, video_name in enumerate(video_list):
video_name = video_name.strip()
        print(f'[{i}/{len(video_list)}] Begin segmenting video {video_name}.')
video_dir = os.path.join(args.data_path, "JPEGImages/480p/", video_name)
frame_list = read_frame_list(video_dir)
seg_path = frame_list[0].replace("JPEGImages", "Annotations").replace("jpg", "png")
first_seg, seg_ori = read_seg(seg_path, args.patch_size)
eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette)
| 11,835 | 39.395904 | 153 | py |
HIPT | HIPT-master/3-Self-Supervised-Eval/patch_extraction.py | ### Dependencies
# Base Dependencies
import os
import pickle
import sys
# LinAlg / Stats / Plotting Dependencies
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
import umap
import umap.plot
from tqdm import tqdm
# Torch Dependencies
import torch
import torch.multiprocessing
import torchvision
import torch.utils.data.dataset as Dataset
from torchvision import transforms
from pl_bolts.models.self_supervised import resnets
from pl_bolts.utils.semi_supervised import Identity
device = torch.device('cuda:0')
torch.multiprocessing.set_sharing_strategy('file_system')
# Model Architectures
from nn_encoder_arch.vision_transformer import vit_small
from nn_encoder_arch.resnet_trunc import resnet50_trunc_baseline
# Feature-extraction entry point (defined in patch_extraction_utils.py in this repo)
from patch_extraction_utils import create_embeddings
### Extracting Patch Features
patch_datasets = 'path/to/patch/datasets'
library_path = './embeddings_patch_library/'
os.makedirs(library_path, exist_ok=True)
models = ['resnet50_trunc', 'resnet50_tcga_brca_simclr', 'vits_tcga_brca_dino']
for enc_name in models:
create_embeddings(patch_datasets=patch_datasets, embeddings_dir=library_path, enc_name=enc_name, dataset='crc100knonorm')
create_embeddings(patch_datasets=patch_datasets, embeddings_dir=library_path, enc_name=enc_name, dataset='crc100k')
create_embeddings(patch_datasets=patch_datasets, embeddings_dir=library_path, enc_name=enc_name, dataset='bcss')
create_embeddings(patch_datasets=patch_datasets, embeddings_dir=library_path, enc_name=enc_name, dataset='breastpathq') | 1,521 | 34.395349 | 125 | py |
HIPT | HIPT-master/3-Self-Supervised-Eval/slide_extraction_utils.py | # Base Dependencies
import os
import pickle
import sys
j_ = os.path.join
# LinAlg / Stats / Plotting Dependencies
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
# Scikit-Learn Imports
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score, StratifiedKFold
#Torch Imports
import torch
import torch.nn as nn
from torch.utils.data.dataset import Dataset
torch.multiprocessing.set_sharing_strategy('file_system')
def series_intersection(s1, s2):
r"""
Takes the intersection of two pandas.Series (pd.Series) objects.
Args:
- s1 (pd.Series): pd.Series object.
- s2 (pd.Series): pd.Series object.
Return:
- pd.Series: Intersection of s1 and s2.
"""
return pd.Series(list(set(s1) & set(s2)))
def save_embeddings_mean(save_pickle_fpath, dataset):
r"""
    Saves (as a pickle) each WSI in a slide-embedding dataset as the average of its instance-level embeddings.
    Args:
    - save_pickle_fpath (str): Save filepath for the pickle object.
    - dataset (torch.utils.data.Dataset): SlideEmbeddingSplitDataset object that iterates over + loads each WSI in a folder.
Return:
- None
"""
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=4)
embeddings, labels = [], []
for batch, target in dataloader:
with torch.no_grad():
embeddings.append(batch.squeeze(dim=0).mean(dim=0).numpy())
labels.append(target.numpy())
embeddings = np.vstack(embeddings)
labels = np.vstack(labels).squeeze()
asset_dict = {'embeddings': embeddings, 'labels': labels}
with open(save_pickle_fpath, 'wb') as handle:
pickle.dump(asset_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
class SlideEmbeddingSplitDataset(Dataset):
r"""
torch.utils.data.dataset object that iterates+loads each WSI from a split CSV file
Args:
    - dataroot (str): Path to the directory containing tcga_wsi_labels.csv.
- tcga_csv (pd.DataFrame): Clinical CSV (as a pd.DataFrame object) for a TCGA Study
- pt_path (str): Path to folder of saved instance-level feature embeddings for each WSI.
- splits_csv (pd.DataFrame): DataFrame which contains slide_ids for train / val / test
- label_col (str): Which column to use as labels in tcga_csv
- label_dict (dict): Dictionary for categorizing labels
Return:
- None
"""
def __init__(self, dataroot, tcga_csv, pt_path, splits_csv=None,
label_col='oncotree_code', label_dict={'LUSC':0, 'LUAD':1}):
self.csv = pd.read_csv(os.path.join(dataroot, 'tcga_wsi_labels.csv'))
self.csv['slide_path'] = pt_path+self.csv['slide_id']
self.csv = self.csv.set_index('slide_id', drop=True).drop(['Unnamed: 0'], axis=1)
        self.csv.index = self.csv.index.str[:-3]  # strip the '.pt' extension from slide ids
self.csv.index.name = None
self.csv = self.csv.join(tcga_csv, how='inner')
if splits_csv is not None:
self.csv = self.csv.loc[series_intersection(splits_csv.dropna(), self.csv.index)]
self.label_col = label_col
self.label_dict = label_dict
### If using DINO Features, subset and save only the last 384-dim features.
if 'dino_pt_patch_features' in pt_path:
self.last_stage = True
else:
self.last_stage = False
def __getitem__(self, index):
x = torch.load(self.csv['slide_path'][index])
if self.last_stage and x.shape[1] == 1536:
x = x[:,(1536-384):1536]
label = torch.Tensor([self.label_dict[self.csv[self.label_col][index]]]).to(torch.long)
return x, label
def __len__(self):
return self.csv.shape[0]
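def _demo_mean_slide_embeddings(dataroot, pt_path, tcga_csv, splits_csv):
    # Hedged usage sketch (added): collapse each WSI's [N x d] bag of patch embeddings
    # into a single mean vector and pickle the result. All paths are caller-supplied.
    dataset = SlideEmbeddingSplitDataset(dataroot=dataroot, tcga_csv=tcga_csv, pt_path=pt_path,
                                         splits_csv=splits_csv['train'],
                                         label_col='oncotree_code', label_dict={'LUSC': 0, 'LUAD': 1})
    save_embeddings_mean('tcga_lung_train_mean.pkl', dataset)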
def create_slide_embeddings(dataroot, saveroot, enc_name, study):
r"""
"""
path2csv = '../Weakly-Supervised-Subtyping/dataset_csv/'
path2splits = '../Weakly-Supervised-Subtyping/splits/'
splits_folder = j_(path2splits, '10foldcv_subtype', study)
tcga_csv = pd.read_csv(j_(path2csv, f'{study}_subset.csv.zip'), index_col=2)['oncotree_code']
tcga_csv.index = tcga_csv.index.str[:-4]
tcga_csv.index.name = None
save_embedding_dir = j_(saveroot, enc_name)
os.makedirs(save_embedding_dir, exist_ok=True)
if enc_name == 'vit256mean':
pt_path = j_(dataroot, 'vit256mean_tcga_slide_embeddings')
elif enc_name == 'vit16mean':
extracted_dir = f'{study}/extracted_mag20x_patch256_fp/vits_tcga_pancancer_dino_pt_patch_features/'
pt_path = j_(dataroot, extracted_dir)
elif enc_name == 'resnet50mean':
extracted_dir = f'{study}/extracted_mag20x_patch256_fp/resnet50_trunc_pt_patch_features/'
pt_path = j_(dataroot, extracted_dir)
if study == 'tcga_brca':
label_dict={'IDC':0, 'ILC':1}
tcga_csv = tcga_csv[tcga_csv.str.contains('IDC|ILC')]
elif study == 'tcga_kidney':
label_dict={'CCRCC':0, 'PRCC':1, 'CHRCC': 2}
elif study == 'tcga_lung':
label_dict={'LUSC':0, 'LUAD':1}
for i in tqdm(range(10)):
splits_csv = pd.read_csv(os.path.join(splits_folder, f'splits_{i}.csv'), index_col=0)
train = SlideEmbeddingSplitDataset(dataroot=dataroot, tcga_csv=tcga_csv, pt_path=pt_path,
splits_csv=splits_csv['train'], label_dict=label_dict)
test = SlideEmbeddingSplitDataset(dataroot=dataroot, tcga_csv=tcga_csv, pt_path=pt_path,
splits_csv=splits_csv['test'], label_dict=label_dict)
save_embeddings_mean(j_(save_embedding_dir, f'{study}_{enc_name}_class_split_train_{i}.pkl'), train)
save_embeddings_mean(j_(save_embedding_dir, f'{study}_{enc_name}_class_split_test_{i}.pkl'), test) | 6,006 | 37.754839 | 118 | py |
HIPT | HIPT-master/3-Self-Supervised-Eval/patch_extraction_utils.py | ### Dependencies
# Base Dependencies
import os
import pickle
import sys
# LinAlg / Stats / Plotting Dependencies
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
import umap
import umap.plot
from tqdm import tqdm
# Torch Dependencies
import torch
import torch.multiprocessing
import torchvision
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from pl_bolts.models.self_supervised import resnets
from pl_bolts.utils.semi_supervised import Identity
device = torch.device('cuda:0')
torch.multiprocessing.set_sharing_strategy('file_system')
# Model Architectures
from nn_encoder_arch.vision_transformer import vit_small
from nn_encoder_arch.resnet_trunc import resnet50_trunc_baseline
### Helper Functions for Normalization + Loading in pytorch_lightning SSL encoder (for SimCLR)
def eval_transforms(pretrained=False):
if pretrained:
mean, std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
else:
mean, std = (0.5,0.5,0.5), (0.5,0.5,0.5)
trnsfrms_val = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean = mean, std = std)])
return trnsfrms_val
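def _demo_eval_transforms():
    # Illustrative sketch (added): ImageNet statistics are used when pretrained=True;
    # otherwise (0.5, 0.5, 0.5), the convention for the SSL encoders below.
    t = eval_transforms(pretrained=True)
    x = t(Image.new('RGB', (256, 256)))
    assert x.shape == (3, 256, 256)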
def torchvision_ssl_encoder(name: str, pretrained: bool = False, return_all_feature_maps: bool = False):
pretrained_model = getattr(resnets, name)(pretrained=pretrained, return_all_feature_maps=return_all_feature_maps)
pretrained_model.fc = Identity()
return pretrained_model
### Wrapper Classes for loading in patch datasets for BreastPathQ + BCSS (CRC100K uses the ImageFolder Dataset Class)
class CSVDataset_BreastPathQ(Dataset):
def __init__(self, dataroot, csv_path, transforms_eval=eval_transforms()):
self.csv = pd.read_csv(csv_path)
self.csv['img_path'] = dataroot+self.csv['slide'].astype(str) + "_" + self.csv['rid'].astype(str) + '.tif'
self.transforms = transforms_eval
def __getitem__(self, index):
img = Image.open(self.csv['img_path'][index])
return self.transforms(img), self.csv['y'][index]
def __len__(self):
return self.csv.shape[0]
class CSVDataset_BCSS(Dataset):
def __init__(self, dataset_csv, is_train=1, transforms_eval=eval_transforms()):
self.csv = dataset_csv
self.csv = self.csv[self.csv['train']==is_train]
self.transforms = transforms_eval
def __getitem__(self, index):
img = Image.open(self.csv.index[index])
return self.transforms(img), self.csv.iloc[index]['label']
def __len__(self):
return self.csv.shape[0]
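def _demo_breastpathq_dataset(dataroot, csv_path):
    # Hedged usage sketch (added; paths are caller-supplied placeholders): each item is a
    # normalized [3 x H x W] tensor paired with its regression target ('y', tumor cellularity).
    ds = CSVDataset_BreastPathQ(dataroot=dataroot, csv_path=csv_path)
    img, y = ds[0]
    return img.shape, y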
### Functions for Loading + Saving + Visualizing Patch Embeddings
def save_embeddings(model, fname, dataloader, dataset=None, is_imagefolder=False,
save_patches=False, sprite_dim=128, overwrite=False):
    if os.path.isfile('%s.pkl' % fname) and not overwrite:
return None
embeddings, labels = [], []
patches = []
for batch, target in tqdm(dataloader):
        if save_patches:
            for img in batch:
                # Note: tensor2im (tensor -> PIL.Image) is assumed to be provided elsewhere;
                # PIL's Image.resize expects a (width, height) tuple.
                patches.append(tensor2im(input_image=img).resize((sprite_dim, sprite_dim)))
with torch.no_grad():
batch = batch.to(device)
embeddings.append(model(batch).detach().cpu().numpy())
labels.append(target.numpy())
embeddings = np.vstack(embeddings)
labels = np.vstack(labels).squeeze()
if is_imagefolder:
id2label = dict(map(reversed, dataset.class_to_idx.items()))
labels = np.array(list(map(id2label.get, labels.ravel())))
asset_dict = {'embeddings': embeddings, 'labels': labels}
if save_patches:
asset_dict.update({'patches': patches})
with open('%s.pkl' % (fname), 'wb') as handle:
pickle.dump(asset_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
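def _demo_load_embeddings(fname):
    # Illustrative sketch (added): each saved pickle holds a dict with 'embeddings' [N x d]
    # and 'labels' [N] (plus 'patches' when save_patches=True).
    with open('%s.pkl' % fname, 'rb') as handle:
        asset_dict = pickle.load(handle)
    return asset_dict['embeddings'], asset_dict['labels']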
def create_UMAP(library_path, save_path, dataset, enc_name, n=15, d=0.1):
path = os.path.join(library_path, '%s_%s.pkl' % (dataset, enc_name))
with open(path, 'rb') as handle:
asset_dict = pickle.load(handle)
embeddings, labels = asset_dict['embeddings'], asset_dict['labels']
if 'crc100k' in dataset:
labels[labels=='MUS'] = 'STR'
mapper = umap.UMAP(n_neighbors=n, min_dist=d).fit(embeddings)
fig = plt.figure(figsize=(10, 10), dpi=100)
umap.plot.points(mapper, labels=labels, width=600, height=600)
plt.tight_layout()
plt.savefig(os.path.join(save_path, '%s_%s_umap_n%d_d%0.2f.jpg' % (dataset, enc_name, n, d)))
def create_embeddings(embeddings_dir, enc_name, dataset, save_patches=False, sprite_dim=128,
patch_datasets='path/to/patch/datasets', assets_dir ='./ckpts/',
disentangle=-1, stage=-1):
print("Extracting Features for '%s' via '%s'" % (dataset, enc_name))
if enc_name == 'resnet50_trunc':
model = resnet50_trunc_baseline(pretrained=True)
eval_t = eval_transforms(pretrained=True)
elif 'dino' in enc_name:
ckpt_path = os.path.join(assets_dir, enc_name+'.pt')
assert os.path.isfile(ckpt_path)
model = vit_small(patch_size=16)
state_dict = torch.load(ckpt_path, map_location="cpu")['teacher']
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
#print("Missing Keys:", missing_keys)
#print("Unexpected Keys:", unexpected_keys)
eval_t = eval_transforms(pretrained=False)
elif 'simclr' in enc_name:
ckpt_path = os.path.join(assets_dir, enc_name+'.pt')
assert os.path.isfile(ckpt_path)
model = torchvision_ssl_encoder('resnet50', pretrained=True)
missing_keys, unexpected_keys = model.load_state_dict(torch.load(ckpt_path), strict=False)
#print("Missing Keys:", missing_keys)
#print("Unexpected Keys:", unexpected_keys)
eval_t = eval_transforms(pretrained=False)
    else:
        raise NotImplementedError('Unsupported encoder: %s' % enc_name)
model = model.to(device)
model.eval()
if 'simclr' in enc_name or 'simsiam' in enc_name:
_model = model
model = lambda x: _model.forward(x)[0]
elif 'dino' in enc_name:
_model = model
if stage == -1:
model = _model
else:
            model = lambda x: torch.cat([feats[:, 0] for feats in _model.get_intermediate_layers(x, stage)], dim=-1)
if stage != -1:
_stage = '_s%d' % stage
else:
_stage = ''
if dataset == 'crc100k':
### Train
dataroot = os.path.join(patch_datasets, 'NCT-CRC-HE-100K/')
dataset = torchvision.datasets.ImageFolder(dataroot, transform=eval_t)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=False, num_workers=4)
fname = os.path.join(embeddings_dir, 'crc100k_train_%s%s' % (enc_name, _stage))
save_embeddings(model=model, fname=fname, dataloader=dataloader, dataset=dataset,
save_patches=save_patches, sprite_dim=sprite_dim, is_imagefolder=True)
### Test
dataroot = os.path.join(patch_datasets, 'CRC-VAL-HE-7K/')
dataset = torchvision.datasets.ImageFolder(dataroot, transform=eval_t)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=4)
fname = os.path.join(embeddings_dir, 'crc100k_val_%s%s' % (enc_name, _stage))
save_embeddings(model=model, fname=fname, dataloader=dataloader, dataset=dataset,
save_patches=save_patches, sprite_dim=sprite_dim, is_imagefolder=True)
elif dataset == 'crc100knonorm':
### Train
dataroot = os.path.join(patch_datasets, 'NCT-CRC-HE-100K-NONORM/')
dataset = torchvision.datasets.ImageFolder(dataroot, transform=eval_t)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=False, num_workers=4)
fname = os.path.join(embeddings_dir, 'crc100knonorm_train_%s%s' % (enc_name, _stage))
save_embeddings(model=model, fname=fname, dataloader=dataloader, dataset=dataset,
save_patches=save_patches, sprite_dim=sprite_dim, is_imagefolder=True)
### Test
dataroot = os.path.join(patch_datasets, 'CRC-VAL-HE-7K/')
dataset = torchvision.datasets.ImageFolder(dataroot, transform=eval_t)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=4)
fname = os.path.join(embeddings_dir, 'crc100knonorm_val_%s%s' % (enc_name, _stage))
save_embeddings(model=model, fname=fname, dataloader=dataloader, dataset=dataset,
save_patches=save_patches, sprite_dim=sprite_dim, is_imagefolder=True)
elif dataset == 'breastpathq':
train_dataroot = os.path.join(patch_datasets, 'BreastPathQ/breastpathq/datasets/train/')
val_dataroot = os.path.join(patch_datasets, 'BreastPathQ/breastpathq/datasets/validation/')
train_csv = os.path.join(patch_datasets, 'BreastPathQ/breastpathq/datasets/train_labels.csv')
val_csv = os.path.join(patch_datasets, 'BreastPathQ/breastpathq/datasets/val_labels.csv')
train_dataset = CSVDataset_BreastPathQ(dataroot=train_dataroot, csv_path=train_csv, transforms_eval=eval_t)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=4)
val_dataset = CSVDataset_BreastPathQ(dataroot=val_dataroot, csv_path=val_csv, transforms_eval=eval_t)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=4)
train_fname = os.path.join(embeddings_dir, 'breastpathq_train_%s%s' % (enc_name, _stage))
val_fname = os.path.join(embeddings_dir, 'breastpathq_val_%s%s' % (enc_name, _stage))
save_embeddings(model=model, fname=train_fname, dataloader=train_dataloader,
save_patches=save_patches, sprite_dim=sprite_dim)
save_embeddings(model=model, fname=val_fname, dataloader=val_dataloader,
save_patches=save_patches, sprite_dim=sprite_dim)
elif dataset == 'bcss':
dataroot = os.path.join(patch_datasets, 'BCSS/40x/patches/All/')
csv_path = os.path.join(patch_datasets, 'BCSS/40x/patches/summary.csv')
dataset_csv = pd.read_csv(csv_path, sep=' ')['filename,train'].str.split(',', expand=True).astype(int)
dataset_csv.columns = ['label', 'train']
dataset_csv = dataset_csv[dataset_csv['label'].isin([0,1,2,3])]
dataset_csv.index = [os.path.join(dataroot, fname+'.png') for fname in dataset_csv.index]
train_dataset = CSVDataset_BCSS(dataset_csv=dataset_csv, is_train=1, transforms_eval=eval_t)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=1)
val_dataset = CSVDataset_BCSS(dataset_csv=dataset_csv, is_train=0, transforms_eval=eval_t)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=1)
train_fname = os.path.join(embeddings_dir, 'bcss_train_%s%s' % (enc_name, _stage))
val_fname = os.path.join(embeddings_dir, 'bcss_val_%s%s' % (enc_name, _stage))
save_embeddings(model=model, fname=train_fname, dataloader=train_dataloader,
save_patches=save_patches, sprite_dim=sprite_dim)
save_embeddings(model=model, fname=val_fname, dataloader=val_dataloader,
save_patches=save_patches, sprite_dim=sprite_dim) | 11,702 | 46.573171 | 117 | py |
HIPT | HIPT-master/HIPT_4K/hipt_4k.py | ### Dependencies
# Base Dependencies
import os
import pickle
import sys
# LinAlg / Stats / Plotting Dependencies
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
from tqdm import tqdm
# Torch Dependencies
import torch
import torch.multiprocessing
import torchvision
from torchvision import transforms
from einops import rearrange, repeat
torch.multiprocessing.set_sharing_strategy('file_system')
# Local Dependencies
import vision_transformer as vits
import vision_transformer4k as vits4k
from hipt_heatmap_utils import *
from hipt_model_utils import get_vit256, get_vit4k, tensorbatch2im, eval_transforms, roll_batch2img
class HIPT_4K(torch.nn.Module):
"""
HIPT Model (ViT-4K) for encoding non-square images (with [256 x 256] patch tokens), with
[256 x 256] patch tokens encoded via ViT-256 using [16 x 16] patch tokens.
"""
def __init__(self,
model256_path: str = '../Checkpoints/vit256_small_dino.pth',
model4k_path: str = '../Checkpoints/vit4k_xs_dino.pth',
device256=torch.device('cuda:0'),
device4k=torch.device('cuda:1')):
super().__init__()
self.model256 = get_vit256(pretrained_weights=model256_path).to(device256)
self.model4k = get_vit4k(pretrained_weights=model4k_path).to(device4k)
self.device256 = device256
self.device4k = device4k
def forward(self, x):
"""
Forward pass of HIPT (given an image tensor x), outputting the [CLS] token from ViT-4K.
1. x is center-cropped such that the W / H is divisible by the patch token size in ViT-4K (e.g. - 256 x 256).
2. x then gets unfolded into a "batch" of [256 x 256] images.
3. A pretrained ViT-256 model extracts the CLS token from each [256 x 256] image in the batch.
        4. This batch of features is then reshaped into a 2D feature grid (of width "w_256" and height "h_256").
5. This feature grid is then used as the input to ViT-4K, outputting [CLS]_4K.
Args:
- x (torch.Tensor): [1 x C x W' x H'] image tensor.
Return:
- features_cls4k (torch.Tensor): [1 x 192] cls token (d_4k = 192 by default).
"""
batch_256, w_256, h_256 = self.prepare_img_tensor(x) # 1. [1 x 3 x W x H]
batch_256 = batch_256.unfold(2, 256, 256).unfold(3, 256, 256) # 2. [1 x 3 x w_256 x h_256 x 256 x 256]
batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h') # 2. [B x 3 x 256 x 256], where B = (1*w_256*h_256)
features_cls256 = []
for mini_bs in range(0, batch_256.shape[0], 256): # 3. B may be too large for ViT-256. We further take minibatches of 256.
minibatch_256 = batch_256[mini_bs:mini_bs+256].to(self.device256, non_blocking=True)
features_cls256.append(self.model256(minibatch_256).detach().cpu()) # 3. Extracting ViT-256 features from [256 x 3 x 256 x 256] image batches.
features_cls256 = torch.vstack(features_cls256) # 3. [B x 384], where 384 == dim of ViT-256 [ClS] token.
features_cls256 = features_cls256.reshape(w_256, h_256, 384).transpose(0,1).transpose(0,2).unsqueeze(dim=0)
features_cls256 = features_cls256.to(self.device4k, non_blocking=True) # 4. [1 x 384 x w_256 x h_256]
features_cls4k = self.model4k.forward(features_cls256) # 5. [1 x 192], where 192 == dim of ViT-4K [ClS] token.
return features_cls4k
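    # Hedged usage sketch (added; assumes the default checkpoints above are available):
    #   model = HIPT_4K()
    #   x = eval_transforms()(region_pil).unsqueeze(dim=0)  # [1 x 3 x W' x H']
    #   with torch.no_grad():
    #       features_cls4k = model(x)                       # [1 x 192]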
def forward_asset_dict(self, x: torch.Tensor):
"""
Forward pass of HIPT (given an image tensor x), with certain intermediate representations saved in
a dictionary (that is to be stored in a H5 file). See walkthrough of how the model works above.
Args:
- x (torch.Tensor): [1 x C x W' x H'] image tensor.
Return:
- asset_dict (dict): Dictionary of intermediate feature representations of HIPT and other metadata.
            - features_cls256 (np.array): [B x 384] extracted ViT-256 cls tokens
            - features_mean256 (np.array): [1 x 384] mean ViT-256 cls token (excluding non-tissue patches)
            - features_cls4k (np.array): [1 x 192] extracted ViT-4K cls token.
            - features_mean256_cls4k (np.array): [1 x 576] feature vector (concatenating mean ViT-256 + ViT-4K cls tokens)
"""
batch_256, w_256, h_256 = self.prepare_img_tensor(x)
batch_256 = batch_256.unfold(2, 256, 256).unfold(3, 256, 256)
batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')
features_cls256 = []
for mini_bs in range(0, batch_256.shape[0], 256):
minibatch_256 = batch_256[mini_bs:mini_bs+256].to(self.device256, non_blocking=True)
features_cls256.append(self.model256(minibatch_256).detach().cpu())
features_cls256 = torch.vstack(features_cls256)
features_mean256 = features_cls256.mean(dim=0).unsqueeze(dim=0)
features_grid256 = features_cls256.reshape(w_256, h_256, 384).transpose(0,1).transpose(0,2).unsqueeze(dim=0)
features_grid256 = features_grid256.to(self.device4k, non_blocking=True)
features_cls4k = self.model4k.forward(features_grid256).detach().cpu()
features_mean256_cls4k = torch.cat([features_mean256, features_cls4k], dim=1)
asset_dict = {
'features_cls256': features_cls256.numpy(),
'features_mean256': features_mean256.numpy(),
'features_cls4k': features_cls4k.numpy(),
'features_mean256_cls4k': features_mean256_cls4k.numpy()
}
return asset_dict
def _get_region_attention_scores(self, region, scale=1):
r"""
Forward pass in hierarchical model with attention scores saved.
Args:
        - region (PIL.Image): 4096 x 4096 image (the model's own ViT-256 / ViT-4K are used).
- scale (int): How much to scale the output image by (e.g. - scale=4 will resize images to be 1024 x 1024.)
Returns:
- np.array: [256, 256/scale, 256/scale, 3] np.array sequence of image patches from the 4K x 4K region.
- attention_256 (torch.Tensor): [256, 256/scale, 256/scale, 3] torch.Tensor sequence of attention maps for 256-sized patches.
- attention_4k (torch.Tensor): [1, 4096/scale, 4096/scale, 3] torch.Tensor sequence of attention maps for 4k-sized regions.
"""
        x = eval_transforms()(region).unsqueeze(dim=0)
batch_256, w_256, h_256 = self.prepare_img_tensor(x)
batch_256 = batch_256.unfold(2, 256, 256).unfold(3, 256, 256)
batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')
batch_256 = batch_256.to(self.device256, non_blocking=True)
features_cls256 = self.model256(batch_256)
attention_256 = self.model256.get_last_selfattention(batch_256)
        nh = attention_256.shape[1]  # number of heads
attention_256 = attention_256[:, :, 0, 1:].reshape(256, nh, -1)
attention_256 = attention_256.reshape(w_256*h_256, nh, 16, 16)
attention_256 = nn.functional.interpolate(attention_256, scale_factor=int(16/scale), mode="nearest").cpu().numpy()
features_grid256 = features_cls256.reshape(w_256, h_256, 384).transpose(0,1).transpose(0,2).unsqueeze(dim=0)
features_grid256 = features_grid256.to(self.device4k, non_blocking=True)
features_cls4k = self.model4k.forward(features_grid256).detach().cpu()
attention_4k = self.model4k.get_last_selfattention(features_grid256)
        nh = attention_4k.shape[1]  # number of heads
attention_4k = attention_4k[0, :, 0, 1:].reshape(nh, -1)
attention_4k = attention_4k.reshape(nh, w_256, h_256)
attention_4k = nn.functional.interpolate(attention_4k.unsqueeze(0), scale_factor=int(256/scale), mode="nearest")[0].cpu().numpy()
if scale != 1:
batch_256 = nn.functional.interpolate(batch_256, scale_factor=(1/scale), mode="nearest")
return tensorbatch2im(batch_256), attention_256, attention_4k
    def get_region_attention_heatmaps(self, x, offset=128, scale=4, alpha=0.5, cmap = cmap_map(lambda x: x/2 + 0.5, matplotlib.cm.jet), threshold=None, output_dir='.', fname='region'):
r"""
Creates hierarchical heatmaps (Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps saved individually).
Args:
        - x (torch.Tensor): [1 x C x W' x H'] image tensor (converted internally into a PIL region).
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
- offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- scale (int): How much to scale the output image by
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
        Returns:
        - (list, list, list): Lists of PIL.Images for the ViT-4K, ViT-256, and blended heatmaps (one per attention head).
"""
region = Image.fromarray(tensorbatch2im(x)[0])
w, h = region.size
region2 = add_margin(region.crop((128,128,w,h)),
top=0, left=0, bottom=128, right=128, color=(255,255,255))
region3 = add_margin(region.crop((128*2,128*2,w,h)),
top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
region4 = add_margin(region.crop((128*3,128*3,w,h)),
top=0, left=0, bottom=128*4, right=128*4, color=(255,255,255))
        b256_1, a256_1, a4k_1 = self._get_region_attention_scores(region, scale)
        b256_2, a256_2, a4k_2 = self._get_region_attention_scores(region2, scale)
        b256_3, a256_3, a4k_3 = self._get_region_attention_scores(region3, scale)
        b256_4, a256_4, a4k_4 = self._get_region_attention_scores(region4, scale)
offset_2 = (offset*1)//scale
offset_3 = (offset*2)//scale
offset_4 = (offset*3)//scale
w_s, h_s = w//scale, h//scale
w_256, h_256 = w//256, h//256
save_region = np.array(region.resize((w_s, h_s)))
        if threshold is not None:
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], w_256, h_256, size=(w_s//w_256,h_s//h_256))
score256_2 = concat_scores256(a256_2[:,i,:,:], w_256, h_256, size=(w_s//w_256,h_s//h_256))
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:w_s, offset_2:h_s] = score256_2[:(w_s-offset_2), :(h_s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:w_s, offset_2:h_s] += 100
score256 = (score256_1+new_score256_2)/overlay256
mask256 = score256.copy()
mask256[mask256 < threshold] = 0
mask256[mask256 > threshold] = 0.95
color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
region256_hm[mask256==0] = 0
img_inverse = save_region.copy()
img_inverse[mask256 == 0.95] = 0
Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))
        if False:  # disabled debug branch (per-head ViT-4K heatmaps)
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(h_s,w_s))
score4k = score4k_1 / 100
color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region4k_hm).save(os.path.join(output_dir, '%s_4k[%s].png' % (fname, j)))
hm4k, hm256, hm4k_256 = [], [], []
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(h_s,w_s))
score4k_2 = concat_scores4k(a4k_2[j], size=(h_s,w_s))
score4k_3 = concat_scores4k(a4k_3[j], size=(h_s,w_s))
score4k_4 = concat_scores4k(a4k_4[j], size=(h_s,w_s))
new_score4k_2 = np.zeros_like(score4k_2)
new_score4k_2[offset_2:h_s, offset_2:w_s] = score4k_2[:(h_s-offset_2), :(w_s-offset_2)]
new_score4k_3 = np.zeros_like(score4k_3)
new_score4k_3[offset_3:h_s, offset_3:w_s] = score4k_3[:(h_s-offset_3), :(w_s-offset_3)]
new_score4k_4 = np.zeros_like(score4k_4)
new_score4k_4[offset_4:h_s, offset_4:w_s] = score4k_4[:(h_s-offset_4), :(w_s-offset_4)]
overlay4k = np.ones_like(score4k_2)*100
overlay4k[offset_2:h_s, offset_2:w_s] += 100
overlay4k[offset_3:h_s, offset_3:w_s] += 100
overlay4k[offset_4:h_s, offset_4:w_s] += 100
score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
hm4k.append(Image.fromarray(region4k_hm))
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], h_256, w_256, size=(256, 256))
score256_2 = concat_scores256(a256_2[:,i,:,:], h_256, w_256, size=(256, 256))
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:h_s, offset_2:w_s] = score256_2[:(h_s-offset_2), :(w_s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:h_s, offset_2:w_s] += 100
score256 = (score256_1+new_score256_2)/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
hm256.append(Image.fromarray(region256_hm))
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(h_s,w_s))
score4k_2 = concat_scores4k(a4k_2[j], size=(h_s,w_s))
score4k_3 = concat_scores4k(a4k_3[j], size=(h_s,w_s))
score4k_4 = concat_scores4k(a4k_4[j], size=(h_s,w_s))
new_score4k_2 = np.zeros_like(score4k_2)
new_score4k_2[offset_2:h_s, offset_2:w_s] = score4k_2[:(h_s-offset_2), :(w_s-offset_2)]
new_score4k_3 = np.zeros_like(score4k_3)
new_score4k_3[offset_3:h_s, offset_3:w_s] = score4k_3[:(h_s-offset_3), :(w_s-offset_3)]
new_score4k_4 = np.zeros_like(score4k_4)
new_score4k_4[offset_4:h_s, offset_4:w_s] = score4k_4[:(h_s-offset_4), :(w_s-offset_4)]
overlay4k = np.ones_like(score4k_2)*100
overlay4k[offset_2:h_s, offset_2:w_s] += 100
overlay4k[offset_3:h_s, offset_3:w_s] += 100
overlay4k[offset_4:h_s, offset_4:w_s] += 100
score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], h_256, w_256, size=(256, 256))
score256_2 = concat_scores256(a256_2[:,i,:,:], h_256, w_256, size=(256, 256))
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:h_s, offset_2:w_s] = score256_2[:(h_s-offset_2), :(w_s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:h_s, offset_2:w_s] += 100
score256 = (score256_1+new_score256_2)/overlay256
factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
region4k_256_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
hm4k_256.append(Image.fromarray(region4k_256_hm))
return hm4k, hm256, hm4k_256
def prepare_img_tensor(self, img: torch.Tensor, patch_size=256):
"""
Helper function that takes a non-square image tensor, and takes a center crop s.t. the width / height
are divisible by 256.
(Note: "_256" for w / h is should technically be renamed as "_ps", but may not be easier to read.
Until I need to make HIPT with patch_sizes != 256, keeping the naming convention as-is.)
Args:
- img (torch.Tensor): [1 x C x W' x H'] image tensor.
- patch_size (int): Desired patch size to evenly subdivide the image.
Return:
- img_new (torch.Tensor): [1 x C x W x H] image tensor, where W and H are divisble by patch_size.
- w_256 (int): # of [256 x 256] patches of img_new's width (e.g. - W/256)
- h_256 (int): # of [256 x 256] patches of img_new's height (e.g. - H/256)
"""
        make_divisible = lambda l, patch_size: (l - (l % patch_size))
        b, c, w, h = img.shape
        load_size = make_divisible(w, patch_size), make_divisible(h, patch_size)
w_256, h_256 = w // patch_size, h // patch_size
img_new = transforms.CenterCrop(load_size)(img)
return img_new, w_256, h_256 | 15,783 | 46.830303 | 149 | py |
HIPT | HIPT-master/HIPT_4K/hipt_heatmap_utils.py | ### Dependencies
# Base Dependencies
import argparse
import colorsys
from io import BytesIO
import os
import random
import requests
import sys
# LinAlg / Stats / Plotting Dependencies
import cv2
import h5py
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from scipy.stats import rankdata
import skimage.io
from skimage.measure import find_contours
from tqdm import tqdm
import webdataset as wds
# Torch Dependencies
import torch
import torch.nn as nn
import torch.multiprocessing
import torchvision
from torchvision import transforms
from einops import rearrange, repeat
torch.multiprocessing.set_sharing_strategy('file_system')
from attention_visualization_utils import get_patch_attention_scores, tensorbatch2im, concat_scores256
#def concat_scores256(attns, w_256, h_256, size=(256,256)):
# r"""
#
# """
# rank = lambda v: rankdata(v)*100/len(v)
# color_block = [rank(attn.flatten()).reshape(size) for attn in attns]
# color_hm = np.concatenate([
# np.concatenate(color_block[i:(i+h_256)], axis=1)
# for i in range(0,h_256*w_256,h_256)
# ])
# return color_hm
def concat_scores4k(attn, size=(4096, 4096)):
r"""
"""
rank = lambda v: rankdata(v)*100/len(v)
color_hm = rank(attn.flatten()).reshape(size)
return color_hm
def get_scores256(attns, size=(256,256)):
r"""
"""
rank = lambda v: rankdata(v)*100/len(v)
color_block = [rank(attn.flatten()).reshape(size) for attn in attns][0]
return color_block
def cmap_map(function, cmap):
r"""
Applies function (which should operate on vectors of shape 3: [r, g, b]), on colormap cmap.
This routine will break any discontinuous points in a colormap.
Args:
- function (function)
- cmap (matplotlib.colormap)
Returns:
- matplotlib.colormap
"""
cdict = cmap._segmentdata
step_dict = {}
    # First get the list of points where the segments start or end
for key in ('red', 'green', 'blue'):
step_dict[key] = list(map(lambda x: x[0], cdict[key]))
step_list = sum(step_dict.values(), [])
step_list = np.array(list(set(step_list)))
# Then compute the LUT, and apply the function to the LUT
reduced_cmap = lambda step : np.array(cmap(step)[0:3])
old_LUT = np.array(list(map(reduced_cmap, step_list)))
new_LUT = np.array(list(map(function, old_LUT)))
# Now try to make a minimal segment definition of the new LUT
cdict = {}
for i, key in enumerate(['red','green','blue']):
this_cdict = {}
for j, step in enumerate(step_list):
if step in step_dict[key]:
this_cdict[step] = new_LUT[j, i]
elif new_LUT[j,i] != old_LUT[j, i]:
this_cdict[step] = new_LUT[j, i]
colorvector = list(map(lambda x: x + (x[1], ), this_cdict.items()))
colorvector.sort()
cdict[key] = colorvector
return matplotlib.colors.LinearSegmentedColormap('colormap', cdict, 1024)
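def _demo_cmap_map():
    # Illustrative sketch (added): a lightened 'jet' colormap, the same transform used as
    # the default cmap for the hierarchical heatmaps in this repo.
    light_jet = cmap_map(lambda x: x / 2 + 0.5, matplotlib.cm.jet)
    return light_jet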
def getConcatImage(imgs, how='horizontal', gap=0):
r"""
Function to concatenate list of images (vertical or horizontal).
Args:
- imgs (list of PIL.Image): List of PIL Images to concatenate.
- how (str): How the images are concatenated (either 'horizontal' or 'vertical')
- gap (int): Gap (in px) between images
Return:
- dst (PIL.Image): Concatenated image result.
"""
gap_dist = (len(imgs)-1)*gap
if how == 'vertical':
w, h = np.max([img.width for img in imgs]), np.sum([img.height for img in imgs])
h += gap_dist
curr_h = 0
dst = Image.new('RGBA', (w, h), color=(255, 255, 255, 0))
for img in imgs:
dst.paste(img, (0, curr_h))
curr_h += img.height + gap
elif how == 'horizontal':
w, h = np.sum([img.width for img in imgs]), np.min([img.height for img in imgs])
w += gap_dist
curr_w = 0
dst = Image.new('RGBA', (w, h), color=(255, 255, 255, 0))
for idx, img in enumerate(imgs):
dst.paste(img, (curr_w, 0))
curr_w += img.width + gap
return dst
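def _demo_getConcatImage():
    # Illustrative sketch (added): tile two blank patches side by side with a 4px gap.
    imgs = [Image.new('RGB', (64, 64), (255, 255, 255)) for _ in range(2)]
    grid = getConcatImage(imgs, how='horizontal', gap=4)
    assert grid.size == (64 * 2 + 4, 64)
    return grid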
def add_margin(pil_img, top, right, bottom, left, color):
r"""
Adds custom margin to PIL.Image.
"""
width, height = pil_img.size
new_width = width + right + left
new_height = height + top + bottom
result = Image.new(pil_img.mode, (new_width, new_height), color)
result.paste(pil_img, (left, top))
return result
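def _demo_add_margin():
    # Illustrative sketch (added): shift a patch "up-left" by cropping 16px off the top-left
    # and padding 16px of white on the bottom-right, preserving the 256px size. This is the
    # same offsetting trick the heatmap functions below use to average attention across shifts.
    patch = Image.new('RGB', (256, 256), (255, 255, 255))
    shifted = add_margin(patch.crop((16, 16, 256, 256)), top=0, right=16, bottom=16, left=0, color=(255, 255, 255))
    assert shifted.size == (256, 256)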
################################################
# 256 x 256 ("Patch") Attention Heatmap Creation
################################################
def create_patch_heatmaps_indiv(patch, model256, output_dir, fname, threshold=0.5,
offset=16, alpha=0.5, cmap=plt.get_cmap('coolwarm'), device256=torch.device('cpu')):
r"""
Creates patch heatmaps (saved individually)
To be refactored!
Args:
- patch (PIL.Image): 256 x 256 Image
- model256 (torch.nn): 256-Level ViT
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
    - threshold (float): Attention-score cutoff for the binarized heatmaps (skipped if None)
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
Returns:
- None
"""
patch1 = patch.copy()
patch2 = add_margin(patch.crop((16,16,256,256)), top=0, left=0, bottom=16, right=16, color=(255,255,255))
    b256_1, a256_1 = get_patch_attention_scores(patch1, model256, device256=device256)
    b256_2, a256_2 = get_patch_attention_scores(patch2, model256, device256=device256)
save_region = np.array(patch.copy())
s = 256
offset_2 = offset
    if threshold is not None:
for i in range(6):
score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
mask256 = score256.copy()
mask256[mask256 < threshold] = 0
mask256[mask256 > threshold] = 0.95
color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
region256_hm[mask256==0] = 0
img_inverse = save_region.copy()
img_inverse[mask256 == 0.95] = 0
Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))
for i in range(6):
score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region256_hm).save(os.path.join(output_dir, '%s_256[%s].png' % (fname, i)))
def create_patch_heatmaps_concat(patch, model256, output_dir, fname, threshold=0.5,
offset=16, alpha=0.5, cmap=plt.get_cmap('coolwarm'), device256=torch.device('cpu')):
r"""
Creates patch heatmaps (concatenated for easy comparison)
To be refactored!
Args:
- patch (PIL.Image): 256 x 256 Image
- model256 (torch.nn): 256-Level ViT
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
    - threshold (float): Attention-score cutoff for the binarized heatmaps (skipped if None)
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
Returns:
- None
"""
patch1 = patch.copy()
patch2 = add_margin(patch.crop((16,16,256,256)), top=0, left=0, bottom=16, right=16, color=(255,255,255))
    b256_1, a256_1 = get_patch_attention_scores(patch1, model256, device256=device256)
    b256_2, a256_2 = get_patch_attention_scores(patch2, model256, device256=device256)
save_region = np.array(patch.copy())
s = 256
offset_2 = offset
    if threshold is not None:
ths = []
for i in range(6):
score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
mask256 = score256.copy()
mask256[mask256 < threshold] = 0
mask256[mask256 > threshold] = 0.95
color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
region256_hm[mask256==0] = 0
img_inverse = save_region.copy()
img_inverse[mask256 == 0.95] = 0
ths.append(region256_hm+img_inverse)
ths = [Image.fromarray(img) for img in ths]
getConcatImage([getConcatImage(ths[0:3]),
getConcatImage(ths[3:6])], how='vertical').save(os.path.join(output_dir, '%s_256th.png' % (fname)))
hms = []
for i in range(6):
score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
hms.append(region256_hm)
hms = [Image.fromarray(img) for img in hms]
getConcatImage([getConcatImage(hms[0:3]),
getConcatImage(hms[3:6])], how='vertical').save(os.path.join(output_dir, '%s_256hm.png' % (fname)))
################################################
# 4096 x 4096 ("Region") Attention Heatmap Creation
################################################
def get_region_attention_scores(region, model256, model4k, scale=1,
device256=torch.device('cpu'),
device4k=torch.device('cpu')):
r"""
Forward pass in hierarchical model with attention scores saved.
To be refactored!
Args:
- region (PIL.Image): 4096 x 4096 Image
- model256 (torch.nn): 256-Level ViT
- model4k (torch.nn): 4096-Level ViT
- scale (int): How much to scale the output image by (e.g. - scale=4 will resize images to be 1024 x 1024.)
Returns:
- np.array: [256, 256/scale, 256/scale, 3] np.array sequence of image patches from the 4K x 4K region.
- attention_256 (torch.Tensor): [256, 256/scale, 256/scale, 3] torch.Tensor sequence of attention maps for 256-sized patches.
- attention_4k (torch.Tensor): [1, 4096/scale, 4096/scale, 3] torch.Tensor sequence of attention maps for 4k-sized regions.
"""
t = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
)
])
with torch.no_grad():
batch_256 = t(region).unsqueeze(0).unfold(2, 256, 256).unfold(3, 256, 256)
batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')
batch_256 = batch_256.to(device256, non_blocking=True)
features_256 = model256(batch_256)
attention_256 = model256.get_last_selfattention(batch_256)
        nh = attention_256.shape[1]  # number of heads
attention_256 = attention_256[:, :, 0, 1:].reshape(256, nh, -1)
attention_256 = attention_256.reshape(256, nh, 16, 16)
attention_256 = nn.functional.interpolate(attention_256, scale_factor=int(16/scale), mode="nearest").cpu().numpy()
features_4096 = features_256.unfold(0, 16, 16).transpose(0,1).unsqueeze(dim=0)
attention_4096 = model4k.get_last_selfattention(features_4096.detach().to(device4k))
        nh = attention_4096.shape[1]  # number of heads
attention_4096 = attention_4096[0, :, 0, 1:].reshape(nh, -1)
attention_4096 = attention_4096.reshape(nh, 16, 16)
attention_4096 = nn.functional.interpolate(attention_4096.unsqueeze(0), scale_factor=int(256/scale), mode="nearest")[0].cpu().numpy()
if scale != 1:
batch_256 = nn.functional.interpolate(batch_256, scale_factor=(1/scale), mode="nearest")
return tensorbatch2im(batch_256), attention_256, attention_4096
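# Hedged usage sketch (added; `region` is a 4096 x 4096 PIL.Image and the two models come
# from the pretrained ViT-256 / ViT-4K checkpoints; shapes below assume scale=4):
#   patches, attn256, attn4k = get_region_attention_scores(region, model256, model4k, scale=4)
#   patches.shape -> (256, 64, 64, 3)     # RGB patches, downscaled by `scale`
#   attn256.shape -> (256, nh, 64, 64)    # per-patch ViT-256 attention maps (nh heads)
#   attn4k.shape  -> (nh, 1024, 1024)     # ViT-4K attention maps over the whole region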
def create_hierarchical_heatmaps_indiv(region, model256, model4k, output_dir, fname,
offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm'), threshold=None,
device256=torch.device('cpu'), device4k=torch.device('cpu')):
r"""
Creates hierarchical heatmaps (Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps saved individually).
To be refactored!
Args:
- region (PIL.Image): 4096 x 4096 Image
- model256 (torch.nn): 256-Level ViT
- model4k (torch.nn): 4096-Level ViT
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
- offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- scale (int): How much to scale the output image by
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
Returns:
- None
"""
region2 = add_margin(region.crop((128,128,4096,4096)),
top=0, left=0, bottom=128, right=128, color=(255,255,255))
region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
top=0, left=0, bottom=128*4, right=128*4, color=(255,255,255))
b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale, device256=device256, device4k=device4k)
b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale, device256=device256, device4k=device4k)
b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale, device256=device256, device4k=device4k)
b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale, device256=device256, device4k=device4k)
offset_2 = (offset*1)//scale
offset_3 = (offset*2)//scale
offset_4 = (offset*3)//scale
s = 4096//scale
save_region = np.array(region.resize((s, s)))
    if threshold is not None:
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
mask256 = score256.copy()
mask256[mask256 < threshold] = 0
mask256[mask256 > threshold] = 0.95
color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
region256_hm[mask256==0] = 0
img_inverse = save_region.copy()
img_inverse[mask256 == 0.95] = 0
Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))
    if False:  # disabled debug branch (per-head ViT-4K heatmaps)
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
score4k = score4k_1 / 100
color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region4k_hm).save(os.path.join(output_dir, '%s_4k[%s].png' % (fname, j)))
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
new_score4k_2 = np.zeros_like(score4k_2)
new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
new_score4k_3 = np.zeros_like(score4k_3)
new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
new_score4k_4 = np.zeros_like(score4k_4)
new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
overlay4k = np.ones_like(score4k_2)*100
overlay4k[offset_2:s, offset_2:s] += 100
overlay4k[offset_3:s, offset_3:s] += 100
overlay4k[offset_4:s, offset_4:s] += 100
score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region4k_hm).save(os.path.join(output_dir, '%s_1024[%s].png' % (fname, j)))
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region256_hm).save(os.path.join(output_dir, '%s_256[%s].png' % (fname, i)))
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
new_score4k_2 = np.zeros_like(score4k_2)
new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
new_score4k_3 = np.zeros_like(score4k_3)
new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
new_score4k_4 = np.zeros_like(score4k_4)
new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
overlay4k = np.ones_like(score4k_2)*100
overlay4k[offset_2:s, offset_2:s] += 100
overlay4k[offset_3:s, offset_3:s] += 100
overlay4k[offset_4:s, offset_4:s] += 100
score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100*2
overlay256[offset_2:s, offset_2:s] += 100*2
score256 = (score256_1+new_score256_2)*2/overlay256
factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region_hm).save(os.path.join(output_dir, '%s_factorized_4k[%s]_256[%s].png' % (fname, j, i)))
return
def create_hierarchical_heatmaps_concat(region, model256, model4k, output_dir, fname,
offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm'),
device256=torch.device('cpu'), device4k=torch.device('cpu')):
r"""
Creates hierarchical heatmaps (With Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps concatenated for easy comparison)
To be refactored!
Args:
- region (PIL.Image): 4096 x 4096 Image
- model256 (torch.nn): 256-Level ViT
- model4k (torch.nn): 4096-Level ViT
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
- offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- scale (int): How much to scale the output image by
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
Returns:
- None
"""
region2 = add_margin(region.crop((128,128,4096,4096)),
top=0, left=0, bottom=128, right=128, color=(255,255,255))
region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
top=0, left=0, bottom=128*4, right=128*4, color=(255,255,255))
b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale, device256=device256, device4k=device4k)
b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale, device256=device256, device4k=device4k)
b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale, device256=device256, device4k=device4k)
b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale, device256=device256, device4k=device4k)
offset_2 = (offset*1)//scale
offset_3 = (offset*2)//scale
offset_4 = (offset*3)//scale
s = 4096//scale
save_region = np.array(region.resize((s, s)))
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
new_score4k_2 = np.zeros_like(score4k_2)
new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
new_score4k_3 = np.zeros_like(score4k_3)
new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
new_score4k_4 = np.zeros_like(score4k_4)
new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
overlay4k = np.ones_like(score4k_2)*100
overlay4k[offset_2:s, offset_2:s] += 100
overlay4k[offset_3:s, offset_3:s] += 100
overlay4k[offset_4:s, offset_4:s] += 100
score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
color_block4k = (cmap(score4k_1/100)*255)[:,:,:3].astype(np.uint8)
region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100*2
overlay256[offset_2:s, offset_2:s] += 100*2
score256 = (score256_1+new_score256_2)*2/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
pad = 100
canvas = Image.new('RGB', (s*2+pad,)*2, (255,)*3)
draw = ImageDraw.Draw(canvas)
            font = ImageFont.truetype("FreeMono.ttf", 50)  # text layout below assumes s == 1024 (i.e. scale == 4)
draw.text((1024*0.5-pad*2, pad//4), "ViT-256 (Head: %d)" % i, (0, 0, 0), font=font)
canvas = canvas.rotate(90)
draw = ImageDraw.Draw(canvas)
draw.text((1024*1.5-pad, pad//4), "ViT-4K (Head: %d)" % j, (0, 0, 0), font=font)
canvas.paste(Image.fromarray(save_region), (pad,pad))
canvas.paste(Image.fromarray(region4k_hm), (1024+pad,pad))
canvas.paste(Image.fromarray(region256_hm), (pad,1024+pad))
canvas.paste(Image.fromarray(region_hm), (s+pad,s+pad))
canvas.save(os.path.join(output_dir, '%s_4k[%s]_256[%s].png' % (fname, j, i)))
return
def create_hierarchical_heatmaps_concat_select(region, model256, model4k, output_dir, fname,
offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm'),
device256=torch.device('cpu'), device4k=torch.device('cpu')):
r"""
Creates hierarchical heatmaps (With Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps concatenated for easy comparison), with only select attention heads are used.
To be refactored!
Args:
- region (PIL.Image): 4096 x 4096 Image
- model256 (torch.nn): 256-Level ViT
- model4k (torch.nn): 4096-Level ViT
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
- offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- scale (int): How much to scale the output image by
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
Returns:
- None
"""
region2 = add_margin(region.crop((128,128,4096,4096)),
top=0, left=0, bottom=128, right=128, color=(255,255,255))
region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
top=0, left=0, bottom=128*3, right=128*3, color=(255,255,255)) # 128*3 pads the crop back to 4096 x 4096 (128*4 yields 4224)
b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale, device256=device256, device4k=device4k)
b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale, device256=device256, device4k=device4k)
b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale, device256=device256, device4k=device4k)
b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale, device256=device256, device4k=device4k)
offset_2 = (offset*1)//scale
offset_3 = (offset*2)//scale
offset_4 = (offset*3)//scale
s = 4096//scale
save_region = np.array(region.resize((s, s)))
canvas = [[Image.fromarray(save_region), None, None], [None, None, None]]
for idx_4k, j in enumerate([0,5]):
score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
new_score4k_2 = np.zeros_like(score4k_2)
new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
new_score4k_3 = np.zeros_like(score4k_3)
new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
new_score4k_4 = np.zeros_like(score4k_4)
new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
overlay4k = np.ones_like(score4k_2)*100
overlay4k[offset_2:s, offset_2:s] += 100
overlay4k[offset_3:s, offset_3:s] += 100
overlay4k[offset_4:s, offset_4:s] += 100
score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
color_block4k = (cmap(score4k_1/100)*255)[:,:,:3].astype(np.uint8)
region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
canvas[0][idx_4k+1] = Image.fromarray(region4k_hm)
for idx_256, i in enumerate([2]):
score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100*2
overlay256[offset_2:s, offset_2:s] += 100*2
score256 = (score256_1+new_score256_2)*2/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
canvas[idx_256+1][0] = Image.fromarray(region256_hm)
factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
canvas[idx_256+1][idx_4k+1] = Image.fromarray(region_hm)
canvas = getConcatImage([getConcatImage(row) for row in canvas], how='vertical')
canvas.save(os.path.join(output_dir, '%s_heatmap.png' % (fname)))
return | 31,892 | 46.672646 | 163 | py |
HIPT | HIPT-master/HIPT_4K/vision_transformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
import warnings
from functools import partial
import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
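# --- Editor's example (hedged sketch, not part of the original file) ---
# trunc_normal_ samples from N(mean, std) restricted to the absolute interval [a, b];
# note that a and b are absolute cutoffs here, not multiples of std.
def _demo_trunc_normal():
    w = torch.empty(1000)
    trunc_normal_(w, mean=0., std=0.02, a=-2., b=2.)
    assert float(w.min()) >= -2. and float(w.max()) <= 2.
    return float(w.std())  # empirically close to 0.02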
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
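# --- Editor's example (hedged sketch, not part of the original file) ---
# DropPath zeroes whole residual branches per sample during training and rescales
# the survivors by 1/keep_prob; at eval time it is the identity.
def _demo_drop_path():
    dp = DropPath(drop_prob=0.5)
    x = torch.ones(8, 4)
    dp.train()
    y_train = dp(x)   # each row is either all zeros or all 2.0 (= 1/keep_prob)
    dp.eval()
    y_eval = dp(x)    # identical to x
    return y_train, y_eval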
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
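# --- Editor's example (hedged sketch, not part of the original file) ---
# Unlike timm's stock Attention, this variant returns the attention map alongside the
# projected tokens, which is what get_last_selfattention relies on further down.
def _demo_attention_shapes():
    attn_layer = Attention(dim=384, num_heads=6)
    tokens = torch.randn(2, 257, 384)           # [B, 1 CLS + 16*16 patch tokens, C]
    out, attn = attn_layer(tokens)
    assert out.shape == (2, 257, 384)
    assert attn.shape == (2, 6, 257, 257)       # [B, heads, N, N]
    return out, attn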
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
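# --- Editor's example (hedged sketch, not part of the original file) ---
# HIPT's ViT-256 is the vit_small variant with patch_size=16; a 256 x 256 input is
# handled via interpolate_pos_encoding and yields a single 384-dim [CLS] embedding.
def _demo_vit_small_forward():
    model = vit_small(patch_size=16, num_classes=0)
    img = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        feat = model(img)                            # [1, 384] CLS embedding
        attn = model.get_last_selfattention(img)     # [1, 6, 257, 257]
    return feat, attn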
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
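# --- Editor's example (hedged sketch, not part of the original file; dims illustrative) ---
# The DINO projection head maps backbone embeddings to prototype logits through an MLP,
# L2 normalization, and a weight-normalized final layer.
def _demo_dino_head():
    head = DINOHead(in_dim=384, out_dim=4096)
    z = torch.randn(2, 384)
    logits = head(z)          # [2, 4096]
    return logits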
| 12,706 | 37.389728 | 124 | py |
HIPT | HIPT-master/HIPT_4K/vision_transformer4k.py | import argparse
import os
import sys
import datetime
import time
import math
import warnings
import json
from pathlib import Path
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
import vision_transformer as vits
from vision_transformer import DINOHead
from functools import partial
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class VisionTransformer4K(nn.Module):
""" Vision Transformer 4K """
def __init__(self, num_classes=0, img_size=[224], input_embed_dim=384, output_embed_dim = 192,
depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, num_prototypes=64, **kwargs):
super().__init__()
embed_dim = output_embed_dim
self.num_features = self.embed_dim = embed_dim
self.phi = nn.Sequential(*[nn.Linear(input_embed_dim, output_embed_dim), nn.GELU(), nn.Dropout(p=drop_rate)])
num_patches = int(img_size[0] // 16)**2
print("# of Patches:", num_patches)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // 1 # inputs are already token-grid features, so the effective patch size is 1
h0 = h // 1
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
#print('preparing tokens (after crop)', x.shape)
self.mpp_feature = x
B, embed_dim, w, h = x.shape
x = x.flatten(2, 3).transpose(1,2)
x = self.phi(x)
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit4k_xs(patch_size=16, **kwargs):
model = VisionTransformer4K(
patch_size=patch_size, input_embed_dim=384, output_embed_dim=192,
depth=6, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
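# --- Editor's example (hedged sketch, not part of the original file) ---
# ViT-4K consumes a grid of ViT-256 [CLS] features rather than raw pixels: a 4096 x 4096
# region tokenized into a 16 x 16 grid of 256px patches arrives as a [1, 384, 16, 16] tensor.
def _demo_vit4k_forward():
    model = vit4k_xs()
    features_256 = torch.randn(1, 384, 16, 16)    # [B, input_embed_dim, 16, 16]
    with torch.no_grad():
        feat_4k = model(features_256)             # [1, 192] region-level embedding
    return feat_4k, count_parameters(model)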
| 10,172 | 35.858696 | 123 | py |
HIPT | HIPT-master/HIPT_4K/hipt_model_utils.py | ### Dependencies
# Base Dependencies
import argparse
import colorsys
from io import BytesIO
import os
import random
import requests
import sys
# LinAlg / Stats / Plotting Dependencies
import cv2
import h5py
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from scipy.stats import rankdata
import skimage.io
from skimage.measure import find_contours
from tqdm import tqdm
import webdataset as wds
# Torch Dependencies
import torch
import torch.multiprocessing
import torchvision
from torchvision import transforms
from einops import rearrange, repeat
torch.multiprocessing.set_sharing_strategy('file_system')
# Local Dependencies
import vision_transformer as vits
import vision_transformer4k as vits4k
def get_vit256(pretrained_weights, arch='vit_small', device=torch.device('cuda:0')):
r"""
Builds ViT-256 Model.
Args:
- pretrained_weights (str): Path to ViT-256 Model Checkpoint.
- arch (str): Which model architecture.
- device (torch): Torch device to save model.
Returns:
- model256 (torch.nn): Initialized model.
"""
checkpoint_key = 'teacher'
# note: the caller-provided `device` argument is used directly (an earlier hard-coded CPU override was removed)
model256 = vits.__dict__[arch](patch_size=16, num_classes=0)
for p in model256.parameters():
p.requires_grad = False
model256.eval()
model256.to(device)
if os.path.isfile(pretrained_weights):
state_dict = torch.load(pretrained_weights, map_location="cpu")
if checkpoint_key is not None and checkpoint_key in state_dict:
print(f"Take key {checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model256.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
return model256
def get_vit4k(pretrained_weights, arch='vit4k_xs', device=torch.device('cuda:1')):
r"""
Builds ViT-4K Model.
Args:
- pretrained_weights (str): Path to ViT-4K Model Checkpoint.
- arch (str): Which model architecture.
- device (torch): Torch device to save model.
Returns:
- model4k (torch.nn): Initialized model.
"""
checkpoint_key = 'teacher'
# note: the caller-provided `device` argument is used directly (an earlier hard-coded CPU override was removed)
model4k = vits4k.__dict__[arch](num_classes=0)
for p in model4k.parameters():
p.requires_grad = False
model4k.eval()
model4k.to(device)
if os.path.isfile(pretrained_weights):
state_dict = torch.load(pretrained_weights, map_location="cpu")
if checkpoint_key is not None and checkpoint_key in state_dict:
print(f"Take key {checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model4k.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
return model4k
def eval_transforms():
"""
"""
mean, std = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
eval_t = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean = mean, std = std)])
return eval_t
def roll_batch2img(batch: torch.Tensor, w: int, h: int, patch_size=256):
"""
Rolls an image tensor batch (batch of [256 x 256] images) into a [W x H] Pil.Image object.
Args:
batch (torch.Tensor): [B x 3 x 256 x 256] image tensor batch.
Return:
Image.PIL: [W x H X 3] Image.
"""
batch = batch.reshape(w, h, 3, patch_size, patch_size)
img = rearrange(batch, 'p1 p2 c w h-> c (p1 w) (p2 h)').unsqueeze(dim=0)
return Image.fromarray(tensorbatch2im(img)[0])
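# --- Editor's example (hedged sketch, not part of the original file) ---
# Undo the 256px patching of a 4K region; assumes the batch came from unfolding a
# 4096 x 4096 image into a 16 x 16 grid of tiles normalized to [-1, 1].
def _demo_roll_batch2img():
    batch = torch.rand(256, 3, 256, 256) * 2 - 1   # stand-in for normalized patches
    img = roll_batch2img(batch, w=16, h=16)        # PIL.Image, 4096 x 4096
    return img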
def tensorbatch2im(input_image, imtype=np.uint8):
r""""
Converts a Tensor array into a numpy image array.
Args:
- input_image (torch.Tensor): (B, C, W, H) Torch Tensor.
- imtype (type): the desired type of the converted numpy array
Returns:
- image_numpy (np.array): (B, W, H, C) Numpy Array.
"""
if not isinstance(input_image, np.ndarray):
image_numpy = input_image.cpu().float().numpy() # convert it into a numpy array
#if image_numpy.shape[0] == 1: # grayscale to RGB
# image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (0, 2, 3, 1)) + 1) / 2.0 * 255.0 # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
| 5,125 | 32.503268 | 122 | py |
HIPT | HIPT-master/HIPT_4K/attention_visualization_utils.py | ### Dependencies
import argparse
import colorsys
from io import BytesIO
import os
import random
import requests
import sys
import cv2
import h5py
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from scipy.stats import rankdata
import skimage.io
from skimage.measure import find_contours
from tqdm import tqdm
import webdataset as wds
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms as pth_transforms
import torchvision.transforms as transforms
from einops import rearrange, repeat
sys.path.append('../')
sys.path.append('../Hierarchical-Pretraining/')
import vision_transformer as vits
import vision_transformer4k as vits4k
def get_vit256(pretrained_weights, arch='vit_small', device=torch.device('cpu')):
r"""
Builds ViT-256 Model.
Args:
- pretrained_weights (str): Path to ViT-256 Model Checkpoint.
- arch (str): Which model architecture.
- device (torch): Torch device to save model.
Returns:
- model256 (torch.nn): Initialized model.
"""
checkpoint_key = 'teacher'
# note: the caller-provided `device` argument is used directly (an earlier hard-coded CPU override was removed)
model256 = vits.__dict__[arch](patch_size=16, num_classes=0)
for p in model256.parameters():
p.requires_grad = False
model256.eval()
model256.to(device)
if os.path.isfile(pretrained_weights):
state_dict = torch.load(pretrained_weights, map_location="cpu")
if checkpoint_key is not None and checkpoint_key in state_dict:
print(f"Take key {checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model256.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
return model256
def get_vit4k(pretrained_weights, arch='vit4k_xs', device=torch.device('cpu')):
r"""
Builds ViT-4K Model.
Args:
- pretrained_weights (str): Path to ViT-4K Model Checkpoint.
- arch (str): Which model architecture.
- device (torch): Torch device to save model.
Returns:
- model4k (torch.nn): Initialized model.
"""
checkpoint_key = 'teacher'
# note: the caller-provided `device` argument is used directly (an earlier hard-coded CPU override was removed)
model4k = vits4k.__dict__[arch](num_classes=0)
for p in model4k.parameters():
p.requires_grad = False
model4k.eval()
model4k.to(device)
if os.path.isfile(pretrained_weights):
state_dict = torch.load(pretrained_weights, map_location="cpu")
if checkpoint_key is not None and checkpoint_key in state_dict:
print(f"Take key {checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model4k.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
return model4k
def cmap_map(function, cmap):
r"""
Applies function (which should operate on vectors of shape 3: [r, g, b]), on colormap cmap.
This routine will break any discontinuous points in a colormap.
Args:
- function (function)
- cmap (matplotlib.colormap)
Returns:
- matplotlib.colormap
"""
cdict = cmap._segmentdata
step_dict = {}
# First get the list of points where the segments start or end
for key in ('red', 'green', 'blue'):
step_dict[key] = list(map(lambda x: x[0], cdict[key]))
step_list = sum(step_dict.values(), [])
step_list = np.array(list(set(step_list)))
# Then compute the LUT, and apply the function to the LUT
reduced_cmap = lambda step : np.array(cmap(step)[0:3])
old_LUT = np.array(list(map(reduced_cmap, step_list)))
new_LUT = np.array(list(map(function, old_LUT)))
# Now try to make a minimal segment definition of the new LUT
cdict = {}
for i, key in enumerate(['red','green','blue']):
this_cdict = {}
for j, step in enumerate(step_list):
if step in step_dict[key]:
this_cdict[step] = new_LUT[j, i]
elif new_LUT[j,i] != old_LUT[j, i]:
this_cdict[step] = new_LUT[j, i]
colorvector = list(map(lambda x: x + (x[1], ), this_cdict.items()))
colorvector.sort()
cdict[key] = colorvector
return matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024)
def identity(x):
r"""
Identity Function.
Args:
- x:
Returns:
- x
"""
return x
def tensorbatch2im(input_image, imtype=np.uint8):
r""""
Converts a Tensor array into a numpy image array.
Args:
- input_image (torch.Tensor): (B, C, W, H) Torch Tensor.
- imtype (type): the desired type of the converted numpy array
Returns:
- image_numpy (np.array): (B, W, H, C) Numpy Array.
"""
if not isinstance(input_image, np.ndarray):
image_numpy = input_image.cpu().float().numpy() # convert it into a numpy array
#if image_numpy.shape[0] == 1: # grayscale to RGB
# image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (0, 2, 3, 1)) + 1) / 2.0 * 255.0 # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def getConcatImage(imgs, how='horizontal', gap=0):
r"""
Function to concatenate list of images (vertical or horizontal).
Args:
- imgs (list of PIL.Image): List of PIL Images to concatenate.
- how (str): How the images are concatenated (either 'horizontal' or 'vertical')
- gap (int): Gap (in px) between images
Return:
- dst (PIL.Image): Concatenated image result.
"""
gap_dist = (len(imgs)-1)*gap
if how == 'vertical':
w, h = np.max([img.width for img in imgs]), np.sum([img.height for img in imgs])
h += gap_dist
curr_h = 0
dst = Image.new('RGBA', (w, h), color=(255, 255, 255, 0))
for img in imgs:
dst.paste(img, (0, curr_h))
curr_h += img.height + gap
elif how == 'horizontal':
w, h = np.sum([img.width for img in imgs]), np.min([img.height for img in imgs])
w += gap_dist
curr_w = 0
dst = Image.new('RGBA', (w, h), color=(255, 255, 255, 0))
for idx, img in enumerate(imgs):
dst.paste(img, (curr_w, 0))
curr_w += img.width + gap
return dst
def add_margin(pil_img, top, right, bottom, left, color):
r"""
Adds custom margin to PIL.Image.
"""
width, height = pil_img.size
new_width = width + right + left
new_height = height + top + bottom
result = Image.new(pil_img.mode, (new_width, new_height), color)
result.paste(pil_img, (left, top))
return result
def concat_scores256(attns, size=(256,256)):
r"""
"""
rank = lambda v: rankdata(v)*100/len(v)
color_block = [rank(attn.flatten()).reshape(size) for attn in attns]
color_hm = np.concatenate([
np.concatenate(color_block[i:(i+16)], axis=1)
for i in range(0,256,16)
])
return color_hm
def concat_scores4k(attn, size=(4096, 4096)):
r"""
"""
rank = lambda v: rankdata(v)*100/len(v)
color_hm = rank(attn.flatten()).reshape(size)
return color_hm
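# --- Editor's example (hedged sketch, not part of the original file) ---
# The concat_scores* helpers replace raw attention weights with percentile ranks in
# (0, 100], so heatmaps from different heads (and models) share a common scale.
def _demo_rank_normalization():
    attn = np.random.rand(64, 64)                  # stand-in for one head's attention map
    hm = concat_scores4k(attn, size=(64, 64))
    assert hm.min() > 0 and hm.max() <= 100
    return hm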
def get_scores256(attns, size=(256,256)):
r"""
"""
rank = lambda v: rankdata(v)*100/len(v)
color_block = [rank(attn.flatten()).reshape(size) for attn in attns][0]
return color_block
def get_patch_attention_scores(patch, model256, scale=1, device256=torch.device('cpu')):
r"""
Forward pass in ViT-256 model with attention scores saved.
Args:
- patch (PIL.Image): 256 x 256 Image
- model256 (torch.nn): 256-Level ViT
- scale (int): How much to scale the output image by (e.g. - scale=4 will resize images to be 64 x 64.)
Returns:
- np.array: [1, 256/scale, 256/scale, 3] np.array of the (possibly resized) input patch.
- attention_256 (np.array): [1, nh, 256/scale, 256/scale] array of attention maps, one per head.
"""
t = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
)
])
with torch.no_grad():
batch_256 = t(patch).unsqueeze(0)
batch_256 = batch_256.to(device256, non_blocking=True)
features_256 = model256(batch_256)
attention_256 = model256.get_last_selfattention(batch_256)
nh = attention_256.shape[1] # number of head
attention_256 = attention_256[:, :, 0, 1:].reshape(256, nh, -1)
attention_256 = attention_256.reshape(1, nh, 16, 16)
attention_256 = nn.functional.interpolate(attention_256, scale_factor=int(16/scale), mode="nearest").cpu().numpy()
if scale != 1:
batch_256 = nn.functional.interpolate(batch_256, scale_factor=(1/scale), mode="nearest")
return tensorbatch2im(batch_256), attention_256
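# --- Editor's usage sketch (hedged; assumes a ViT-256 built with get_vit256 above;
# the random image stands in for a real 256 x 256 H&E patch) ---
def _demo_patch_attention(model256):
    patch = Image.fromarray(np.uint8(np.random.rand(256, 256, 3) * 255))
    batch, attn = get_patch_attention_scores(patch, model256, scale=1)
    # batch: [1, 256, 256, 3] uint8 image; attn: [1, nh, 256, 256] per-head maps
    return batch, attn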
def create_patch_heatmaps_indiv(patch, model256, output_dir, fname, threshold=0.5,
offset=16, alpha=0.5, cmap=plt.get_cmap('coolwarm'), device256=torch.device('cpu')):
r"""
Creates patch heatmaps (saved individually)
Args:
- patch (PIL.Image): 256 x 256 Image
- model256 (torch.nn): 256-Level ViT
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
- offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
Returns:
- None
"""
patch1 = patch.copy()
patch2 = add_margin(patch.crop((16,16,256,256)), top=0, left=0, bottom=16, right=16, color=(255,255,255))
b256_1, a256_1 = get_patch_attention_scores(patch1, model256, device256=device256)
b256_2, a256_2 = get_patch_attention_scores(patch2, model256, device256=device256)
save_region = np.array(patch.copy())
s = 256
offset_2 = offset
if threshold is not None:
for i in range(6):
score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
mask256 = score256.copy()
mask256[mask256 < threshold] = 0
mask256[mask256 > threshold] = 0.95
color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
region256_hm[mask256==0] = 0
img_inverse = save_region.copy()
img_inverse[mask256 == 0.95] = 0
Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))
for i in range(6):
score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region256_hm).save(os.path.join(output_dir, '%s_256[%s].png' % (fname, i)))
def create_patch_heatmaps_concat(patch, model256, output_dir, fname, threshold=0.5,
offset=16, alpha=0.5, cmap=plt.get_cmap('coolwarm')):
r"""
Creates patch heatmaps (concatenated for easy comparison)
Args:
- patch (PIL.Image): 256 x 256 Image
- model256 (torch.nn): 256-Level ViT
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
- offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
Returns:
- None
"""
patch1 = patch.copy()
patch2 = add_margin(patch.crop((16,16,256,256)), top=0, left=0, bottom=16, right=16, color=(255,255,255))
b256_1, a256_1 = get_patch_attention_scores(patch1, model256)
b256_2, a256_2 = get_patch_attention_scores(patch2, model256)
save_region = np.array(patch.copy())
s = 256
offset_2 = offset
if threshold is not None:
ths = []
for i in range(6):
score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
mask256 = score256.copy()
mask256[mask256 < threshold] = 0
mask256[mask256 > threshold] = 0.95
color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
region256_hm[mask256==0] = 0
img_inverse = save_region.copy()
img_inverse[mask256 == 0.95] = 0
ths.append(region256_hm+img_inverse)
ths = [Image.fromarray(img) for img in ths]
getConcatImage([getConcatImage(ths[0:3]),
getConcatImage(ths[3:6])], how='vertical').save(os.path.join(output_dir, '%s_256th.png' % (fname)))
hms = []
for i in range(6):
score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
hms.append(region256_hm)
hms = [Image.fromarray(img) for img in hms]
getConcatImage([getConcatImage(hms[0:3]),
getConcatImage(hms[3:6])], how='vertical').save(os.path.join(output_dir, '%s_256hm.png' % (fname)))
def hipt_forward_pass(region, model256, model4k, scale=1,
device256=torch.device('cpu'),
device4k=torch.device('cpu')):
r"""
Full HIPT forward pass: unfolds a 4096 x 4096 region into 256 x 256 patches, embeds each with ViT-256,
then aggregates the resulting 16 x 16 feature grid with ViT-4K into a single [1, 192] region-level embedding.
"""
t = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
)
])
with torch.no_grad():
batch_256 = t(region).unsqueeze(0).unfold(2, 256, 256).unfold(3, 256, 256)
batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')
batch_256 = batch_256.to(device256, non_blocking=True)
features_256 = model256(batch_256)
features_256 = features_256.unfold(0, 16, 16).transpose(0,1).unsqueeze(dim=0)
features_4096 = model4k.forward(features_256.to(device4k))
return features_4096
def get_region_attention_scores(region, model256, model4k, scale=1,
device256=torch.device('cpu'),
device4k=torch.device('cpu')):
r"""
Forward pass in hierarchical model with attention scores saved.
Args:
- region (PIL.Image): 4096 x 4096 Image
- model256 (torch.nn): 256-Level ViT
- model4k (torch.nn): 4096-Level ViT
- scale (int): How much to scale the output image by (e.g. - scale=4 will resize images to be 1024 x 1024.)
Returns:
- np.array: [256, 256/scale, 256/scale, 3] np.array sequence of image patches from the 4K x 4K region.
- attention_256 (np.array): [256, nh, 256/scale, 256/scale] array of patch-level attention maps, one per patch and head.
- attention_4k (np.array): [nh, 4096/scale, 4096/scale] array of region-level attention maps, one per head.
"""
t = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
)
])
with torch.no_grad():
batch_256 = t(region).unsqueeze(0).unfold(2, 256, 256).unfold(3, 256, 256)
batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')
batch_256 = batch_256.to(device256, non_blocking=True)
features_256 = model256(batch_256)
attention_256 = model256.get_last_selfattention(batch_256)
nh = attention_256.shape[1] # number of head
attention_256 = attention_256[:, :, 0, 1:].reshape(256, nh, -1)
attention_256 = attention_256.reshape(256, nh, 16, 16)
attention_256 = nn.functional.interpolate(attention_256, scale_factor=int(16/scale), mode="nearest").cpu().numpy()
features_4096 = features_256.unfold(0, 16, 16).transpose(0,1).unsqueeze(dim=0)
attention_4096 = model4k.get_last_selfattention(features_4096.detach().to(device4k))
nh = attention_4096.shape[1] # number of head
attention_4096 = attention_4096[0, :, 0, 1:].reshape(nh, -1)
attention_4096 = attention_4096.reshape(nh, 16, 16)
attention_4096 = nn.functional.interpolate(attention_4096.unsqueeze(0), scale_factor=int(256/scale), mode="nearest")[0].cpu().numpy()
if scale != 1:
batch_256 = nn.functional.interpolate(batch_256, scale_factor=(1/scale), mode="nearest")
return tensorbatch2im(batch_256), attention_256, attention_4096
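# --- Editor's usage sketch (hedged; assumes models built with get_vit256 / get_vit4k;
# the random image stands in for a real 4096 x 4096 region) ---
def _demo_region_attention(model256, model4k):
    region = Image.fromarray(np.uint8(np.random.rand(4096, 4096, 3) * 255))
    b256, a256, a4k = get_region_attention_scores(region, model256, model4k, scale=4)
    # b256: [256, 64, 64, 3]; a256: [256, nh, 64, 64]; a4k: [nh, 1024, 1024]
    return b256, a256, a4k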
def create_hierarchical_heatmaps_indiv(region, model256, model4k, output_dir, fname,
offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm'), threshold=None):
r"""
Creates hierarchical heatmaps (Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps saved individually).
Args:
- region (PIL.Image): 4096 x 4096 Image
- model256 (torch.nn): 256-Level ViT
- model4k (torch.nn): 4096-Level ViT
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
- offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- scale (int): How much to scale the output image by
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
Returns:
- None
"""
region2 = add_margin(region.crop((128,128,4096,4096)),
top=0, left=0, bottom=128, right=128, color=(255,255,255))
region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
top=0, left=0, bottom=128*3, right=128*3, color=(255,255,255)) # 128*3 pads the crop back to 4096 x 4096 (128*4 yields 4224)
b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale)
b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale)
b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale)
b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale)
offset_2 = (offset*1)//scale
offset_3 = (offset*2)//scale
offset_4 = (offset*3)//scale
s = 4096//scale
save_region = np.array(region.resize((s, s)))
if threshold is not None:
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
mask256 = score256.copy()
mask256[mask256 < threshold] = 0
mask256[mask256 > threshold] = 0.95
color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
region256_hm[mask256==0] = 0
img_inverse = save_region.copy()
img_inverse[mask256 == 0.95] = 0
Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))
if False: # disabled branch: unblended single-pass ViT-4K heatmaps, kept for reference
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
score4k = score4k_1 / 100
color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region4k_hm).save(os.path.join(output_dir, '%s_4k[%s].png' % (fname, j)))
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
new_score4k_2 = np.zeros_like(score4k_2)
new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
new_score4k_3 = np.zeros_like(score4k_3)
new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
new_score4k_4 = np.zeros_like(score4k_4)
new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
overlay4k = np.ones_like(score4k_2)*100
overlay4k[offset_2:s, offset_2:s] += 100
overlay4k[offset_3:s, offset_3:s] += 100
overlay4k[offset_4:s, offset_4:s] += 100
score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region4k_hm).save(os.path.join(output_dir, '%s_1024[%s].png' % (fname, j)))
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100
overlay256[offset_2:s, offset_2:s] += 100
score256 = (score256_1+new_score256_2)/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region256_hm).save(os.path.join(output_dir, '%s_256[%s].png' % (fname, i)))
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
new_score4k_2 = np.zeros_like(score4k_2)
new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
new_score4k_3 = np.zeros_like(score4k_3)
new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
new_score4k_4 = np.zeros_like(score4k_4)
new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
overlay4k = np.ones_like(score4k_2)*100
overlay4k[offset_2:s, offset_2:s] += 100
overlay4k[offset_3:s, offset_3:s] += 100
overlay4k[offset_4:s, offset_4:s] += 100
score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100*2
overlay256[offset_2:s, offset_2:s] += 100*2
score256 = (score256_1+new_score256_2)*2/overlay256
factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
Image.fromarray(region_hm).save(os.path.join(output_dir, '%s_factorized_4k[%s]_256[%s].png' % (fname, j, i)))
return
def create_hierarchical_heatmaps_concat(region, model256, model4k, output_dir, fname,
offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm')):
r"""
Creates hierarchical heatmaps (With Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps concatenated for easy comparison)
Args:
- region (PIL.Image): 4096 x 4096 Image
- model256 (torch.nn): 256-Level ViT
- model4k (torch.nn): 4096-Level ViT
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
- offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- scale (int): How much to scale the output image by
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
Returns:
- None
"""
region2 = add_margin(region.crop((128,128,4096,4096)),
top=0, left=0, bottom=128, right=128, color=(255,255,255))
region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
top=0, left=0, bottom=128*3, right=128*3, color=(255,255,255)) # 128*3 pads the crop back to 4096 x 4096 (128*4 yields 4224)
b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale)
b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale)
b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale)
b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale)
offset_2 = (offset*1)//scale
offset_3 = (offset*2)//scale
offset_4 = (offset*3)//scale
s = 4096//scale
save_region = np.array(region.resize((s, s)))
for j in range(6):
score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
new_score4k_2 = np.zeros_like(score4k_2)
new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
new_score4k_3 = np.zeros_like(score4k_3)
new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
new_score4k_4 = np.zeros_like(score4k_4)
new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
overlay4k = np.ones_like(score4k_2)*100
overlay4k[offset_2:s, offset_2:s] += 100
overlay4k[offset_3:s, offset_3:s] += 100
overlay4k[offset_4:s, offset_4:s] += 100
score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
color_block4k = (cmap(score4k_1/100)*255)[:,:,:3].astype(np.uint8) # colors only the unshifted head map; the blended score4k feeds the final overlay below
region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
for i in range(6):
score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100*2
overlay256[offset_2:s, offset_2:s] += 100*2
score256 = (score256_1+new_score256_2)*2/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
pad = 100
canvas = Image.new('RGB', (s*2+pad,)*2, (255,)*3)
draw = ImageDraw.Draw(canvas)
font = ImageFont.truetype("arial.ttf", 50)
# layout below hardcodes 1024, i.e. it assumes the default scale=4 (s == 1024)
draw.text((1024*0.5-pad*2, pad//4), "ViT-256 (Head: %d)" % i, (0, 0, 0), font=font)
canvas = canvas.rotate(90)
draw = ImageDraw.Draw(canvas)
draw.text((1024*1.5-pad, pad//4), "ViT-4K (Head: %d)" % j, (0, 0, 0), font=font)
canvas.paste(Image.fromarray(save_region), (pad,pad))
canvas.paste(Image.fromarray(region4k_hm), (1024+pad,pad))
canvas.paste(Image.fromarray(region256_hm), (pad,1024+pad))
canvas.paste(Image.fromarray(region_hm), (s+pad,s+pad))
canvas.save(os.path.join(output_dir, '%s_4k[%s]_256[%s].png' % (fname, j, i)))
return
def create_hierarchical_heatmaps_concat_select(region, model256, model4k, output_dir, fname,
offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm')):
r"""
Creates hierarchical heatmaps (With Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps concatenated for easy comparison)
Note that only select attention heads are used.
Args:
- region (PIL.Image): 4096 x 4096 Image
- model256 (torch.nn): 256-Level ViT
- model4k (torch.nn): 4096-Level ViT
- output_dir (str): Save directory / subdirectory
- fname (str): Naming structure of files
- offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
- scale (int): How much to scale the output image by
- alpha (float): Image blending factor for cv2.addWeighted
- cmap (matplotlib.pyplot): Colormap for creating heatmaps
Returns:
- None
"""
region2 = add_margin(region.crop((128,128,4096,4096)),
top=0, left=0, bottom=128, right=128, color=(255,255,255))
region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
top=0, left=0, bottom=128*3, right=128*3, color=(255,255,255)) # 128*3 pads the crop back to 4096 x 4096 (128*4 yields 4224)
b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale)
b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale)
b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale)
b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale)
offset_2 = (offset*1)//scale
offset_3 = (offset*2)//scale
offset_4 = (offset*3)//scale
s = 4096//scale
save_region = np.array(region.resize((s, s)))
canvas = [[Image.fromarray(save_region), None, None], [None, None, None]]
for idx_4k, j in enumerate([0,5]):
score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
new_score4k_2 = np.zeros_like(score4k_2)
new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
new_score4k_3 = np.zeros_like(score4k_3)
new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
new_score4k_4 = np.zeros_like(score4k_4)
new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
overlay4k = np.ones_like(score4k_2)*100
overlay4k[offset_2:s, offset_2:s] += 100
overlay4k[offset_3:s, offset_3:s] += 100
overlay4k[offset_4:s, offset_4:s] += 100
score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
color_block4k = (cmap(score4k_1/100)*255)[:,:,:3].astype(np.uint8)
region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
canvas[0][idx_4k+1] = Image.fromarray(region4k_hm)
for idx_256, i in enumerate([2]):
score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
new_score256_2 = np.zeros_like(score256_2)
new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
overlay256 = np.ones_like(score256_2)*100*2
overlay256[offset_2:s, offset_2:s] += 100*2
score256 = (score256_1+new_score256_2)*2/overlay256
color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
canvas[idx_256+1][0] = Image.fromarray(region256_hm)
factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
canvas[idx_256+1][idx_4k+1] = Image.fromarray(region_hm)
canvas = getConcatImage([getConcatImage(row) for row in canvas], how='vertical')
canvas.save(os.path.join(output_dir, '%s_heatmap.png' % (fname)))
return | 36,576 | 44.10111 | 141 | py |
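A minimal usage sketch for the heatmap routine above. The image file, output directory, and pre-loaded model variables are illustrative assumptions, not part of the original file:

from PIL import Image

# model256, model4k: pre-loaded ViT-256 / ViT-4K models (loading them is outside this sketch)
region = Image.open('region_4096.png').convert('RGB')   # a 4096 x 4096 H&E crop
create_hierarchical_heatmaps_concat_select(region, model256, model4k,
                                           output_dir='heatmaps', fname='slide01',
                                           offset=128, scale=4, alpha=0.5)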
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/main.py | ### Base Packages
from __future__ import print_function
import argparse
import pdb
import os
import math
### Numerical Packages
import numpy as np
import pandas as pd
### Internal Imports
from datasets.dataset_generic import Generic_WSI_Classification_Dataset, Generic_MIL_Dataset
from utils.file_utils import save_pkl, load_pkl
from utils.utils import *
from utils.core_utils import train
### PyTorch Imports
import torch
from torch.utils.data import DataLoader, sampler
import torch.nn as nn
import torch.nn.functional as F
##### Train-Val-Test Loop for 10-Fold CV
def main(args):
### Creates Results Directory (if not previously created)
if not os.path.isdir(args.results_dir):
os.mkdir(args.results_dir)
    ### Which folds to evaluate + iterate over
if args.k_start == -1:
start = 0
else:
start = args.k_start
if args.k_end == -1:
end = args.k
else:
end = args.k_end
### 10-Fold CV Loop.
all_test_auc, all_val_auc = [], []
all_test_acc, all_val_acc= [], []
folds = np.arange(start, end)
for i in folds:
seed_torch(args.seed) ### Sets the Torch.Seed
train_dataset, val_dataset, test_dataset = dataset.return_splits(from_id=False, csv_path='{}/splits_{}.csv'.format(args.split_dir, i))
datasets = (train_dataset, val_dataset, test_dataset)
results, test_auc, val_auc, test_acc, val_acc = train(datasets, i, args)
all_test_auc.append(test_auc)
all_val_auc.append(val_auc)
all_test_acc.append(test_acc)
all_val_acc.append(val_acc)
### Writes results to PKL File
filename = os.path.join(args.results_dir, 'split_{}_results.pkl'.format(i))
save_pkl(filename, results)
### Saves results as a CSV file
final_df = pd.DataFrame({'folds': folds, 'test_auc': all_test_auc, 'val_auc': all_val_auc, 'test_acc': all_test_acc, 'val_acc' : all_val_acc})
if len(folds) != args.k:
save_name = 'summary_partial_{}_{}.csv'.format(start, end)
else:
save_name = 'summary.csv'
final_df.to_csv(os.path.join(args.results_dir, save_name))
##### Argparser
### (Default) Training settings
parser = argparse.ArgumentParser(description='Configurations for WSI Training')
parser.add_argument('--data_root_dir', type=str, default='/media/ssd1/pan-cancer', help='data directory')
parser.add_argument('--max_epochs', type=int, default=20, help='maximum number of epochs to train (default: 20)')
parser.add_argument('--lr', type=float, default=2e-4, help='learning rate (default: 2e-4)')
parser.add_argument('--label_frac', type=float, default=1.0, help='fraction of training labels (default: 1.0)')
parser.add_argument('--reg', type=float, default=1e-5, help='weight decay (default: 1e-5)')
parser.add_argument('--seed', type=int, default=1, help='random seed for reproducible experiment (default: 1)')
parser.add_argument('--k', type=int, default=10, help='number of folds (default: 10)')
parser.add_argument('--k_start', type=int, default=-1, help='start fold (default: -1, first fold)')
parser.add_argument('--k_end', type=int, default=-1, help='end fold (default: -1, last fold)')
parser.add_argument('--results_dir', type=str, default='./results', help='results directory (default: ./results)')
parser.add_argument('--opt', type=str, choices = ['adam', 'sgd'], default='adam')
parser.add_argument('--bag_loss', type=str, choices=['svm', 'ce'], default='ce', help='slide-level classification loss function (default: ce)')
parser.add_argument('--model_size', type=str, choices=['small', 'big'], default='small', help='size of model, does not affect mil')
parser.add_argument('--log_data', action='store_true', default=True, help='log data using tensorboard')
parser.add_argument('--testing', action='store_true', default=False, help='debugging tool')
parser.add_argument('--early_stopping', action='store_true', default=False, help='enable early stopping')
parser.add_argument('--drop_out', action='store_true', default=True, help='enable dropout (p=0.25)')
parser.add_argument('--weighted_sample',action='store_true', default=True, help='enable weighted sampling')
### CLAM specific options
parser.add_argument('--bag_weight', type=float, default=0.7, help='clam: weight coefficient for bag-level loss (default: 0.7)')
parser.add_argument('--B', type=int, default=8, help='number of positive/negative patches to sample for clam')
parser.add_argument('--inst_loss', type=str, choices=['svm', 'ce', None], default='svm', help='instance-level clustering loss function (default: svm)')
parser.add_argument('--no_inst_cluster',action='store_true', default=False, help='disable instance-level clustering')
parser.add_argument('--subtyping', action='store_true', default=False, help='subtyping problem')
### Options Used
parser.add_argument('--model_type', type=str, default='clam_sb', help='Type of model to use',
choices=['clam_sb', 'clam_mb', 'mil', 'dgcn', 'mi_fcn', 'dsmil', 'hipt_n', 'hipt_lgp'])
parser.add_argument('--features', type=str, default='vits_tcga_pancancer_dino', help='Which features to use',
choices=['resnet50_trunc', 'vits_tcga_pancancer_dino'])
parser.add_argument('--task', type=str, default='tcga_lung_subtype', help='Which weakly-supervised task to evaluate on.')
parser.add_argument('--path_input_dim', type=int, default=384, help='Size of patch embedding size (384 for DINO)')
parser.add_argument('--mode', type=str, default='path', help='Which features to load')
parser.add_argument('--prop', type=float, default=1.0, help='Proportion of training dataset to use')
parser.add_argument('--pretrain_4k', type=str, default='None', help='Whether to initialize the 4K Transformer in HIPT', choices=['None', 'vit4k_xs_dino'])
parser.add_argument('--pretrain_WSI', type=str, default='None', help='Whether to initialize the WSI Transformer in HIPT (referenced when building the experiment code below)')
parser.add_argument('--freeze_4k', action='store_true', default=False, help='Whether to freeze the 4K Transformer in HIPT')
parser.add_argument('--freeze_WSI', action='store_true', default=False, help='Whether to freeze the WSI Transformer in HIPT')
args = parser.parse_args()
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
##### Creating Experiment Code
### 1. If HIPT, set the mode to be 'pyramid'
if 'hipt' in args.model_type:
args.mode = 'pyramid'
### 2. If using 'hipt_lgp' (HIPT with local-global pretraining), modify the experiment code for any freezing + pretraining
if args.model_type == 'hipt_lgp':
if args.freeze_4k and (not args.freeze_WSI):
model_code = 'hipt_lgp[%s]_freeze_[%s]' % (args.pretrain_4k, args.pretrain_WSI)
else:
model_code = 'hipt_lgp[%s]_[%s]' % (args.pretrain_4k, args.pretrain_WSI)
else:
model_code = args.model_type
### 3. Add embedding dimension in the experiment code.
if args.path_input_dim != 384:
model_code += '_%d' % args.path_input_dim
### 4. Add task information in the experiment code.
if 'subtype' in args.task:
args.exp_code = '%s_%s_%s_%0.2f' % (args.task, model_code, args.features, args.prop)
args.splits = '10foldcv_subtype'
args.split_dir = './splits/%s/%s' % (args.splits, '_'.join(args.task.split('_')[:2]))
print("Setting Splits Directory...", args.split_dir)
##### Setting the seed + log settings
def seed_torch(seed=7):
import random
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if device.type == 'cuda':
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
seed_torch(args.seed)
encoding_size = 1024
settings = {'num_splits': args.k,
'k_start': args.k_start,
'k_end': args.k_end,
'task': args.task,
'max_epochs': args.max_epochs,
'results_dir': args.results_dir,
'lr': args.lr,
'experiment': args.exp_code,
'reg': args.reg,
'label_frac': args.label_frac,
'bag_loss': args.bag_loss,
'seed': args.seed,
'model_type': args.model_type,
'model_size': args.model_size,
"use_drop_out": args.drop_out,
'weighted_sample': args.weighted_sample,
'opt': args.opt}
if args.model_type in ['clam_sb', 'clam_mb']:
settings.update({'bag_weight': args.bag_weight,
'inst_loss': args.inst_loss,
'B': args.B})
##### Loading the dataset
print('\nLoad Dataset')
print(args.task)
study = "_".join(args.task.split('_')[:2])
if args.mode == 'pyramid':
study_dir = '{}/extracted_mag20x_patch4096_fp/{}_pt_patch_features_384'.format(study, args.features)
else:
study_dir = '{}/extracted_mag20x_patch256_fp/{}_pt_patch_features'.format(study, args.features)
if args.task == 'tcga_lung_subtype':
args.n_classes = 2
dataset = Generic_MIL_Dataset(csv_path = './dataset_csv/tcga_lung_subset.csv.zip',
data_dir= os.path.join(args.data_root_dir, study_dir),
mode=args.mode,
shuffle = False,
seed = args.seed,
print_info = True,
label_col='oncotree_code',
label_dict = {'LUAD':0, 'LUSC':1},
patient_strat=False,
prop=args.prop,
ignore=[])
elif args.task == 'tcga_kidney_subtype':
args.n_classes = 3
dataset = Generic_MIL_Dataset(csv_path = './dataset_csv/tcga_kidney_subset.csv.zip',
data_dir= os.path.join(args.data_root_dir, study_dir),
mode=args.mode,
shuffle = False,
seed = args.seed,
print_info = True,
label_col='oncotree_code',
label_dict = {'CCRCC':0, 'PRCC':1, 'CHRCC':2},
patient_strat=False,
prop=args.prop,
ignore=[])
elif args.task == 'tcga_brca_subtype':
args.n_classes = 2
dataset = Generic_MIL_Dataset(csv_path = './dataset_csv/tcga_brca_subset.csv.zip',
data_dir= os.path.join(args.data_root_dir, study_dir),
mode=args.mode,
shuffle = False,
seed = args.seed,
print_info = True,
label_col='oncotree_code',
label_dict = {'IDC':0, 'ILC':1},
patient_strat=False,
prop=args.prop,
ignore=['MDLC', 'PD', 'ACBC', 'IMMC', 'BRCNOS', 'BRCA', 'SPC', 'MBC', 'MPT'])
else:
raise NotImplementedError
if not os.path.isdir(args.results_dir):
os.mkdir(args.results_dir)
if 'subtype' in args.task:
exp_folder = args.task
args.results_dir = os.path.join(args.results_dir, exp_folder, str(args.exp_code) + '_none_s%d' % (args.seed))
if not os.path.isdir(args.results_dir):
os.makedirs(args.results_dir, exist_ok=True)
else:
if 'summary.csv' in os.listdir(args.results_dir):
print("Exp Code <%s> already exists! Exiting script." % args.exp_code)
import sys
sys.exit()
print('split_dir: ', args.split_dir)
assert os.path.isdir(args.split_dir)
settings.update({'split_dir': args.split_dir})
with open(args.results_dir + '/experiment_{}.txt'.format(args.exp_code), 'w') as f:
print(settings, file=f)
print("################# Settings ###################")
for key, val in settings.items():
print("{}: {}".format(key, val))
if __name__ == "__main__":
results = main(args)
print("finished!")
print("end script")
| 12,119 | 45.259542 | 157 | py |
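An illustrative invocation of the training script above; the data path is an assumption, while the flag names match the argparser:

    python main.py --data_root_dir /path/to/tcga_features --task tcga_brca_subtype --model_type hipt_lgp --pretrain_4k vit4k_xs_dino --freeze_4k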
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_utils.py | from collections import OrderedDict
from os.path import join
import math
import pdb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Attention Network without Gating (2 fc layers)
args:
L: input feature dimension
D: hidden layer dimension
dropout: whether to use dropout (p = 0.25)
n_classes: number of classes
"""
class Attn_Net(nn.Module):
def __init__(self, L = 1024, D = 256, dropout = False, n_classes = 1):
super(Attn_Net, self).__init__()
self.module = [
nn.Linear(L, D),
nn.Tanh()]
if dropout:
self.module.append(nn.Dropout(0.25))
self.module.append(nn.Linear(D, n_classes))
self.module = nn.Sequential(*self.module)
def forward(self, x):
return self.module(x), x # N x n_classes
"""
Attention Network with Sigmoid Gating (3 fc layers)
args:
L: input feature dimension
D: hidden layer dimension
dropout: whether to use dropout (p = 0.25)
n_classes: number of classes
"""
class Attn_Net_Gated(nn.Module):
def __init__(self, L = 1024, D = 256, dropout = False, n_classes = 1):
r"""
Attention Network with Sigmoid Gating (3 fc layers)
args:
L (int): input feature dimension
D (int): hidden layer dimension
dropout (bool): whether to apply dropout (p = 0.25)
n_classes (int): number of classes
"""
super(Attn_Net_Gated, self).__init__()
self.attention_a = [
nn.Linear(L, D),
nn.Tanh()]
self.attention_b = [nn.Linear(L, D), nn.Sigmoid()]
if dropout:
self.attention_a.append(nn.Dropout(0.25))
self.attention_b.append(nn.Dropout(0.25))
self.attention_a = nn.Sequential(*self.attention_a)
self.attention_b = nn.Sequential(*self.attention_b)
self.attention_c = nn.Linear(D, n_classes)
def forward(self, x):
a = self.attention_a(x)
b = self.attention_b(x)
A = a.mul(b)
A = self.attention_c(A) # N x n_classes
return A, x
def init_max_weights(module):
r"""
Initialize Weights function.
args:
        module (torch.nn.Module): module whose Linear layers are initialized from a normal distribution
"""
import math
import torch.nn as nn
for m in module.modules():
if type(m) == nn.Linear:
stdv = 1. / math.sqrt(m.weight.size(1))
m.weight.data.normal_(0, stdv)
m.bias.data.zero_() | 2,562 | 25.978947 | 77 | py |
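A minimal sketch of attention pooling with the gated module above (the random bag is illustrative; the transpose/softmax/matmul pattern mirrors the MIL models later in this dump):

import torch
import torch.nn.functional as F

attn = Attn_Net_Gated(L=1024, D=256, dropout=True, n_classes=1)
bag = torch.randn(500, 1024)                    # N = 500 instance embeddings
A, h = attn(bag)                                # A: [500 x 1] unnormalized scores
A = F.softmax(torch.transpose(A, 1, 0), dim=1)  # [1 x 500], normalized over instances
pooled = torch.mm(A, h)                         # [1 x 1024] bag-level representation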
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_dgcn.py | from os.path import join
from collections import OrderedDict
import pdb
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Sequential as Seq
from torch.nn import Linear, LayerNorm, ReLU
from torch_geometric.nn import GINConv  # required by DeepGraphConv below
#from torch_geometric.transforms.normalize_features import NormalizeFeatures
from models.model_utils import *
######################################
# DeepGraphConv Implementation #
######################################
class DeepGraphConv(torch.nn.Module):
def __init__(self, edge_agg='latent', resample=0, num_features=1024, hidden_dim=256,
linear_dim=256, use_edges=False, dropout=0.25, n_classes=4):
super(DeepGraphConv, self).__init__()
self.use_edges = use_edges
self.resample = resample
self.edge_agg = edge_agg
if self.resample > 0:
self.fc = nn.Sequential(*[nn.Dropout(self.resample)])
self.conv1 = GINConv(Seq(nn.Linear(num_features, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim)))
self.conv2 = GINConv(Seq(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim)))
self.conv3 = GINConv(Seq(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim)))
self.path_attention_head = Attn_Net_Gated(L=hidden_dim, D=hidden_dim, dropout=dropout, n_classes=1)
self.path_rho = nn.Sequential(*[nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Dropout(dropout)])
self.classifier = torch.nn.Linear(hidden_dim, n_classes)
def relocate(self):
from torch_geometric.nn import DataParallel
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count() >= 1:
device_ids = list(range(torch.cuda.device_count()))
self.conv1 = nn.DataParallel(self.conv1, device_ids=device_ids).to('cuda:0')
self.conv2 = nn.DataParallel(self.conv2, device_ids=device_ids).to('cuda:0')
self.conv3 = nn.DataParallel(self.conv3, device_ids=device_ids).to('cuda:0')
self.path_attention_head = nn.DataParallel(self.path_attention_head, device_ids=device_ids).to('cuda:0')
self.path_rho = self.path_rho.to(device)
self.classifier = self.classifier.to(device)
def forward(self, **kwargs):
data = kwargs['x_path']
x = data.x
if self.edge_agg == 'spatial':
edge_index = data.edge_index
elif self.edge_agg == 'latent':
edge_index = data.edge_latent
batch = data.batch
edge_attr = None
if self.resample:
x = self.fc(x)
x1 = F.relu(self.conv1(x=x, edge_index=edge_index))
x2 = F.relu(self.conv2(x1, edge_index, edge_attr))
x3 = F.relu(self.conv3(x2, edge_index, edge_attr))
h_path = x3
A_path, h_path = self.path_attention_head(h_path)
A_path = torch.transpose(A_path, 1, 0)
h_path = torch.mm(F.softmax(A_path, dim=1) , h_path)
h_path = self.path_rho(h_path).squeeze()
h = h_path # [256] vector
logits = self.classifier(h).unsqueeze(0) # logits needs to be a [1 x 4] vector
Y_prob = F.softmax(logits, dim = 1)
Y_hat = torch.topk(logits, 1, dim = 1)[1]
        return logits, Y_prob, Y_hat, 0, 0 # the last two returned values are just dummy vars | 3,422 | 41.7875 | 116 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_mil.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import initialize_weights
import numpy as np
class MIL_fc(nn.Module):
def __init__(self, path_input_dim=384, gate = True, size_arg = "small", dropout = False, n_classes = 2, top_k=1):
super(MIL_fc, self).__init__()
assert n_classes == 2
self.size_dict = {"small": [path_input_dim, path_input_dim]}
size = self.size_dict[size_arg]
fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
if dropout:
fc.append(nn.Dropout(0.25))
fc.append(nn.Linear(size[1], n_classes))
self.classifier= nn.Sequential(*fc)
initialize_weights(self)
self.top_k=top_k
def relocate(self):
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.classifier.to(device)
    def forward(self, h, return_features=False, **kwargs):
        if return_features:
            # run every layer except the final classification head so the features can be returned
            h = self.classifier[:-1](h)
            logits = self.classifier[-1](h)
        else:
            logits = self.classifier(h) # K x 1
y_probs = F.softmax(logits, dim = 1)
top_instance_idx = torch.topk(y_probs[:, 1], self.top_k, dim=0)[1].view(1,)
top_instance = torch.index_select(logits, dim=0, index=top_instance_idx)
Y_hat = torch.topk(top_instance, 1, dim = 1)[1]
Y_prob = F.softmax(top_instance, dim = 1)
results_dict = {}
if return_features:
top_features = torch.index_select(h, dim=0, index=top_instance_idx)
results_dict.update({'features': top_features})
return top_instance, Y_prob, Y_hat, y_probs, results_dict
class MIL_fc_mc(nn.Module):
def __init__(self, path_input_dim=384, gate = True, size_arg = "small", dropout = False, n_classes = 2, top_k=1):
super(MIL_fc_mc, self).__init__()
assert n_classes > 2
self.size_dict = {"small": [path_input_dim, path_input_dim]}
size = self.size_dict[size_arg]
fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
if dropout:
fc.append(nn.Dropout(0.25))
self.fc = nn.Sequential(*fc)
self.classifiers = nn.ModuleList([nn.Linear(size[1], 1) for i in range(n_classes)])
initialize_weights(self)
self.top_k=top_k
self.n_classes = n_classes
assert self.top_k == 1
def relocate(self):
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.fc = self.fc.to(device)
self.classifiers = self.classifiers.to(device)
def forward(self, h, return_features=False, **kwargs):
device = h.device
h = self.fc(h)
logits = torch.empty(h.size(0), self.n_classes).float().to(device)
for c in range(self.n_classes):
if isinstance(self.classifiers, nn.DataParallel):
logits[:, c] = self.classifiers.module[c](h).squeeze(1)
else:
logits[:, c] = self.classifiers[c](h).squeeze(1)
y_probs = F.softmax(logits, dim = 1)
m = y_probs.view(1, -1).argmax(1)
top_indices = torch.cat(((m // self.n_classes).view(-1, 1), (m % self.n_classes).view(-1, 1)), dim=1).view(-1, 1)
top_instance = logits[top_indices[0]]
Y_hat = top_indices[1]
Y_prob = y_probs[top_indices[0]]
results_dict = {}
if return_features:
top_features = torch.index_select(h, dim=0, index=top_indices[0])
results_dict.update({'features': top_features})
return top_instance, Y_prob, Y_hat, y_probs, results_dict
| 3,647 | 36.22449 | 121 | py |
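A quick forward-pass sketch for the max-pooling MIL baseline above (random features stand in for precomputed patch embeddings):

import torch

model = MIL_fc(path_input_dim=384, dropout=True, n_classes=2)
bag = torch.randn(1000, 384)                  # one slide = a bag of 1000 patch embeddings
top_instance, Y_prob, Y_hat, y_probs, _ = model(bag)
print(Y_hat.item(), Y_prob)                   # predicted class + its probability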
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_clam.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import initialize_weights
import numpy as np
from models.model_utils import *
"""
args:
gate: whether to use gated attention network
size_arg: config for network size
    dropout: whether to use dropout (p = 0.25)
    k_sample: number of positive/neg patches to sample for instance-level training
n_classes: number of classes
instance_loss_fn: loss function to supervise instance-level training
subtyping: whether it's a subtyping problem
"""
class CLAM_SB(nn.Module):
def __init__(self, path_input_dim=1024, gate = True, size_arg = "small", dropout = False, k_sample=8, n_classes=2,
instance_loss_fn=nn.CrossEntropyLoss(), subtyping=False):
super(CLAM_SB, self).__init__()
self.size_dict = {"small": [path_input_dim, 512, 256], "big": [path_input_dim, 512, 384]}
size = self.size_dict[size_arg]
if path_input_dim == 384:
size = [384, 384, 256]
fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
if dropout:
fc.append(nn.Dropout(0.25))
if gate:
attention_net = Attn_Net_Gated(L = size[1], D = size[2], dropout = dropout, n_classes = 1)
else:
attention_net = Attn_Net(L = size[1], D = size[2], dropout = dropout, n_classes = 1)
fc.append(attention_net)
self.attention_net = nn.Sequential(*fc)
self.classifiers = nn.Linear(size[1], n_classes)
instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)]
self.instance_classifiers = nn.ModuleList(instance_classifiers)
self.k_sample = k_sample
self.instance_loss_fn = instance_loss_fn
self.n_classes = n_classes
self.subtyping = subtyping
initialize_weights(self)
def relocate(self):
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.attention_net = self.attention_net.to(device)
self.classifiers = self.classifiers.to(device)
self.instance_classifiers = self.instance_classifiers.to(device)
@staticmethod
def create_positive_targets(length, device):
return torch.full((length, ), 1, device=device, dtype=torch.long)
@staticmethod
def create_negative_targets(length, device):
return torch.full((length, ), 0, device=device, dtype=torch.long)
#instance-level evaluation for in-the-class attention branch
def inst_eval(self, A, h, classifier):
device=h.device
if len(A.shape) == 1:
A = A.view(1, -1)
top_p_ids = torch.topk(A, self.k_sample)[1][-1]
top_p = torch.index_select(h, dim=0, index=top_p_ids)
top_n_ids = torch.topk(-A, self.k_sample, dim=1)[1][-1]
top_n = torch.index_select(h, dim=0, index=top_n_ids)
p_targets = self.create_positive_targets(self.k_sample, device)
n_targets = self.create_negative_targets(self.k_sample, device)
all_targets = torch.cat([p_targets, n_targets], dim=0)
all_instances = torch.cat([top_p, top_n], dim=0)
logits = classifier(all_instances)
all_preds = torch.topk(logits, 1, dim = 1)[1].squeeze(1)
instance_loss = self.instance_loss_fn(logits, all_targets)
return instance_loss, all_preds, all_targets
#instance-level evaluation for out-of-the-class attention branch
def inst_eval_out(self, A, h, classifier):
device=h.device
if len(A.shape) == 1:
A = A.view(1, -1)
top_p_ids = torch.topk(A, self.k_sample)[1][-1]
top_p = torch.index_select(h, dim=0, index=top_p_ids)
p_targets = self.create_negative_targets(self.k_sample, device)
logits = classifier(top_p)
p_preds = torch.topk(logits, 1, dim = 1)[1].squeeze(1)
instance_loss = self.instance_loss_fn(logits, p_targets)
return instance_loss, p_preds, p_targets
def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False, **kwargs):
device = h.device
A, h = self.attention_net(h) # NxK
A = torch.transpose(A, 1, 0) # KxN
if attention_only:
return A
A_raw = A
A = F.softmax(A, dim=1) # softmax over N
if instance_eval:
total_inst_loss = 0.0
all_preds = []
all_targets = []
inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() #binarize label
for i in range(len(self.instance_classifiers)):
inst_label = inst_labels[i].item()
classifier = self.instance_classifiers[i]
if inst_label == 1: #in-the-class:
instance_loss, preds, targets = self.inst_eval(A, h, classifier)
all_preds.extend(preds.cpu().numpy())
all_targets.extend(targets.cpu().numpy())
else: #out-of-the-class
if self.subtyping:
instance_loss, preds, targets = self.inst_eval_out(A, h, classifier)
all_preds.extend(preds.cpu().numpy())
all_targets.extend(targets.cpu().numpy())
else:
continue
total_inst_loss += instance_loss
if self.subtyping:
total_inst_loss /= len(self.instance_classifiers)
M = torch.mm(A, h)
logits = self.classifiers(M)
Y_hat = torch.topk(logits, 1, dim = 1)[1]
Y_prob = F.softmax(logits, dim = 1)
if instance_eval:
results_dict = {'instance_loss': total_inst_loss, 'inst_labels': np.array(all_targets),
'inst_preds': np.array(all_preds)}
else:
results_dict = {}
if return_features:
results_dict.update({'features': M})
return logits, Y_prob, Y_hat, A_raw, results_dict
class CLAM_MB(CLAM_SB):
def __init__(self, gate = True, size_arg = "small", dropout = False, k_sample=8, n_classes=2,
instance_loss_fn=nn.CrossEntropyLoss(), subtyping=False):
nn.Module.__init__(self)
self.size_dict = {"small": [1024, 512, 256], "big": [1024, 512, 384]}
size = self.size_dict[size_arg]
fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
if dropout:
fc.append(nn.Dropout(0.25))
if gate:
attention_net = Attn_Net_Gated(L = size[1], D = size[2], dropout = dropout, n_classes = n_classes)
else:
attention_net = Attn_Net(L = size[1], D = size[2], dropout = dropout, n_classes = n_classes)
fc.append(attention_net)
self.attention_net = nn.Sequential(*fc)
        bag_classifiers = [nn.Linear(size[1], 1) for i in range(n_classes)] #use an independent linear layer to predict each class
self.classifiers = nn.ModuleList(bag_classifiers)
instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)]
self.instance_classifiers = nn.ModuleList(instance_classifiers)
self.k_sample = k_sample
self.instance_loss_fn = instance_loss_fn
self.n_classes = n_classes
self.subtyping = subtyping
initialize_weights(self)
def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False):
device = h.device
A, h = self.attention_net(h) # NxK
A = torch.transpose(A, 1, 0) # KxN
if attention_only:
return A
A_raw = A
A = F.softmax(A, dim=1) # softmax over N
if instance_eval:
total_inst_loss = 0.0
all_preds = []
all_targets = []
inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() #binarize label
for i in range(len(self.instance_classifiers)):
inst_label = inst_labels[i].item()
classifier = self.instance_classifiers[i]
if inst_label == 1: #in-the-class:
instance_loss, preds, targets = self.inst_eval(A[i], h, classifier)
all_preds.extend(preds.cpu().numpy())
all_targets.extend(targets.cpu().numpy())
else: #out-of-the-class
if self.subtyping:
instance_loss, preds, targets = self.inst_eval_out(A[i], h, classifier)
all_preds.extend(preds.cpu().numpy())
all_targets.extend(targets.cpu().numpy())
else:
continue
total_inst_loss += instance_loss
if self.subtyping:
total_inst_loss /= len(self.instance_classifiers)
M = torch.mm(A, h)
logits = torch.empty(1, self.n_classes).float().to(device)
for c in range(self.n_classes):
logits[0, c] = self.classifiers[c](M[c])
Y_hat = torch.topk(logits, 1, dim = 1)[1]
Y_prob = F.softmax(logits, dim = 1)
if instance_eval:
results_dict = {'instance_loss': total_inst_loss, 'inst_labels': np.array(all_targets),
'inst_preds': np.array(all_preds)}
else:
results_dict = {}
if return_features:
results_dict.update({'features': M})
return logits, Y_prob, Y_hat, A_raw, results_dict
| 9,447 | 43.990476 | 128 | py |
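A minimal sketch of a CLAM_SB training step; the random bag is illustrative, and the 0.7 / 0.3 weighting mirrors the --bag_weight default in main.py:

import torch
import torch.nn.functional as F

model = CLAM_SB(path_input_dim=384, dropout=True, k_sample=8, n_classes=2, subtyping=True)
bag = torch.randn(1000, 384)                         # patch embeddings for one slide
label = torch.tensor([1])                            # slide-level label
logits, Y_prob, Y_hat, A_raw, results = model(bag, label=label, instance_eval=True)
loss = 0.7 * F.cross_entropy(logits, label) + 0.3 * results['instance_loss']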
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_hierarchical_mil.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import numpy as np
from os.path import join
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.model_utils import *
import sys
sys.path.append('../HIPT_4K/')
from vision_transformer4k import vit4k_xs
######################################
# HIPT w/o Transformers #
######################################
class HIPT_None_FC(nn.Module):
def __init__(self, path_input_dim=384, size_arg = "small", dropout=0.25, n_classes=2):
super(HIPT_None_FC, self).__init__()
self.size_dict_path = {"small": [path_input_dim, 256, 256], "big": [path_input_dim, 512, 384]}
size = self.size_dict_path[size_arg]
### Local Aggregation
self.local_phi = nn.Sequential(
nn.Linear(size[0], size[1]), nn.ReLU(), nn.Dropout(0.25),
)
self.local_attn_pool = Attn_Net_Gated(L=size[1], D=size[1], dropout=0.25, n_classes=1)
### Global Aggregation
self.global_phi = nn.Sequential(
nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(0.25),
)
self.global_attn_pool = Attn_Net_Gated(L=size[1], D=size[1], dropout=0.25, n_classes=1)
self.global_rho = nn.Sequential(*[nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(0.25)])
self.classifier = nn.Linear(size[1], n_classes)
def forward(self, h, **kwargs):
x_256 = h
### Local
h_256 = self.local_phi(x_256)
A_256, h_256 = self.local_attn_pool(h_256)
A_256 = A_256.squeeze(dim=2) # A = torch.transpose(A, 1, 0)
A_256 = F.softmax(A_256, dim=1)
h_4096 = torch.bmm(A_256.unsqueeze(dim=1), h_256).squeeze(dim=1)
### Global
h_4096 = self.global_phi(h_4096)
A_4096, h_4096 = self.global_attn_pool(h_4096)
A_4096 = torch.transpose(A_4096, 1, 0)
A_4096 = F.softmax(A_4096, dim=1)
h_path = torch.mm(A_4096, h_4096)
h_path = self.global_rho(h_path)
logits = self.classifier(h_path)
Y_hat = torch.topk(logits, 1, dim = 1)[1]
Y_prob = F.softmax(logits, dim = 1)
return logits, Y_prob, Y_hat, None, None
######################################
# 3-Stage HIPT Implementation (With Local-Global Pretraining) #
######################################
class HIPT_LGP_FC(nn.Module):
def __init__(self, path_input_dim=384, size_arg = "small", dropout=0.25, n_classes=4,
pretrain_4k='None', freeze_4k=False, pretrain_WSI='None', freeze_WSI=False):
super(HIPT_LGP_FC, self).__init__()
self.size_dict_path = {"small": [384, 192, 192], "big": [1024, 512, 384]}
#self.fusion = fusion
size = self.size_dict_path[size_arg]
### Local Aggregation
self.local_vit = vit4k_xs()
if pretrain_4k != 'None':
print("Loading Pretrained Local VIT model...",)
state_dict = torch.load('../../HIPT_4K/Checkpoints/%s.pth' % pretrain_4k, map_location='cpu')['teacher']
state_dict = {k.replace('module.', ""): v for k, v in state_dict.items()}
state_dict = {k.replace('backbone.', ""): v for k, v in state_dict.items()}
missing_keys, unexpected_keys = self.local_vit.load_state_dict(state_dict, strict=False)
print("Done!")
if freeze_4k:
print("Freezing Pretrained Local VIT model")
for param in self.local_vit.parameters():
param.requires_grad = False
print("Done")
### Global Aggregation
self.pretrain_WSI = pretrain_WSI
if pretrain_WSI != 'None':
pass
else:
self.global_phi = nn.Sequential(nn.Linear(192, 192), nn.ReLU(), nn.Dropout(0.25))
self.global_transformer = nn.TransformerEncoder(
nn.TransformerEncoderLayer(
d_model=192, nhead=3, dim_feedforward=192, dropout=0.25, activation='relu'
),
num_layers=2
)
self.global_attn_pool = Attn_Net_Gated(L=size[1], D=size[1], dropout=0.25, n_classes=1)
self.global_rho = nn.Sequential(*[nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(0.25)])
self.classifier = nn.Linear(size[1], n_classes)
def forward(self, x_256, **kwargs):
### Local
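        # x_256: [M x 256 x 384] bags of ViT-256 patch tokens per 4K region; unfold +
        # transpose reshapes each bag into a [384 x 16 x 16] token grid for the region ViT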
h_4096 = self.local_vit(x_256.unfold(1, 16, 16).transpose(1,2))
### Global
if self.pretrain_WSI != 'None':
h_WSI = self.global_vit(h_4096.unsqueeze(dim=0))
else:
h_4096 = self.global_phi(h_4096)
h_4096 = self.global_transformer(h_4096.unsqueeze(1)).squeeze(1)
A_4096, h_4096 = self.global_attn_pool(h_4096)
A_4096 = torch.transpose(A_4096, 1, 0)
A_4096 = F.softmax(A_4096, dim=1)
h_path = torch.mm(A_4096, h_4096)
h_WSI = self.global_rho(h_path)
logits = self.classifier(h_WSI)
Y_hat = torch.topk(logits, 1, dim = 1)[1]
return logits, F.softmax(logits, dim=1), Y_hat, None, None
def relocate(self):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count() >= 1:
device_ids = list(range(torch.cuda.device_count()))
self.local_vit = nn.DataParallel(self.local_vit, device_ids=device_ids).to('cuda:0')
if self.pretrain_WSI != 'None':
self.global_vit = nn.DataParallel(self.global_vit, device_ids=device_ids).to('cuda:0')
if self.pretrain_WSI == 'None':
self.global_phi = self.global_phi.to(device)
self.global_transformer = self.global_transformer.to(device)
self.global_attn_pool = self.global_attn_pool.to(device)
self.global_rho = self.global_rho.to(device)
self.classifier = self.classifier.to(device)
######################################
# HIPT Implementation (With Local-Global Pretraining) #
######################################
class HIPT_GP_FC(nn.Module):
def __init__(self, path_input_dim=384, size_arg = "small", dropout=0.25, n_classes=4,
pretrain_WSI='None', freeze_WSI=False):
super(HIPT_GP_FC, self).__init__()
self.size_dict_path = {"small": [384, 192, 192], "big": [1024, 512, 384]}
size = self.size_dict_path[size_arg]
### Global Aggregation
self.pretrain_WSI = pretrain_WSI
if pretrain_WSI != 'None':
pass
else:
self.global_phi = nn.Sequential(nn.Linear(192, 192), nn.ReLU(), nn.Dropout(0.25))
self.global_transformer = nn.TransformerEncoder(
nn.TransformerEncoderLayer(
d_model=192, nhead=3, dim_feedforward=192, dropout=0.25, activation='relu'
),
num_layers=2
)
self.global_attn_pool = Attn_Net_Gated(L=size[1], D=size[1], dropout=0.25, n_classes=1)
self.global_rho = nn.Sequential(*[nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(0.25)])
self.classifier = nn.Linear(size[1], n_classes)
def forward(self, h_4096, **kwargs):
### Global
if self.pretrain_WSI != 'None':
h_WSI = self.global_vit(h_4096.unsqueeze(dim=0))
else:
h_4096 = self.global_phi(h_4096)
h_4096 = self.global_transformer(h_4096.unsqueeze(1)).squeeze(1)
A_4096, h_4096 = self.global_attn_pool(h_4096)
A_4096 = torch.transpose(A_4096, 1, 0)
A_4096 = F.softmax(A_4096, dim=1)
h_path = torch.mm(A_4096, h_4096)
h_WSI = self.global_rho(h_path)
logits = self.classifier(h_WSI)
Y_hat = torch.topk(logits, 1, dim = 1)[1]
return logits, F.softmax(logits, dim=1), Y_hat, None, None
def relocate(self):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count() >= 1:
device_ids = list(range(torch.cuda.device_count()))
if self.pretrain_WSI != 'None':
self.global_vit = nn.DataParallel(self.global_vit, device_ids=device_ids).to('cuda:0')
if self.pretrain_WSI == 'None':
self.global_phi = self.global_phi.to(device)
self.global_transformer = self.global_transformer.to(device)
self.global_attn_pool = self.global_attn_pool.to(device)
self.global_rho = self.global_rho.to(device)
self.classifier = self.classifier.to(device) | 8,672 | 39.528037 | 116 | py |
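A minimal forward-pass sketch for the two-stage HIPT model above. Random tokens stand in for precomputed ViT-256 features; the shapes follow the unfold in forward, and no pretrained 4K weights are loaded:

import torch

model = HIPT_LGP_FC(n_classes=2)              # pretrain_4k='None' -> randomly initialized
x_256 = torch.randn(8, 256, 384)              # 8 regions x 256 patch tokens x 384 dims
logits, Y_prob, Y_hat, _, _ = model(x_256)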
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_dsmil.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FCLayer(nn.Module):
def __init__(self, in_size, out_size=1):
super(FCLayer, self).__init__()
self.fc = nn.Sequential(nn.Linear(in_size, out_size))
def forward(self, feats, **kwargs):
x = self.fc(feats)
return feats, x
class IClassifier(nn.Module):
def __init__(self, feature_extractor, feature_size, output_class):
super(IClassifier, self).__init__()
self.feature_extractor = feature_extractor
self.fc = nn.Linear(feature_size, output_class)
def forward(self, x, **kwargs):
device = x.device
feats = self.feature_extractor(x) # N x K
c = self.fc(feats.view(feats.shape[0], -1)) # N x C
return feats.view(feats.shape[0], -1), c
class BClassifier(nn.Module):
def __init__(self, input_size, output_class, dropout_v=0.0): # K, L, N
super(BClassifier, self).__init__()
self.q = nn.Linear(input_size, 128)
self.v = nn.Sequential(
nn.Dropout(dropout_v),
nn.Linear(input_size, input_size)
)
### 1D convolutional layer that can handle multiple class (including binary)
self.fcc = nn.Conv1d(output_class, output_class, kernel_size=input_size)
def forward(self, feats, c, **kwargs): # N x K, N x C
device = feats.device
V = self.v(feats) # N x V, unsorted
Q = self.q(feats).view(feats.shape[0], -1) # N x Q, unsorted
# handle multiple classes without for loop
_, m_indices = torch.sort(c, 0, descending=True) # sort class scores along the instance dimension, m_indices in shape N x C
m_feats = torch.index_select(feats, dim=0, index=m_indices[0, :]) # select critical instances, m_feats in shape C x K
q_max = self.q(m_feats) # compute queries of critical instances, q_max in shape C x Q
A = torch.mm(Q, q_max.transpose(0, 1)) # compute inner product of Q to each entry of q_max, A in shape N x C, each column contains unnormalized attention scores
A = F.softmax( A / torch.sqrt(torch.tensor(Q.shape[1], dtype=torch.float32, device=device)), 0) # normalize attention scores, A in shape N x C,
B = torch.mm(A.transpose(0, 1), V) # compute bag representation, B in shape C x V
B = B.view(1, B.shape[0], B.shape[1]) # 1 x C x V
C = self.fcc(B) # 1 x C x 1
C = C.view(1, -1)
return C, A, B
class MILNet(nn.Module):
def __init__(self, i_classifier, b_classifier):
super(MILNet, self).__init__()
self.i_classifier = i_classifier
self.b_classifier = b_classifier
def forward(self, x, **kwargs):
feats, classes = self.i_classifier(x)
prediction_bag, A, B = self.b_classifier(feats, classes)
max_prediction, _ = torch.max(classes, 0)
logits = 0.5 * (prediction_bag + max_prediction)
Y_prob = F.softmax(logits, dim = 1)
Y_hat = torch.topk(logits, 1, dim = 1)[1]
        return logits, Y_prob, Y_hat, 0, 0 # the last two returned values are just dummy vars
#return classes, prediction_bag, A, B
#(original ins_prediction, bag_prediction, _, _ )) | 3,324 | 43.333333 | 168 | py |
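A minimal sketch wiring the three DSMIL modules above together; nn.Identity stands in for a real feature extractor, since features here are assumed precomputed:

import torch
import torch.nn as nn

i_clf = IClassifier(nn.Identity(), feature_size=384, output_class=2)
b_clf = BClassifier(input_size=384, output_class=2)
model = MILNet(i_clf, b_clf)
bag = torch.randn(1000, 384)                  # precomputed patch embeddings
logits, Y_prob, Y_hat, _, _ = model(bag)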
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_cluster.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import numpy as np
from os.path import join
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.model_utils import Attn_Net_Gated
######################################
# Deep Attention MISL Implementation #
######################################
class MIL_Cluster_FC(nn.Module):
def __init__(self, path_input_dim=1024, num_clusters=10, size_arg = "small", dropout=0.25, n_classes=4):
r"""
Attention MIL Implementation
Args:
            path_input_dim (int): Dimension size of patch features
            num_clusters (int): Number of phenotype clusters for pooling
            size_arg (str): Size of NN architecture (Choices: small or big)
dropout (float): Dropout rate
n_classes (int): Output shape of NN
"""
super(MIL_Cluster_FC, self).__init__()
self.size_dict_path = {"small": [path_input_dim, 512, 256], "big": [path_input_dim, 512, 384]}
self.size_dict_omic = {'small': [256, 256]}
self.num_clusters = num_clusters
### FC Cluster layers + Pooling
size = self.size_dict_path[size_arg]
if path_input_dim == 384:
size = [path_input_dim, path_input_dim, 256]
phis = []
for phenotype_i in range(num_clusters):
phi = [nn.Linear(size[0], size[1]), nn.ReLU(), nn.Dropout(dropout),
nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(dropout)]
phis.append(nn.Sequential(*phi))
self.phis = nn.ModuleList(phis)
self.pool1d = nn.AdaptiveAvgPool1d(1)
### WSI Attention MIL Construction
fc = [nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(dropout)]
attention_net = Attn_Net_Gated(L=size[1], D=size[2], dropout=dropout, n_classes=1)
fc.append(attention_net)
self.attention_net = nn.Sequential(*fc)
self.rho = nn.Sequential(*[nn.Linear(size[1], size[2]), nn.ReLU(), nn.Dropout(dropout)])
self.classifier = nn.Linear(size[2], n_classes)
def relocate(self):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count() >= 1:
device_ids = list(range(torch.cuda.device_count()))
self.attention_net = nn.DataParallel(self.attention_net, device_ids=device_ids).to('cuda:0')
else:
self.attention_net = self.attention_net.to(device)
self.phis = self.phis.to(device)
self.pool1d = self.pool1d.to(device)
self.rho = self.rho.to(device)
self.classifier = self.classifier.to(device)
def forward(self, data, **kwargs):
x_path = data
cluster_id = kwargs['cluster_id'].detach().cpu().numpy()
### FC Cluster layers + Pooling
h_cluster = []
for i in range(self.num_clusters):
h_cluster_i = self.phis[i](x_path[cluster_id==i])
if h_cluster_i.shape[0] == 0:
h_cluster_i = torch.zeros((1,384)).to(torch.device('cuda'))
h_cluster.append(self.pool1d(h_cluster_i.T.unsqueeze(0)).squeeze(2))
h_cluster = torch.stack(h_cluster, dim=1).squeeze(0)
### Attention MIL
A, h_path = self.attention_net(h_cluster)
A = torch.transpose(A, 1, 0)
A_raw = A
A = F.softmax(A, dim=1)
h_path = torch.mm(A, h_path)
h = self.rho(h_path).squeeze()
logits = self.classifier(h).unsqueeze(0)
Y_hat = torch.topk(logits, 1, dim = 1)[1]
Y_prob = F.softmax(logits, dim = 1)
return logits, Y_prob, Y_hat, None, None
| 3,697 | 37.520833 | 108 | py |
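A forward-pass sketch for the clustering MIL model above. Note that forward expects cluster assignments via the cluster_id kwarg, and the empty-cluster fallback hard-codes a CUDA tensor, so this sketch assumes every cluster is non-empty (or a GPU machine):

import torch

model = MIL_Cluster_FC(path_input_dim=384, num_clusters=10, n_classes=2)
feats = torch.randn(1000, 384)                     # patch embeddings
cluster_id = torch.randint(0, 10, (1000,))         # one phenotype id per patch
logits, Y_prob, Y_hat, _, _ = model(feats, cluster_id=cluster_id)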
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/resnet_custom.py | # modified from Pytorch official resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
from torchsummary import summary
import torch.nn.functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Bottleneck_Baseline(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck_Baseline, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet_Baseline(nn.Module):
def __init__(self, block, layers):
self.inplanes = 64
super(ResNet_Baseline, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def resnet50_baseline(pretrained=False):
"""Constructs a Modified ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_Baseline(Bottleneck_Baseline, [3, 4, 6, 3])
if pretrained:
model = load_pretrained_weights(model, 'resnet50')
return model
def load_pretrained_weights(model, name):
pretrained_dict = model_zoo.load_url(model_urls[name])
model.load_state_dict(pretrained_dict, strict=False)
return model
| 4,314 | 32.976378 | 90 | py |
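A quick sketch of extracting patch embeddings with the truncated ResNet-50 above; since the network stops after layer3, the pooled feature dimension is 256 * expansion(4) = 1024:

import torch

model = resnet50_baseline(pretrained=False)     # pretrained=True downloads ImageNet weights
model.eval()
with torch.no_grad():
    feats = model(torch.randn(2, 3, 256, 256))  # two 256 x 256 patches
print(feats.shape)                              # torch.Size([2, 1024])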
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/datasets/dataset_generic.py | from __future__ import print_function, division
import os
import torch
import numpy as np
import pandas as pd
import math
import re
import pdb
import pickle
from scipy import stats
from torch.utils.data import Dataset
import h5py
from utils.utils import generate_split, nth
def save_splits(split_datasets, column_keys, filename, boolean_style=False):
splits = [split_datasets[i].slide_data['slide_id'] for i in range(len(split_datasets))]
if not boolean_style:
df = pd.concat(splits, ignore_index=True, axis=1)
df.columns = column_keys
else:
df = pd.concat(splits, ignore_index = True, axis=0)
index = df.values.tolist()
one_hot = np.eye(len(split_datasets)).astype(bool)
bool_array = np.repeat(one_hot, [len(dset) for dset in split_datasets], axis=0)
df = pd.DataFrame(bool_array, index=index, columns = ['train', 'val', 'test'])
df.to_csv(filename)
print()
class Generic_WSI_Classification_Dataset(Dataset):
def __init__(self,
csv_path = 'dataset_csv/ccrcc_clean.csv',
shuffle = False,
seed = 7,
print_info = True,
label_dict = {},
filter_dict = {},
ignore=[],
patient_strat=False,
label_col = None,
patient_voting = 'max',
mode = 'path',
prop = 1.0,
):
"""
Args:
csv_file (string): Path to the csv file with annotations.
shuffle (boolean): Whether to shuffle
seed (int): random seed for shuffling the data
print_info (boolean): Whether to print a summary of the dataset
label_dict (dict): Dictionary with key, value pairs for converting str labels to int
ignore (list): List containing class labels to ignore
"""
self.label_dict = label_dict
self.num_classes = len(set(self.label_dict.values()))
self.seed = seed
self.print_info = print_info
self.patient_strat = patient_strat
self.train_ids, self.val_ids, self.test_ids = (None, None, None)
self.data_dir = None
if not label_col:
label_col = 'label'
self.label_col = label_col
        self.mode = mode
self.prop = prop
slide_data = pd.read_csv(csv_path)
slide_data = self.filter_df(slide_data, filter_dict)
slide_data = self.df_prep(slide_data, self.label_dict, ignore, self.label_col)
###shuffle data
if shuffle:
np.random.seed(seed)
np.random.shuffle(slide_data)
self.slide_data = slide_data
self.patient_data_prep(patient_voting)
self.cls_ids_prep()
if print_info:
self.summarize()
def cls_ids_prep(self):
# store ids corresponding each class at the patient or case level
self.patient_cls_ids = [[] for i in range(self.num_classes)]
for i in range(self.num_classes):
self.patient_cls_ids[i] = np.where(self.patient_data['label'] == i)[0]
# store ids corresponding each class at the slide level
self.slide_cls_ids = [[] for i in range(self.num_classes)]
for i in range(self.num_classes):
self.slide_cls_ids[i] = np.where(self.slide_data['label'] == i)[0]
def patient_data_prep(self, patient_voting='max'):
patients = np.unique(np.array(self.slide_data['case_id'])) # get unique patients
patient_labels = []
for p in patients:
locations = self.slide_data[self.slide_data['case_id'] == p].index.tolist()
assert len(locations) > 0
label = self.slide_data['label'][locations].values
if patient_voting == 'max':
label = label.max() # get patient label (MIL convention)
elif patient_voting == 'maj':
label = stats.mode(label)[0]
else:
raise NotImplementedError
patient_labels.append(label)
self.patient_data = {'case_id':patients, 'label':np.array(patient_labels)}
@staticmethod
def df_prep(data, label_dict, ignore, label_col):
if label_col != 'label':
data['label'] = data[label_col].copy()
mask = data['label'].isin(ignore)
data = data[~mask]
data.reset_index(drop=True, inplace=True)
for i in data.index:
key = data.loc[i, 'label']
data.at[i, 'label'] = label_dict[key]
return data
def filter_df(self, df, filter_dict={}):
if len(filter_dict) > 0:
filter_mask = np.full(len(df), True, bool)
# assert 'label' not in filter_dict.keys()
for key, val in filter_dict.items():
mask = df[key].isin(val)
filter_mask = np.logical_and(filter_mask, mask)
df = df[filter_mask]
return df
def __len__(self):
if self.patient_strat:
return len(self.patient_data['case_id'])
else:
return len(self.slide_data)
def summarize(self):
print("label column: {}".format(self.label_col))
print("label dictionary: {}".format(self.label_dict))
print("number of classes: {}".format(self.num_classes))
print("slide-level counts: ", '\n', self.slide_data['label'].value_counts(sort = False))
for i in range(self.num_classes):
print('Patient-LVL; Number of samples registered in class %d: %d' % (i, self.patient_cls_ids[i].shape[0]))
print('Slide-LVL; Number of samples registered in class %d: %d' % (i, self.slide_cls_ids[i].shape[0]))
def create_splits(self, k = 3, val_num = (25, 25), test_num = (40, 40), label_frac = 1.0, custom_test_ids = None):
settings = {
'n_splits' : k,
'val_num' : val_num,
'test_num': test_num,
'label_frac': label_frac,
'seed': self.seed,
'custom_test_ids': custom_test_ids
}
if self.patient_strat:
settings.update({'cls_ids' : self.patient_cls_ids, 'samples': len(self.patient_data['case_id'])})
else:
settings.update({'cls_ids' : self.slide_cls_ids, 'samples': len(self.slide_data)})
self.split_gen = generate_split(**settings)
def set_splits(self,start_from=None):
if start_from:
ids = nth(self.split_gen, start_from)
else:
ids = next(self.split_gen)
if self.patient_strat:
slide_ids = [[] for i in range(len(ids))]
for split in range(len(ids)):
for idx in ids[split]:
case_id = self.patient_data['case_id'][idx]
slide_indices = self.slide_data[self.slide_data['case_id'] == case_id].index.tolist()
slide_ids[split].extend(slide_indices)
self.train_ids, self.val_ids, self.test_ids = slide_ids[0], slide_ids[1], slide_ids[2]
else:
self.train_ids, self.val_ids, self.test_ids = ids
def get_split_from_df(self, all_splits, split_key='train'):
        # strip the '.svs' extension (str.rstrip('.svs') would strip any trailing '.', 's', 'v' characters)
        split = all_splits[split_key].str.replace('.svs', '', regex=False)
split = split.dropna().reset_index(drop=True)
if len(split) > 0:
mask = self.slide_data['slide_id'].isin(split.tolist())
df_slice = self.slide_data[mask].reset_index(drop=True)
if split_key == 'train' and self.prop != 1.0:
df_slice = df_slice.sample(frac=self.prop, random_state=self.seed).reset_index(drop=True)
if split_key == 'train':
print(df_slice.head())
print("Traing Data Size ({%0.2f}): %d" % (self.prop, df_slice.shape[0]))
split = Generic_Split(df_slice, data_dir=self.data_dir, mode=self.mode, prop=self.prop, num_classes=self.num_classes)
else:
split = None
return split
def get_merged_split_from_df(self, all_splits, split_keys=['train']):
merged_split = []
for split_key in split_keys:
split = all_splits[split_key]
split = split.dropna().reset_index(drop=True).tolist()
merged_split.extend(split)
if len(split) > 0:
mask = self.slide_data['slide_id'].isin(merged_split)
df_slice = self.slide_data[mask].reset_index(drop=True)
split = Generic_Split(df_slice, data_dir=self.data_dir, mode=self.mode, prop=self.prop, num_classes=self.num_classes)
else:
split = None
return split
def return_splits(self, from_id=True, csv_path=None):
if from_id:
if len(self.train_ids) > 0:
train_data = self.slide_data.loc[self.train_ids].reset_index(drop=True)
#if self.prop != 1.0:
# train_data = train_data.sample(frac=self.prop, random_state=self.seed)
#print("Traing Data Size ({0.2f}): %d" % (self.prop, train_data.shape[0]))
train_split = Generic_Split(train_data, data_dir=self.data_dir, num_classes=self.num_classes)
else:
train_split = None
if len(self.val_ids) > 0:
val_data = self.slide_data.loc[self.val_ids].reset_index(drop=True)
val_split = Generic_Split(val_data, data_dir=self.data_dir, num_classes=self.num_classes)
else:
val_split = None
if len(self.test_ids) > 0:
test_data = self.slide_data.loc[self.test_ids].reset_index(drop=True)
test_split = Generic_Split(test_data, data_dir=self.data_dir, num_classes=self.num_classes)
else:
test_split = None
else:
assert csv_path
all_splits = pd.read_csv(csv_path)
train_split = self.get_split_from_df(all_splits, 'train')
val_split = self.get_split_from_df(all_splits, 'val')
test_split = self.get_split_from_df(all_splits, 'test')
return train_split, val_split, test_split
def get_list(self, ids):
return self.slide_data['slide_id'][ids]
def getlabel(self, ids):
return self.slide_data['label'][ids]
def __getitem__(self, idx):
return None
def test_split_gen(self, return_descriptor=False):
if return_descriptor:
index = [list(self.label_dict.keys())[list(self.label_dict.values()).index(i)] for i in range(self.num_classes)]
columns = ['train', 'val', 'test']
df = pd.DataFrame(np.full((len(index), len(columns)), 0, dtype=np.int32), index= index,
columns= columns)
count = len(self.train_ids)
print('\nnumber of training samples: {}'.format(count))
labels = self.getlabel(self.train_ids)
unique, counts = np.unique(labels, return_counts=True)
for u in range(len(unique)):
print('number of samples in cls {}: {}'.format(unique[u], counts[u]))
if return_descriptor:
df.loc[index[u], 'train'] = counts[u]
count = len(self.val_ids)
print('\nnumber of val samples: {}'.format(count))
labels = self.getlabel(self.val_ids)
unique, counts = np.unique(labels, return_counts=True)
for u in range(len(unique)):
print('number of samples in cls {}: {}'.format(unique[u], counts[u]))
if return_descriptor:
df.loc[index[u], 'val'] = counts[u]
count = len(self.test_ids)
print('\nnumber of test samples: {}'.format(count))
labels = self.getlabel(self.test_ids)
unique, counts = np.unique(labels, return_counts=True)
for u in range(len(unique)):
print('number of samples in cls {}: {}'.format(unique[u], counts[u]))
if return_descriptor:
df.loc[index[u], 'test'] = counts[u]
assert len(np.intersect1d(self.train_ids, self.test_ids)) == 0
assert len(np.intersect1d(self.train_ids, self.val_ids)) == 0
assert len(np.intersect1d(self.val_ids, self.test_ids)) == 0
if return_descriptor:
return df
def save_split(self, filename):
train_split = self.get_list(self.train_ids)
val_split = self.get_list(self.val_ids)
test_split = self.get_list(self.test_ids)
df_tr = pd.DataFrame({'train': train_split})
df_v = pd.DataFrame({'val': val_split})
df_t = pd.DataFrame({'test': test_split})
df = pd.concat([df_tr, df_v, df_t], axis=1)
df.to_csv(filename, index = False)
class Generic_MIL_Dataset(Generic_WSI_Classification_Dataset):
def __init__(self,
data_dir,
mode='path',
prop=1.0,
**kwargs):
super(Generic_MIL_Dataset, self).__init__(**kwargs)
self.data_dir = data_dir
self.use_h5 = False
self.mode = mode
self.prop = prop
self.slide_data['slide_id'] = self.slide_data['slide_id'].apply(lambda x: x.replace(".svs", ""))
print("Slide data in geneic mil", self.slide_data)
def load_from_h5(self, toggle):
self.use_h5 = toggle
def __getitem__(self, idx):
slide_id = self.slide_data['slide_id'][idx]
label = self.slide_data['label'][idx]
if type(self.data_dir) == dict:
source = self.slide_data['source'][idx]
data_dir = self.data_dir[source]
else:
data_dir = self.data_dir
if not self.use_h5:
if self.mode == 'path' or self.mode == 'local_region_features':
full_path = os.path.join(data_dir, '{}.pt'.format(slide_id.replace(".svs","")))
features = torch.load(full_path)
if 'dino' in full_path:
if 'vits_tcga_pancancer_dino' in full_path:
pass
else:
features = features[:,1152:1536]
return features, label
elif self.mode == 'pyramid':
full_path = os.path.join(data_dir, '{}.pt'.format(slide_id.replace(".svs","")))
features = torch.load(full_path)
return features, label
elif self.mode == 'cluster':
path_features = []
cluster_ids = []
wsi_path = os.path.join(data_dir, '{}.pt'.format(slide_id.replace(".svs","")))
wsi_bag = torch.load(wsi_path)
if 'dino' in wsi_path:
wsi_bag = wsi_bag[:,1152:1536]
path_features.append(wsi_bag)
cluster_ids.extend(self.fname2ids[slide_id+'.pt'])
path_features = torch.cat(path_features, dim=0)
cluster_ids = torch.Tensor(cluster_ids)
return (path_features, cluster_ids, label)
elif self.mode == 'graph':
path_features = []
from datasets.BatchWSI import BatchWSI
wsi_path = os.path.join("/".join(data_dir.split("/")[0:-1]), 'vits_tcga_pancancer_dino_h5_graph_features', '{}.pt'.format(slide_id.replace('.svs','')))
wsi_bag = torch.load(wsi_path)
path_features.append(wsi_bag)
path_features = BatchWSI.from_data_list(path_features, update_cat_dims={'edge_latent': 1})
return (path_features, label)
else:
return None
class Generic_Split(Generic_MIL_Dataset):
def __init__(self, slide_data, data_dir=None, mode='path', prop=1.0, num_classes=2):
self.use_h5 = False
self.slide_data = slide_data
self.data_dir = data_dir
self.mode = mode
self.prop = prop
self.num_classes = num_classes
self.slide_cls_ids = [[] for i in range(self.num_classes)]
for i in range(self.num_classes):
self.slide_cls_ids[i] = np.where(self.slide_data['label'] == i)[0]
cluster_dir = "/".join(data_dir.split("/")[0:-1])
if os.path.isfile(os.path.join(cluster_dir, 'fast_cluster_ids.pkl')):
with open(os.path.join(cluster_dir, 'fast_cluster_ids.pkl'), 'rb') as handle:
self.fname2ids = pickle.load(handle)
else:
print("Cluster file missing")
def __len__(self):
return len(self.slide_data)
| 16,504 | 38.204276 | 167 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/datasets/dataset_h5.py | from __future__ import print_function, division
import os
import torch
import numpy as np
import pandas as pd
import math
import re
import pdb
import pickle
from torch.utils.data import Dataset, DataLoader, sampler
from torchvision import transforms, utils, models
import torch.nn.functional as F
from PIL import Image
import h5py
from random import randrange
def eval_transforms(pretrained=False):
if pretrained:
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
else:
mean = (0.5,0.5,0.5)
std = (0.5,0.5,0.5)
trnsfrms_val = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(mean = mean, std = std)
]
)
return trnsfrms_val
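# Illustrative usage of eval_transforms (added; not in the original file):
# the result is a float tensor of shape [3, H, W], ImageNet-normalized when
# pretrained=True.
#   t = eval_transforms(pretrained=True)
#   x = t(Image.new('RGB', (256, 256)))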
class Whole_Slide_Bag(Dataset):
def __init__(self,
file_path,
pretrained=False,
custom_transforms=None,
target_patch_size=-1,
):
"""
Args:
file_path (string): Path to the .h5 file containing patched data.
pretrained (bool): Use ImageNet transforms
custom_transforms (callable, optional): Optional transform to be applied on a sample
"""
self.pretrained=pretrained
if target_patch_size > 0:
self.target_patch_size = (target_patch_size, target_patch_size)
else:
self.target_patch_size = None
if not custom_transforms:
self.roi_transforms = eval_transforms(pretrained=pretrained)
else:
self.roi_transforms = custom_transforms
self.file_path = file_path
with h5py.File(self.file_path, "r") as f:
dset = f['imgs']
self.length = len(dset)
self.summary()
def __len__(self):
return self.length
def summary(self):
		# Open the HDF5 file in a context manager so the handle is closed
		# once the attributes have been printed.
		with h5py.File(self.file_path, "r") as hdf5_file:
			dset = hdf5_file['imgs']
			for name, value in dset.attrs.items():
				print(name, value)
print('pretrained:', self.pretrained)
print('transformations:', self.roi_transforms)
if self.target_patch_size is not None:
print('target_size: ', self.target_patch_size)
def __getitem__(self, idx):
with h5py.File(self.file_path,'r') as hdf5_file:
img = hdf5_file['imgs'][idx]
coord = hdf5_file['coords'][idx]
img = Image.fromarray(img)
if self.target_patch_size is not None:
img = img.resize(self.target_patch_size)
img = self.roi_transforms(img).unsqueeze(0)
return img, coord
class Whole_Slide_Bag_FP(Dataset):
def __init__(self,
file_path,
wsi,
pretrained=False,
custom_transforms=None,
custom_downsample=1,
target_patch_size=-1
):
"""
Args:
file_path (string): Path to the .h5 file containing patched data.
pretrained (bool): Use ImageNet transforms
custom_transforms (callable, optional): Optional transform to be applied on a sample
custom_downsample (int): Custom defined downscale factor (overruled by target_patch_size)
target_patch_size (int): Custom defined image size before embedding
"""
self.pretrained=pretrained
self.wsi = wsi
if not custom_transforms:
self.roi_transforms = eval_transforms(pretrained=pretrained)
else:
self.roi_transforms = custom_transforms
self.file_path = file_path
with h5py.File(self.file_path, "r") as f:
dset = f['coords']
self.patch_level = f['coords'].attrs['patch_level']
self.patch_size = f['coords'].attrs['patch_size']
self.length = len(dset)
if target_patch_size > 0:
self.target_patch_size = (target_patch_size, ) * 2
elif custom_downsample > 1:
self.target_patch_size = (self.patch_size // custom_downsample, ) * 2
else:
self.target_patch_size = None
self.summary()
def __len__(self):
return self.length
def summary(self):
		# As above, use a context manager so the HDF5 handle is not leaked.
		with h5py.File(self.file_path, "r") as hdf5_file:
			dset = hdf5_file['coords']
			for name, value in dset.attrs.items():
				print(name, value)
print('\nfeature extraction settings')
print('target patch size: ', self.target_patch_size)
print('pretrained: ', self.pretrained)
print('transformations: ', self.roi_transforms)
def __getitem__(self, idx):
with h5py.File(self.file_path,'r') as hdf5_file:
coord = hdf5_file['coords'][idx]
img = self.wsi.read_region(coord, self.patch_level, (self.patch_size, self.patch_size)).convert('RGB')
if self.target_patch_size is not None:
img = img.resize(self.target_patch_size)
img = self.roi_transforms(img).unsqueeze(0)
return img, coord
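# Usage sketch for Whole_Slide_Bag_FP (added for illustration; assumes
# OpenSlide is installed and 'slide.svs' / 'patches.h5' are hypothetical paths):
#   import openslide
#   wsi = openslide.open_slide('slide.svs')
#   bag = Whole_Slide_Bag_FP('patches.h5', wsi, pretrained=True)
#   img, coord = bag[0]   # [1, 3, patch_size, patch_size] tensor + (x, y) coord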
class Dataset_All_Bags(Dataset):
def __init__(self, csv_path):
self.df = pd.read_csv(csv_path)
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
return self.df['slide_id'][idx]
| 4,426 | 24.738372 | 104 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/datasets/BatchWSI.py | import torch_geometric
from typing import List
import torch
from torch import Tensor
from torch_sparse import SparseTensor, cat
import torch_geometric
from torch_geometric.data import Data
class BatchWSI(torch_geometric.data.Batch):
def __init__(self):
super(BatchWSI, self).__init__()
pass
@classmethod
def from_data_list(cls, data_list, follow_batch=[], exclude_keys=[], update_cat_dims={}):
r"""Constructs a batch object from a python list holding
:class:`torch_geometric.data.Data` objects.
The assignment vector :obj:`batch` is created on the fly.
Additionally, creates assignment batch vectors for each key in
:obj:`follow_batch`.
Will exclude any keys given in :obj:`exclude_keys`."""
keys = list(set(data_list[0].keys) - set(exclude_keys))
assert 'batch' not in keys and 'ptr' not in keys
batch = cls()
for key in data_list[0].__dict__.keys():
if key[:2] != '__' and key[-2:] != '__':
batch[key] = None
batch.__num_graphs__ = len(data_list)
batch.__data_class__ = data_list[0].__class__
for key in keys + ['batch']:
batch[key] = []
batch['ptr'] = [0]
cat_dims = {}
device = None
slices = {key: [0] for key in keys}
cumsum = {key: [0] for key in keys}
num_nodes_list = []
for i, data in enumerate(data_list):
for key in keys:
item = data[key]
# Increase values by `cumsum` value.
cum = cumsum[key][-1]
if isinstance(item, Tensor) and item.dtype != torch.bool:
if not isinstance(cum, int) or cum != 0:
item = item + cum
elif isinstance(item, SparseTensor):
value = item.storage.value()
if value is not None and value.dtype != torch.bool:
if not isinstance(cum, int) or cum != 0:
value = value + cum
item = item.set_value(value, layout='coo')
elif isinstance(item, (int, float)):
item = item + cum
# Gather the size of the `cat` dimension.
size = 1
if key in update_cat_dims.keys():
cat_dim = update_cat_dims[key]
else:
cat_dim = data.__cat_dim__(key, data[key])
# 0-dimensional tensors have no dimension along which to
# concatenate, so we set `cat_dim` to `None`.
if isinstance(item, Tensor) and item.dim() == 0:
cat_dim = None
cat_dims[key] = cat_dim
# Add a batch dimension to items whose `cat_dim` is `None`:
if isinstance(item, Tensor) and cat_dim is None:
cat_dim = 0 # Concatenate along this new batch dimension.
item = item.unsqueeze(0)
device = item.device
elif isinstance(item, Tensor):
size = item.size(cat_dim)
device = item.device
elif isinstance(item, SparseTensor):
size = torch.tensor(item.sizes())[torch.tensor(cat_dim)]
device = item.device()
batch[key].append(item) # Append item to the attribute list.
slices[key].append(size + slices[key][-1])
inc = data.__inc__(key, item)
if isinstance(inc, (tuple, list)):
inc = torch.tensor(inc)
cumsum[key].append(inc + cumsum[key][-1])
if key in follow_batch:
if isinstance(size, Tensor):
for j, size in enumerate(size.tolist()):
tmp = f'{key}_{j}_batch'
batch[tmp] = [] if i == 0 else batch[tmp]
batch[tmp].append(
torch.full((size, ), i, dtype=torch.long,
device=device))
else:
tmp = f'{key}_batch'
batch[tmp] = [] if i == 0 else batch[tmp]
batch[tmp].append(
torch.full((size, ), i, dtype=torch.long,
device=device))
if hasattr(data, '__num_nodes__'):
num_nodes_list.append(data.__num_nodes__)
else:
num_nodes_list.append(None)
num_nodes = data.num_nodes
if num_nodes is not None:
item = torch.full((num_nodes, ), i, dtype=torch.long,
device=device)
batch.batch.append(item)
batch.ptr.append(batch.ptr[-1] + num_nodes)
batch.batch = None if len(batch.batch) == 0 else batch.batch
batch.ptr = None if len(batch.ptr) == 1 else batch.ptr
batch.__slices__ = slices
batch.__cumsum__ = cumsum
batch.__cat_dims__ = cat_dims
batch.__num_nodes_list__ = num_nodes_list
ref_data = data_list[0]
for key in batch.keys:
items = batch[key]
item = items[0]
### <--- Updating Cat Dim
if key in update_cat_dims.keys():
cat_dim = update_cat_dims[key]
else:
cat_dim = ref_data.__cat_dim__(key, item)
cat_dim = 0 if cat_dim is None else cat_dim
            ### --->
if isinstance(item, Tensor):
batch[key] = torch.cat(items, cat_dim)
elif isinstance(item, SparseTensor):
batch[key] = cat(items, cat_dim)
elif isinstance(item, (int, float)):
batch[key] = torch.tensor(items)
if torch_geometric.is_debug_enabled():
batch.debug()
return batch.contiguous() | 6,596 | 42.98 | 93 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/utils/core_utils.py | import numpy as np
import torch
import torch.nn.functional as F
from utils.utils import *
import os
import torch.nn.functional as F
from datasets.dataset_generic import save_splits
from models.model_dsmil import *
from models.model_mil import MIL_fc, MIL_fc_mc
from models.model_dgcn import DeepGraphConv
from models.model_clam import CLAM_MB, CLAM_SB
from models.model_cluster import MIL_Cluster_FC
from models.model_hierarchical_mil import HIPT_None_FC, HIPT_LGP_FC, HIPT_GP_FC
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.metrics import auc as calc_auc
import sys
#from utils.gpu_utils import gpu_profile, print_gpu_mem
#os.environ['GPU_DEBUG']='0'
class Accuracy_Logger(object):
"""Accuracy logger"""
def __init__(self, n_classes):
super(Accuracy_Logger, self).__init__()
self.n_classes = n_classes
self.initialize()
def initialize(self):
self.data = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
def log(self, Y_hat, Y):
Y_hat = int(Y_hat)
Y = int(Y)
self.data[Y]["count"] += 1
self.data[Y]["correct"] += (Y_hat == Y)
def log_batch(self, Y_hat, Y):
Y_hat = np.array(Y_hat).astype(int)
Y = np.array(Y).astype(int)
for label_class in np.unique(Y):
cls_mask = Y == label_class
self.data[label_class]["count"] += cls_mask.sum()
self.data[label_class]["correct"] += (Y_hat[cls_mask] == Y[cls_mask]).sum()
def get_summary(self, c):
count = self.data[c]["count"]
correct = self.data[c]["correct"]
if count == 0:
acc = None
else:
acc = float(correct) / count
return acc, correct, count
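# Quick illustration of the logger above (added; not in the original repo):
#   logger = Accuracy_Logger(n_classes=2)
#   logger.log_batch([0, 1, 1, 0], [0, 1, 0, 0])   # predictions, labels
#   logger.get_summary(0)                          # -> (0.666..., 2, 3)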
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=20, stop_epoch=50, verbose=False):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 20
stop_epoch (int): Earliest epoch possible for stopping
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
"""
self.patience = patience
self.stop_epoch = stop_epoch
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
def __call__(self, epoch, val_loss, model, ckpt_name = 'checkpoint.pt'):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model, ckpt_name)
elif score < self.best_score:
self.counter += 1
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience and epoch > self.stop_epoch:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model, ckpt_name)
self.counter = 0
def save_checkpoint(self, val_loss, model, ckpt_name):
'''Saves model when validation loss decrease.'''
if self.verbose:
print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
torch.save(model.state_dict(), ckpt_name)
self.val_loss_min = val_loss
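# Minimal usage sketch for EarlyStopping (illustrative only; `model` can be
# any nn.Module with a state_dict, here a toy linear layer):
#   stopper = EarlyStopping(patience=3, stop_epoch=5, verbose=False)
#   model = nn.Linear(4, 2)
#   for epoch, val_loss in enumerate([1.0, 0.9, 0.95, 0.96, 0.97, 0.98, 0.99]):
#       stopper(epoch, val_loss, model, ckpt_name='checkpoint.pt')
#       if stopper.early_stop:
#           break   # triggers at epoch 6: counter >= patience and epoch > stop_epoch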
def train(datasets, cur, args):
"""
train for a single fold
"""
print('\nTraining Fold {}!'.format(cur))
writer_dir = os.path.join(args.results_dir, str(cur))
if not os.path.isdir(writer_dir):
os.mkdir(writer_dir)
if args.log_data:
from tensorboardX import SummaryWriter
writer = SummaryWriter(writer_dir, flush_secs=15)
else:
writer = None
print('\nInit train/val/test splits...', end=' ')
train_split, val_split, test_split = datasets
save_splits(datasets, ['train', 'val', 'test'], os.path.join(args.results_dir, 'splits_{}.csv'.format(cur)))
print('Done!')
print("Training on {} samples".format(len(train_split)))
print("Validating on {} samples".format(len(val_split)))
print("Testing on {} samples".format(len(test_split)))
print('\nInit loss function...', end=' ')
if args.bag_loss == 'svm':
from topk import SmoothTop1SVM
loss_fn = SmoothTop1SVM(n_classes = args.n_classes)
if device.type == 'cuda':
loss_fn = loss_fn.cuda()
else:
loss_fn = nn.CrossEntropyLoss()
print('Done!')
print('\nInit Model...', end=' ')
model_dict = {'path_input_dim': args.path_input_dim, "dropout": args.drop_out, 'n_classes': args.n_classes}
if args.model_type == 'clam' and args.subtyping:
model_dict.update({'subtyping': True})
if args.model_size is not None and args.model_type != 'mil':
model_dict.update({"size_arg": args.model_size})
if args.model_type in ['clam_sb', 'clam_mb']:
if args.subtyping:
model_dict.update({'subtyping': True})
if args.B > 0:
model_dict.update({'k_sample': args.B})
if args.inst_loss == 'svm':
from topk import SmoothTop1SVM
instance_loss_fn = SmoothTop1SVM(n_classes = 2)
if device.type == 'cuda':
instance_loss_fn = instance_loss_fn.cuda()
else:
instance_loss_fn = nn.CrossEntropyLoss()
if args.model_type =='clam_sb':
model = CLAM_SB(**model_dict, instance_loss_fn=instance_loss_fn)
elif args.model_type == 'clam_mb':
model = CLAM_MB(**model_dict, instance_loss_fn=instance_loss_fn)
else:
raise NotImplementedError
elif 'hipt' in args.model_type:
if args.model_type == 'hipt_n':
model = HIPT_None_FC(**model_dict)
elif args.model_type == 'hipt_lgp':
model = HIPT_LGP_FC(**model_dict, freeze_4k=args.freeze_4k, pretrain_4k=args.pretrain_4k, freeze_WSI=args.freeze_WSI, pretrain_WSI=args.pretrain_WSI)
elif args.model_type == 'hipt_gp':
model = HIPT_GP_FC(**model_dict, freeze_WSI=args.freeze_WSI, pretrain_WSI=args.pretrain_WSI)
elif args.model_type == 'dsmil':
i_classifier = FCLayer(in_size=args.path_input_dim, out_size=model_dict['n_classes'])
b_classifier = BClassifier(input_size=args.path_input_dim, output_class=model_dict['n_classes'], dropout_v=0.0)
model = MILNet(i_classifier, b_classifier)
elif args.model_type == 'dgcn':
model_dict = {'path_input_dim': args.path_input_dim}
model = DeepGraphConv(num_features=model_dict['path_input_dim'], n_classes=args.n_classes)
elif args.model_type == 'mi_fcn':
model = MIL_Cluster_FC(path_input_dim=args.path_input_dim, n_classes=args.n_classes)
else: # args.model_type == 'mil'
if args.n_classes > 2:
model = MIL_fc_mc(**model_dict)
else:
model = MIL_fc(**model_dict)
if hasattr(model, "relocate"):
model.relocate()
else:
model = model.to(torch.device('cuda'))
print('Done!')
print_network(model)
print('\nInit optimizer ...', end=' ')
optimizer = get_optim(model, args)
print('Done!')
print('\nInit Loaders...', end=' ')
train_loader = get_split_loader(train_split, training=True, testing = args.testing, weighted = args.weighted_sample, mode=args.mode)
val_loader = get_split_loader(val_split, testing = args.testing, mode=args.mode)
test_loader = get_split_loader(test_split, testing = args.testing, mode=args.mode)
print('Done!')
print('\nSetup EarlyStopping...', end=' ')
if args.early_stopping:
early_stopping = EarlyStopping(patience = 20, stop_epoch=50, verbose = True)
else:
early_stopping = None
print('Done!')
for epoch in range(args.max_epochs):
if args.model_type in ['clam_sb', 'clam_mb'] and not args.no_inst_cluster:
train_loop_clam(epoch, model, train_loader, optimizer, args.n_classes, args.bag_weight, writer, loss_fn, dropinput=args.dropinput)
stop = validate_clam(cur, epoch, model, val_loader, args.n_classes,
early_stopping, writer, loss_fn, args.results_dir)
else:
train_loop(epoch, model, train_loader, optimizer, args.n_classes, writer, loss_fn)
stop = validate(cur, epoch, model, val_loader, args.n_classes,
early_stopping, writer, loss_fn, args.results_dir)
if stop:
break
if args.early_stopping:
model.load_state_dict(torch.load(os.path.join(args.results_dir, "s_{}_checkpoint.pt".format(cur))))
else:
torch.save(model.state_dict(), os.path.join(args.results_dir, "s_{}_checkpoint.pt".format(cur)))
_, val_error, val_auc, _= summary(model, val_loader, args.n_classes)
print('Val error: {:.4f}, ROC AUC: {:.4f}'.format(val_error, val_auc))
results_dict, test_error, test_auc, acc_logger = summary(model, test_loader, args.n_classes)
print('Test error: {:.4f}, ROC AUC: {:.4f}'.format(test_error, test_auc))
for i in range(args.n_classes):
acc, correct, count = acc_logger.get_summary(i)
print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))
if writer:
writer.add_scalar('final/test_class_{}_acc'.format(i), acc, 0)
if writer:
writer.add_scalar('final/val_error', val_error, 0)
writer.add_scalar('final/val_auc', val_auc, 0)
writer.add_scalar('final/test_error', test_error, 0)
writer.add_scalar('final/test_auc', test_auc, 0)
writer.close()
return results_dict, test_auc, val_auc, 1-test_error, 1-val_error
def train_loop(epoch, model, loader, optimizer, n_classes, writer = None, loss_fn = None, gc=32):
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.train()
acc_logger = Accuracy_Logger(n_classes=n_classes)
train_loss = 0.
train_error = 0.
print('\n')
for batch_idx, batch in enumerate(loader):
if hasattr(model, "num_clusters"):
data, cluster_id, label = batch
data, cluster_id, label = data.to(device, non_blocking=True), cluster_id, label.to(device, non_blocking=True)
else:
data, label = batch
data, label = data.to(device, non_blocking=True), label.to(device, non_blocking=True)
cluster_id = None
logits, Y_prob, Y_hat, _, _ = model(data, cluster_id=cluster_id)
#logits, Y_prob, Y_hat, _, _ = model(x_path=data)
acc_logger.log(Y_hat, label)
loss = loss_fn(logits, label)
loss_value = loss.item()
train_loss += loss_value
if (batch_idx + 1) % 20 == 0:
print('batch {}, loss: {:.4f}, label: {}, bag_size: {}'.format(batch_idx, loss_value, label.item(), data.size(0)))
error = calculate_error(Y_hat, label)
train_error += error
        # NOTE: the loss is scaled by gc here, but optimizer.step()/zero_grad()
        # run on every batch below, so no gradients are actually accumulated;
        # the division simply rescales the effective learning rate by 1/gc.
        loss = loss / gc
loss.backward()
# step
optimizer.step()
optimizer.zero_grad()
# calculate loss and error for epoch
train_loss /= len(loader)
train_error /= len(loader)
print('Epoch: {}, train_loss: {:.4f}, train_error: {:.4f}'.format(epoch, train_loss, train_error))
for i in range(n_classes):
acc, correct, count = acc_logger.get_summary(i)
print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))
if writer:
writer.add_scalar('train/class_{}_acc'.format(i), acc, epoch)
if writer:
writer.add_scalar('train/loss', train_loss, epoch)
writer.add_scalar('train/error', train_error, epoch)
def validate(cur, epoch, model, loader, n_classes, early_stopping = None, writer = None, loss_fn = None, results_dir=None):
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.eval()
acc_logger = Accuracy_Logger(n_classes=n_classes)
# loader.dataset.update_mode(True)
val_loss = 0.
val_error = 0.
prob = np.zeros((len(loader), n_classes))
labels = np.zeros(len(loader))
with torch.no_grad():
for batch_idx, batch in enumerate(loader):
if hasattr(model, "num_clusters"):
data, cluster_id, label = batch
data, cluster_id, label = data.to(device, non_blocking=True), cluster_id, label.to(device, non_blocking=True)
else:
data, label = batch
data, label = data.to(device, non_blocking=True), label.to(device, non_blocking=True)
cluster_id = None
logits, Y_prob, Y_hat, _, _ = model(data, cluster_id=cluster_id)
#logits, Y_prob, Y_hat, _, _ = model(x_path=data)
acc_logger.log(Y_hat, label)
loss = loss_fn(logits, label)
prob[batch_idx] = Y_prob.cpu().numpy()
labels[batch_idx] = label.item()
val_loss += loss.item()
error = calculate_error(Y_hat, label)
val_error += error
val_error /= len(loader)
val_loss /= len(loader)
if n_classes == 2:
auc = roc_auc_score(labels, prob[:, 1])
else:
auc = roc_auc_score(labels, prob, multi_class='ovr')
if writer:
writer.add_scalar('val/loss', val_loss, epoch)
writer.add_scalar('val/auc', auc, epoch)
writer.add_scalar('val/error', val_error, epoch)
print('\nVal Set, val_loss: {:.4f}, val_error: {:.4f}, auc: {:.4f}'.format(val_loss, val_error, auc))
for i in range(n_classes):
acc, correct, count = acc_logger.get_summary(i)
print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))
if early_stopping:
assert results_dir
early_stopping(epoch, val_loss, model, ckpt_name = os.path.join(results_dir, "s_{}_checkpoint.pt".format(cur)))
if early_stopping.early_stop:
print("Early stopping")
return True
return False
def train_loop_clam(epoch, model, loader, optimizer, n_classes, bag_weight, writer = None, loss_fn = None, dropinput=0.0):
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.train()
acc_logger = Accuracy_Logger(n_classes=n_classes)
inst_logger = Accuracy_Logger(n_classes=n_classes)
train_loss = 0.
train_error = 0.
train_inst_loss = 0.
inst_count = 0
print('\n')
for batch_idx, batch in enumerate(loader):
if hasattr(model, "num_clusters"):
data, cluster_id, label = batch
data, cluster_id, label = data.to(device), cluster_id, label.to(device)
else:
data, label = batch
data, label = data.to(device), label.to(device)
cluster_id = None
if dropinput > 0:
data = F.dropout(data, p=dropinput)
logits, Y_prob, Y_hat, _, instance_dict = model(h=data, cluster_id=cluster_id, label=label, instance_eval=True)
acc_logger.log(Y_hat, label)
loss = loss_fn(logits, label)
loss_value = loss.item()
instance_loss = instance_dict['instance_loss']
inst_count+=1
instance_loss_value = instance_loss.item()
train_inst_loss += instance_loss_value
total_loss = bag_weight * loss + (1-bag_weight) * instance_loss
inst_preds = instance_dict['inst_preds']
inst_labels = instance_dict['inst_labels']
inst_logger.log_batch(inst_preds, inst_labels)
train_loss += loss_value
if (batch_idx + 1) % 20 == 0:
print('batch {}, loss: {:.4f}, instance_loss: {:.4f}, weighted_loss: {:.4f}, '.format(batch_idx, loss_value, instance_loss_value, total_loss.item()) +
'label: {}, bag_size: {}'.format(label.item(), data.size(0)))
error = calculate_error(Y_hat, label)
train_error += error
# backward pass
total_loss.backward()
# step
optimizer.step()
optimizer.zero_grad()
# calculate loss and error for epoch
train_loss /= len(loader)
train_error /= len(loader)
if inst_count > 0:
train_inst_loss /= inst_count
print('\n')
for i in range(2):
acc, correct, count = inst_logger.get_summary(i)
print('class {} clustering acc {}: correct {}/{}'.format(i, acc, correct, count))
print('Epoch: {}, train_loss: {:.4f}, train_clustering_loss: {:.4f}, train_error: {:.4f}'.format(epoch, train_loss, train_inst_loss, train_error))
for i in range(n_classes):
acc, correct, count = acc_logger.get_summary(i)
print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))
if writer and acc is not None:
writer.add_scalar('train/class_{}_acc'.format(i), acc, epoch)
if writer:
writer.add_scalar('train/loss', train_loss, epoch)
writer.add_scalar('train/error', train_error, epoch)
writer.add_scalar('train/clustering_loss', train_inst_loss, epoch)
def validate_clam(cur, epoch, model, loader, n_classes, early_stopping = None, writer = None, loss_fn = None, results_dir = None):
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.eval()
acc_logger = Accuracy_Logger(n_classes=n_classes)
inst_logger = Accuracy_Logger(n_classes=n_classes)
val_loss = 0.
val_error = 0.
val_inst_loss = 0.
val_inst_acc = 0.
inst_count=0
prob = np.zeros((len(loader), n_classes))
labels = np.zeros(len(loader))
sample_size = model.k_sample
with torch.no_grad():
for batch_idx, batch in enumerate(loader):
if hasattr(model, "num_clusters"):
data, cluster_id, label = batch
data, cluster_id, label = data.to(device), cluster_id, label.to(device)
else:
data, label = batch
data, label = data.to(device), label.to(device)
cluster_id = None
logits, Y_prob, Y_hat, _, instance_dict = model(h=data, cluster_id=cluster_id, label=label, instance_eval=True)
acc_logger.log(Y_hat, label)
loss = loss_fn(logits, label)
val_loss += loss.item()
instance_loss = instance_dict['instance_loss']
inst_count+=1
instance_loss_value = instance_loss.item()
val_inst_loss += instance_loss_value
inst_preds = instance_dict['inst_preds']
inst_labels = instance_dict['inst_labels']
inst_logger.log_batch(inst_preds, inst_labels)
prob[batch_idx] = Y_prob.cpu().numpy()
labels[batch_idx] = label.item()
error = calculate_error(Y_hat, label)
val_error += error
val_error /= len(loader)
val_loss /= len(loader)
if n_classes == 2:
auc = roc_auc_score(labels, prob[:, 1])
aucs = []
else:
aucs = []
binary_labels = label_binarize(labels, classes=[i for i in range(n_classes)])
for class_idx in range(n_classes):
if class_idx in labels:
fpr, tpr, _ = roc_curve(binary_labels[:, class_idx], prob[:, class_idx])
aucs.append(calc_auc(fpr, tpr))
else:
aucs.append(float('nan'))
auc = np.nanmean(np.array(aucs))
print('\nVal Set, val_loss: {:.4f}, val_error: {:.4f}, auc: {:.4f}'.format(val_loss, val_error, auc))
if inst_count > 0:
val_inst_loss /= inst_count
for i in range(2):
acc, correct, count = inst_logger.get_summary(i)
print('class {} clustering acc {}: correct {}/{}'.format(i, acc, correct, count))
if writer:
writer.add_scalar('val/loss', val_loss, epoch)
writer.add_scalar('val/auc', auc, epoch)
writer.add_scalar('val/error', val_error, epoch)
writer.add_scalar('val/inst_loss', val_inst_loss, epoch)
for i in range(n_classes):
acc, correct, count = acc_logger.get_summary(i)
print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))
if writer and acc is not None:
writer.add_scalar('val/class_{}_acc'.format(i), acc, epoch)
if early_stopping:
assert results_dir
early_stopping(epoch, val_loss, model, ckpt_name = os.path.join(results_dir, "s_{}_checkpoint.pt".format(cur)))
if early_stopping.early_stop:
print("Early stopping")
return True
return False
def summary(model, loader, n_classes):
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
acc_logger = Accuracy_Logger(n_classes=n_classes)
model.eval()
test_loss = 0.
test_error = 0.
all_probs = np.zeros((len(loader), n_classes))
all_labels = np.zeros(len(loader))
slide_ids = loader.dataset.slide_data['slide_id']
patient_results = {}
for batch_idx, batch in enumerate(loader):
if hasattr(model, "num_clusters"):
data, cluster_id, label = batch
data, cluster_id, label = data.to(device), cluster_id, label.to(device)
else:
data, label = batch
data, label = data.to(device), label.to(device)
cluster_id = None
#data, label = data.to(device), label.to(device)
slide_id = slide_ids.iloc[batch_idx]
with torch.no_grad():
logits, Y_prob, Y_hat, _, _ = model(data, cluster_id=cluster_id)
#logits, Y_prob, Y_hat, _, _ = model(data)
acc_logger.log(Y_hat, label)
probs = Y_prob.cpu().numpy()
all_probs[batch_idx] = probs
all_labels[batch_idx] = label.item()
patient_results.update({slide_id: {'slide_id': np.array(slide_id), 'prob': probs, 'label': label.item()}})
error = calculate_error(Y_hat, label)
test_error += error
test_error /= len(loader)
if n_classes == 2:
auc = roc_auc_score(all_labels, all_probs[:, 1])
aucs = []
else:
aucs = []
binary_labels = label_binarize(all_labels, classes=[i for i in range(n_classes)])
for class_idx in range(n_classes):
if class_idx in all_labels:
fpr, tpr, _ = roc_curve(binary_labels[:, class_idx], all_probs[:, class_idx])
print(calc_auc(fpr, tpr))
aucs.append(calc_auc(fpr, tpr))
else:
print('nan')
aucs.append(float('nan'))
auc = np.nanmean(np.array(aucs))
return patient_results, test_error, auc, acc_logger
| 23,019 | 36.986799 | 163 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/utils/utils.py | import pickle
import torch
import numpy as np
import torch.nn as nn
import pdb
import torch
import numpy as np
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader, Sampler, WeightedRandomSampler, RandomSampler, SequentialSampler, sampler
import torch.optim as optim
import pdb
import torch.nn.functional as F
import math
from itertools import islice
import collections
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
class SubsetSequentialSampler(Sampler):
"""Samples elements sequentially from a given list of indices, without replacement.
Arguments:
indices (sequence): a sequence of indices
"""
def __init__(self, indices):
self.indices = indices
def __iter__(self):
return iter(self.indices)
def __len__(self):
return len(self.indices)
def collate_MIL(batch):
img = torch.cat([item[0] for item in batch], dim = 0)
label = torch.LongTensor([item[1] for item in batch])
return [img, label]
def collate_MIL_cluster(batch):
img = torch.cat([item[0] for item in batch], dim = 0)
cluster_ids = torch.cat([item[1] for item in batch], dim = 0).type(torch.LongTensor)
label = torch.LongTensor([item[2] for item in batch])
return [img, cluster_ids, label]
def collate_MIL_graph(batch):
img = batch[0][0]
label = torch.LongTensor([item[1] for item in batch])
#print(img, label)
return [img, label]
def collate_features(batch):
img = torch.cat([item[0] for item in batch], dim = 0)
coords = np.vstack([item[1] for item in batch])
return [img, coords]
def get_simple_loader(dataset, batch_size=1, num_workers=1):
	# The original dict literal repeated the 'num_workers' key; the last value
	# wins in Python, so this keeps the same behavior without the duplicate.
	kwargs = {'num_workers': num_workers, 'pin_memory': False} if device.type == "cuda" else {}
loader = DataLoader(dataset, batch_size=batch_size, sampler = sampler.SequentialSampler(dataset), collate_fn = collate_MIL, **kwargs)
return loader
def get_split_loader(split_dataset, training = False, testing = False, weighted = False, mode='cluster'):
"""
return either the validation loader or training loader
"""
if mode == 'cluster':
collate = collate_MIL_cluster
elif mode == 'graph':
collate = collate_MIL_graph # collate_MIL_graph
else:
collate = collate_MIL
kwargs = {'num_workers': 8} if device.type == "cuda" else {}
if not testing:
if training:
if weighted:
weights = make_weights_for_balanced_classes_split(split_dataset)
loader = DataLoader(split_dataset, batch_size=1, sampler = WeightedRandomSampler(weights, len(weights)), collate_fn = collate, **kwargs)
else:
loader = DataLoader(split_dataset, batch_size=1, sampler = RandomSampler(split_dataset), collate_fn = collate, **kwargs)
else:
loader = DataLoader(split_dataset, batch_size=1, sampler = SequentialSampler(split_dataset), collate_fn = collate, **kwargs)
else:
		# Sample 10% of the dataset for quick testing; the original misplaced a
		# parenthesis and passed the sample size into np.arange instead.
		ids = np.random.choice(np.arange(len(split_dataset)), int(len(split_dataset)*0.1), replace = False)
loader = DataLoader(split_dataset, batch_size=1, sampler = SubsetSequentialSampler(ids), collate_fn = collate, **kwargs )
return loader
def get_optim(model, args):
if args.opt == "adam":
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.reg)
elif args.opt == 'sgd':
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=0.9, weight_decay=args.reg)
else:
raise NotImplementedError
return optimizer
def print_network(net):
num_params = 0
num_params_train = 0
print(net)
for param in net.parameters():
n = param.numel()
num_params += n
if param.requires_grad:
num_params_train += n
print('Total number of parameters: %d' % num_params)
print('Total number of trainable parameters: %d' % num_params_train)
def generate_split(cls_ids, val_num, test_num, samples, n_splits = 5,
seed = 7, label_frac = 1.0, custom_test_ids = None):
indices = np.arange(samples).astype(int)
if custom_test_ids is not None:
indices = np.setdiff1d(indices, custom_test_ids)
np.random.seed(seed)
for i in range(n_splits):
all_val_ids = []
all_test_ids = []
sampled_train_ids = []
if custom_test_ids is not None: # pre-built test split, do not need to sample
all_test_ids.extend(custom_test_ids)
for c in range(len(val_num)):
possible_indices = np.intersect1d(cls_ids[c], indices) #all indices of this class
val_ids = np.random.choice(possible_indices, val_num[c], replace = False) # validation ids
remaining_ids = np.setdiff1d(possible_indices, val_ids) #indices of this class left after validation
all_val_ids.extend(val_ids)
if custom_test_ids is None: # sample test split
test_ids = np.random.choice(remaining_ids, test_num[c], replace = False)
remaining_ids = np.setdiff1d(remaining_ids, test_ids)
all_test_ids.extend(test_ids)
if label_frac == 1:
sampled_train_ids.extend(remaining_ids)
else:
sample_num = math.ceil(len(remaining_ids) * label_frac)
slice_ids = np.arange(sample_num)
sampled_train_ids.extend(remaining_ids[slice_ids])
yield sampled_train_ids, all_val_ids, all_test_ids
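# Example of driving the generator above (added for illustration): 5 splits
# over 10 samples of a single class, holding out 2 for val and 2 for test
# each time, leaving 6 for training.
#   splits = generate_split([np.arange(10)], val_num=[2], test_num=[2],
#                           samples=10, n_splits=5, seed=7)
#   train_ids, val_ids, test_ids = next(splits)   # 6 / 2 / 2 indices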
def nth(iterator, n, default=None):
if n is None:
return collections.deque(iterator, maxlen=0)
else:
return next(islice(iterator,n, None), default)
def calculate_error(Y_hat, Y):
error = 1. - Y_hat.float().eq(Y.float()).float().mean().item()
return error
def make_weights_for_balanced_classes_split(dataset):
N = float(len(dataset))
weight_per_class = [N/len(dataset.slide_cls_ids[c]) for c in range(len(dataset.slide_cls_ids))]
weight = [0] * int(N)
for idx in range(len(dataset)):
y = dataset.getlabel(idx)
weight[idx] = weight_per_class[y]
return torch.DoubleTensor(weight)
def initialize_weights(module):
for m in module.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
| 6,214 | 32.413978 | 197 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/utils/eval_utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.model_mil import MIL_fc, MIL_fc_mc
from models.model_clam import CLAM_SB, CLAM_MB
import pdb
import os
import pandas as pd
from utils.utils import *
from utils.core_utils import Accuracy_Logger
from sklearn.metrics import roc_auc_score, roc_curve, auc
from sklearn.preprocessing import label_binarize
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
def initiate_model(args, ckpt_path):
#print('Init Model')
model_dict = {"dropout": args.drop_out, 'n_classes': args.n_classes}
if args.model_size is not None and args.model_type in ['clam_sb', 'clam_mb']:
model_dict.update({"size_arg": args.model_size})
if args.model_type =='clam_sb':
model = CLAM_SB(**model_dict)
elif args.model_type =='clam_mb':
model = CLAM_MB(**model_dict)
else: # args.model_type == 'mil'
if args.n_classes > 2:
model = MIL_fc_mc(**model_dict)
else:
model = MIL_fc(**model_dict)
#print_network(model)
ckpt = torch.load(ckpt_path)
ckpt_clean = {}
for key in ckpt.keys():
if 'instance_loss_fn' in key:
continue
ckpt_clean.update({key.replace('.module', ''):ckpt[key]})
model.load_state_dict(ckpt_clean, strict=True)
model.relocate()
model.eval()
return model
def eval(dataset, args, ckpt_path):
model = initiate_model(args, ckpt_path)
#print('Init Loaders')
loader = get_simple_loader(dataset)
patient_results, test_error, auc, df, _ = summary(model, loader, args)
print('test_error: ', test_error)
print('auc: ', auc)
return model, patient_results, test_error, auc, df
def summary(model, loader, args):
acc_logger = Accuracy_Logger(n_classes=args.n_classes)
model.eval()
test_loss = 0.
test_error = 0.
all_probs = np.zeros((len(loader), args.n_classes))
all_labels = np.zeros(len(loader))
all_preds = np.zeros(len(loader))
slide_ids = loader.dataset.slide_data['slide_id']
patient_results = {}
from tqdm import tqdm
for batch_idx, (data, label) in tqdm(enumerate(loader)):
data, label = data.to(device), label.to(device)
slide_id = slide_ids.iloc[batch_idx]
with torch.no_grad():
logits, Y_prob, Y_hat, _, results_dict = model(data)
acc_logger.log(Y_hat, label)
probs = Y_prob.cpu().numpy()
all_probs[batch_idx] = probs
all_labels[batch_idx] = label.item()
all_preds[batch_idx] = Y_hat.item()
patient_results.update({slide_id: {'slide_id': np.array(slide_id), 'prob': probs, 'label': label.item()}})
error = calculate_error(Y_hat, label)
test_error += error
del data
test_error /= len(loader)
aucs = []
if len(np.unique(all_labels)) == 1:
auc_score = -1
else:
if args.n_classes == 2:
auc_score = roc_auc_score(all_labels, all_probs[:, 1])
fpr, tpr, thresholds = roc_curve(all_labels, all_probs[:,1])
J = tpr - fpr
ix = np.argmax(J)
cutoff = thresholds[ix]
#print(cutoff)
y_pred = np.array(all_probs[:,1] > cutoff).astype(int)
tn, fp, fn, tp = confusion_matrix(all_labels, y_pred).ravel()
classification_report(all_labels, y_pred)
else:
binary_labels = label_binarize(all_labels, classes=[i for i in range(args.n_classes)])
for class_idx in range(args.n_classes):
if class_idx in all_labels:
fpr, tpr, _ = roc_curve(binary_labels[:, class_idx], all_probs[:, class_idx])
aucs.append(auc(fpr, tpr))
else:
aucs.append(float('nan'))
if args.micro_average:
binary_labels = label_binarize(all_labels, classes=[i for i in range(args.n_classes)])
fpr, tpr, _ = roc_curve(binary_labels.ravel(), all_probs.ravel())
auc_score = auc(fpr, tpr)
else:
auc_score = np.nanmean(np.array(aucs))
results_dict = {'slide_id': slide_ids, 'Y': all_labels, 'Y_hat': all_preds}
for c in range(args.n_classes):
results_dict.update({'p_{}'.format(c): all_probs[:,c]})
df = pd.DataFrame(results_dict)
return patient_results, test_error, auc_score, df, acc_logger
| 4,650 | 33.451852 | 114 | py |
benchmarking_graph | benchmarking_graph-main/src/md.py | from functools import partial
import jax
import jax.numpy as jnp
from jax import jit, lax, value_and_grad
try:
    from jax.example_libraries import optimizers  # JAX >= 0.2.25
except ImportError:  # older JAX versions
    from jax.experimental import optimizers
from .nve import nve, nve2, nve3
# ===============================
# ===============================
def dynamics_generator(ensemble, force_fn, shift_fn, params, dt, mass,):
func = partial(force_fn, mass=mass)
init, apply = ensemble(lambda R, V: func(R, V, params), shift_fn, dt)
def f(state, runs=100, stride=10):
return solve_dynamics(
state, apply, runs=runs, stride=stride)
return init, f
def predition(R, V, params, force_fn, shift_fn, dt, mass, runs=1000, stride=10):
func = partial(force_fn, mass=mass)
init, apply = nve(lambda R, V: func(R, V, params), shift_fn, dt)
state = init(R, V, mass)
states = solve_dynamics(state, apply, runs=runs, stride=stride)
return states
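# Hedged sketch of the API above (relies only on the call pattern visible in
# this module; `shift` is the open-boundary shift defined near the bottom):
# rolling out a 1-D harmonic oscillator with predition.
#   def force_fn(R, V, params, mass=1.0):
#       return -params["k"] * R            # F = -k x
#   traj = predition(jnp.array([[1.0]]), jnp.array([[0.0]]), {"k": 1.0},
#                    force_fn, shift, dt=1e-2, mass=jnp.array([1.0]),
#                    runs=100, stride=10)  # 100 snapshots, 10 steps apart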
def predition4(R, V, params, force_fn, shift_fn, dt, mass, runs=1000, stride=10):
    # NOTE: `nve4` is not among the integrators imported from .nve above; the
    # .nve module must also expose it for this helper to run.
    func = partial(force_fn, mass=mass)
    init, apply = nve4(lambda R, V: func(R, V, params), shift_fn, dt)
state = init(R, V, mass)
states = solve_dynamics(state, apply, runs=runs, stride=stride)
return states
def predition2(R, V, params, change_R_V, dt, mass, runs=1000, stride=10):
# func = partial(force_fn, mass=mass)
init, apply = nve2(params, change_R_V, dt)
state = init(R, V, mass)
states = solve_dynamics2(state, apply, runs=runs, stride=stride)
return states
def predition3(R, V, params, change_Acc, dt, mass, runs=1000, stride=10):
# func = partial(force_fn, mass=mass)
init, apply = nve3(params, change_Acc, dt)
state = init(R, V, mass)
states = solve_dynamics(state, apply, runs=runs, stride=stride)
return states
def solve_dynamics(init_state, apply, runs=100, stride=10):
step = jit(lambda i, state: apply(state))
def f(state):
y = jax.lax.fori_loop(0, stride, step, state)
return y, y
def func(state, i): return f(state)
@jit
def scan(init_state):
return jax.lax.scan(func, init_state, jnp.array(range(runs)))
final_state, traj = scan(init_state)
return traj
def solve_dynamics2(init_state, apply, runs=100, stride=10):
step = jit(lambda i, state: apply(state))
def func(state, i):
x = apply(state)
return x,x
@jit
def scan(init_state):
return jax.lax.scan(func, init_state, jnp.array(range(runs)))
final_state, traj = scan(init_state)
return traj
# def solve_dynamics(state, apply, runs=100, stride=10):
# step = jit(lambda i, state: apply(state))
# states = [state]
# for i in range(runs):
# state = lax.fori_loop(0, stride, step, state)
# states += [state]
# return states
def minimize(R, params, shift, pot_energy_fn, steps=10, gtol=1.0e-7, lr=1.0e-3):
opt_init, opt_update, get_params = optimizers.adam(lr)
opt_state = opt_init(R)
def gloss2(R):
return value_and_grad(lambda R: pot_energy_fn(R, params))(R)
print(f"Step\tPot. Eng.\t\tTolerance")
for i in range(steps):
v, grads_ = gloss2(R)
grads = jnp.clip(jnp.nan_to_num(grads_), a_min=-1.0, a_max=1.0)
opt_state = opt_update(0, grads, opt_state)
R_ = get_params(opt_state)
dR = R_ - R
R, _ = shift(R, dR, R)
if i % 100 == 0:
_tol = jnp.square(grads).sum()
print(f"{i}\t{v}\t\t{_tol}")
if _tol < gtol:
print(f"gtol reached: {_tol} which is < {gtol}")
break
return R
def _reflective(R, dR, V, _min=0.0, _max=4.0):
V_ = V
R_ = R
dR_ = jnp.maximum(jnp.minimum(dR, (_max-_min)/2), -(_max-_min)/2)
V_ = jnp.where(R + dR_ < _min, -V, V)
V_ = jnp.where(R + dR_ > _max, -V, V_)
R_ = jnp.where(R + dR_ < _min, 2*_min - (R+dR_), R+dR_)
R_ = jnp.where(R + dR_ > _max, 2*_max - (R+dR_), R_)
return R_, V_
def _periodic(R, dR, V, _min=0.0, _max=4.0):
V_ = V
R_ = R
dR_ = jnp.maximum(jnp.minimum(dR, (_max-_min)/2), -(_max-_min)/2)
R_ = jnp.where(R + dR_ < _min, _max - _min + (R+dR_), R+dR_)
R_ = jnp.where(R + dR_ > _max, _min - _max + (R+dR_), R_)
return R_, V_
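# Behaviour of the two boundary handlers above for a box [0, 4] (worked by
# hand, added for illustration): a particle at R=3.5 displaced by dR=1.0
# with velocity V=1.0.
#   _periodic(3.5, 1.0, 1.0)    # -> (0.5, 1.0): wraps around, V unchanged
#   _reflective(3.5, 1.0, 1.0)  # -> (3.5, -1.0): bounces off x=4, V flipped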
def _open(R, dR, V):
"""R -> R + dR
V -> V
:param R: Position
:type R: array
:param dR: Displacement
:type dR: array
:param V: Velocity
:type V: array
:return: (R+dR, V)
:rtype: tuple
"""
return R+dR, V
shift = _open
def displacement(a, b):
"""A - B
:param a: Vector A
:type a: array
:param b: Vector B
:type b: array
:return: a-b
:rtype: array
"""
return a-b
| 5,251 | 27.699454 | 83 | py |
benchmarking_graph | benchmarking_graph-main/src/hamiltonian.py | import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from jax.experimental import ode
# from shadow.plot import panel
def hamiltonian(x, p, params):
    """Placeholder Hamiltonian H(x, p; params) with vector x (positions) and
    vector p (momenta).

    Stub only (returns None): a concrete Hamiltonian such as lnn._H, or one
    assembled from _T and SPRING below, is passed into get_zdot_lambda instead.
    """
    return None
def ps(*args):
for i in args:
print(i.shape)
def get_zdot_lambda(N, Dim, hamiltonian, drag=None, constraints=None, external_force=None):
dim = N*Dim
I = jnp.eye(dim)
J = jnp.zeros((2*dim, 2*dim))
    # jax.ops.index_update has been removed from JAX; .at[...].set(...) is the
    # equivalent functional update.
    J = J.at[:dim, dim:].set(I)
    J = J.at[dim:, :dim].set(-I)
    J2 = jnp.zeros((2*dim, 2*dim))
    J2 = J2.at[:dim, :dim].set(I)
    J2 = J2.at[dim:, dim:].set(I)
def dH_dz(x, p, params):
dH_dx = jax.grad(hamiltonian, 0)(x, p, params)
dH_dp = jax.grad(hamiltonian, 1)(x, p, params)
return jnp.hstack([dH_dx.flatten(), dH_dp.flatten()])
if drag is None:
def drag(x, p, params):
return 0.0
def dD_dz(x, p, params):
dD_dx = jax.grad(drag, 0)(x, p, params)
dD_dp = jax.grad(drag, 1)(x, p, params)
return jnp.hstack([dD_dx.flatten(), dD_dp.flatten()])
if external_force is None:
def external_force(x, p, params):
return 0.0*p
if constraints is None:
def constraints(x, p, params):
return jnp.zeros((1, 2*dim))
def fn_zdot(x, p, params):
dH = dH_dz(x, p, params)
dD = J2 @ dD_dz(x, p, params)
dD = - J @ dD
F = jnp.hstack(
[jnp.zeros(dim), external_force(x, p, params).flatten()])
F = -J @ F
S = dH + J2 @ dD + F
A = constraints(x, p, params).reshape(-1, 2*dim)
Aᵀ = A.T
INV = jnp.linalg.pinv(A @ J @ Aᵀ)
λ = -INV @ A @ J @ S
zdot = J @ (S + Aᵀ @ λ)
return zdot.reshape(2*N, Dim)
def lambda_force(x, p, params):
dH = dH_dz(x, p, params)
dD = J2 @ dD_dz(x, p, params)
dD = - J @ dD
F = jnp.hstack(
[jnp.zeros(dim), external_force(x, p, params).flatten()])
F = -J @ F
S = dH + J2 @ dD + F
A = constraints(x, p, params).reshape(-1, 2*dim)
Aᵀ = A.T
INV = jnp.linalg.pinv(A @ J @ Aᵀ)
λ = -INV @ A @ J @ S
return (J @ Aᵀ @ λ).reshape(2*N, Dim)
return fn_zdot, lambda_force
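# Reading of the solver above (sketch): with z = (x, p) and symplectic matrix
# J, the constrained equations of motion solved by fn_zdot are
#     zdot = J (S + Aᵀ λ),   λ = -(A J Aᵀ)⁺ A J S,
# where S collects dH/dz plus the drag and external-force contributions and
# A = Dψ(z) is the constraint Jacobian. A hypothetical free-particle call:
#   def H(x, p, params):
#       return 0.5 * jnp.square(p).sum()
#   fn_zdot, lam = get_zdot_lambda(N=1, Dim=2, hamiltonian=H)
#   fn_zdot(jnp.ones((1, 2)), jnp.ones((1, 2)), None)   # shape (2, 2)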
def get_constraints(N, Dim, phi_, mass=None):
if mass is None:
mass = 1.0
def phi(x): return phi_(x.reshape(N, Dim))
def phidot(x, p):
Dphi = jax.jacobian(phi)(x.flatten())
pm = (p.flatten() / mass)
return Dphi @ pm
def psi(z):
x, p = jnp.split(z, 2)
return jnp.vstack([phi(x), phidot(x, p)])
def Dpsi(z):
return jax.jacobian(psi)(z)
def fn(x, p, params):
z = jnp.vstack([x, p])
return Dpsi(z)
return fn
def z(x, p): return jnp.vstack([x, p])
def _T(p, mass=jnp.array([1.0])):
if len(mass) != len(p):
mass = mass[0]*jnp.ones((len(p)))
out = (1/mass)*jnp.square(p).sum(axis=1)
return 0.5*out.sum()
def SPRING(x, stiffness=1.0, length=1.0):
"""Linear spring, v=0.5kd^2.
:param x: Inter-particle distance
:type x: float
:param stiffness: Spring stiffness constant, defaults to 1.0
:type stiffness: float, optional
    :param length: Equilibrium length, defaults to 1.0
:type length: float, optional
:return: energy
:rtype: float
"""
x_ = jnp.linalg.norm(x, keepdims=True)
return 0.5*stiffness*(x_ - length)**2
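# Sketch (added for illustration): assembling a concrete two-particle
# Hamiltonian from the pieces above, H = T(p) + V_spring(x1 - x2), and
# plugging it into get_zdot_lambda.
#   def H(x, p, params):
#       return _T(p, mass=jnp.array([1.0])) + SPRING(x[0] - x[1]).sum()
#   fn_zdot, _ = get_zdot_lambda(N=2, Dim=2, hamiltonian=H)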
| 3,630 | 25.698529 | 91 | py |
benchmarking_graph | benchmarking_graph-main/src/utils.py | import importlib
from functools import partial
import jax
import jax.numpy as jnp
import jax_md
import numpy as np
from jax import grad, jit, random, vmap
from jax_md import smap
from . import lnn, models
def colnum(i, j, N):
"""Gives linear index for upper triangle matrix.
"""
assert (j >= i), "j >= i, Upper Triangle indices."
assert (i < N) and (j < N), "i<N & j<N where i and \
j are atom type and N is number of species."
return int(i*N - i*(i-1)/2 + j-i + 1)
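# Worked example of the upper-triangle indexing above, for N = 3 species:
#   colnum(0, 0, 3) -> 1   colnum(0, 1, 3) -> 2   colnum(0, 2, 3) -> 3
#   colnum(1, 1, 3) -> 4   colnum(1, 2, 3) -> 5   colnum(2, 2, 3) -> 6
# i.e. pairs (i, j) with j >= i are numbered row by row, 1-based.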
def pair2mat(fn, displacement_or_metric, species, parameters,
ignore_unused_parameters=True,
reduce_axis=None,
keepdims=False,
use_onehot=False,
**kwargs):
kwargs, param_combinators = smap._split_params_and_combinators(kwargs)
merge_dicts = partial(jax_md.util.merge_dicts,
ignore_unused_parameters=ignore_unused_parameters)
d = lnn.t1(displacement=displacement_or_metric)
if species is None:
def fn_mapped(R: smap.Array, **dynamic_kwargs) -> smap.Array:
_kwargs = merge_dicts(kwargs, dynamic_kwargs)
_kwargs = smap._kwargs_to_parameters(
None, _kwargs, param_combinators)
dr = d(R)
# NOTE(schsam): Currently we place a diagonal mask no matter what function
# we are mapping. Should this be an option?
return smap.high_precision_sum(fn(dr, **_kwargs),
axis=reduce_axis, keepdims=keepdims) * smap.f32(0.5)
elif jax_md.util.is_array(species):
species = np.array(species)
smap._check_species_dtype(species)
species_count = int(np.max(species) + 1)
if reduce_axis is not None or keepdims:
raise ValueError
def onehot(i, j, N):
col = colnum(i, j, species_count)
oneh = jnp.zeros(
(N, colnum(species_count-1, species_count-1, species_count)))
            oneh = oneh.at[:, int(col - 1)].set(1)  # .at[].set() replaces removed jax.ops.index_update
return oneh
def pot_pair_wise():
if use_onehot:
def func(i, j, dr, **s_kwargs):
dr = jnp.linalg.norm(dr, axis=1, keepdims=True)
ONEHOT = onehot(i, j, len(dr))
h = vmap(models.forward_pass, in_axes=(
None, 0))(parameters["ONEHOT"], ONEHOT)
dr = jnp.concatenate([h, dr], axis=1)
return smap.high_precision_sum(
fn(dr, params=parameters["PEF"], **s_kwargs))
return func
else:
def func(i, j, dr, **s_kwargs):
return smap.high_precision_sum(
fn(dr, **parameters[i][j-i], **s_kwargs))
return func
pot_pair_wise_fn = pot_pair_wise()
def fn_mapped(R, **dynamic_kwargs):
U = smap.f32(0.0)
for i in range(species_count):
for j in range(i, species_count):
_kwargs = merge_dicts(kwargs, dynamic_kwargs)
s_kwargs = smap._kwargs_to_parameters(
(i, j), _kwargs, param_combinators)
Ra = R[species == i]
Rb = R[species == j]
if j == i:
dr = d(Ra)
dU = pot_pair_wise_fn(i, j, dr, **s_kwargs)
U = U + smap.f32(0.5) * dU
else:
dr = vmap(vmap(displacement_or_metric, in_axes=(0, None)), in_axes=(
None, 0))(Ra, Rb).reshape(-1, Ra.shape[1])
dU = pot_pair_wise_fn(i, j, dr, **s_kwargs)
U = U + dU
return U
return fn_mapped
def map_parameters(fn, displacement, species, parameters, **kwargs):
mapped_fn = lnn.MAP(fn)
def f(x, *args, **kwargs):
out = mapped_fn(x, *args, **kwargs)
return out
return pair2mat(f, displacement, species, parameters, **kwargs)
class VV_unroll():
def __init__(self, R, dt=1):
self.R = R
self.dt = dt
def get_position(self):
r = self.R[1:-1]
return r
def get_acceleration(self, dt=None):
r = self.R[1:-1]
r_minus = self.R[:-2]
r_plus = self.R[2:]
if dt is not None:
return (r_plus + r_minus - 2*r)/dt**2
else:
return (r_plus + r_minus - 2*r)/self.dt**2
def get_velocity(self, dt=None):
r_minus = self.R[:-2]
r_plus = self.R[2:]
if dt is not None:
return (r_plus - r_minus)/2/dt
else:
return (r_plus - r_minus)/2/self.dt
def get_kin(self, dt=None):
return self.get_position(), self.get_velocity(dt=dt), self.get_acceleration(dt=dt)
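# Finite-difference reading of VV_unroll (added sketch): velocity and
# acceleration are recovered from a stored trajectory by central differences,
#     v(t) ≈ (R[t+1] - R[t-1]) / (2 dt),  a(t) ≈ (R[t+1] + R[t-1] - 2 R[t]) / dt².
# Quick check on R(t) = t² sampled at dt = 1 (exact a = 2 everywhere):
#   vv = VV_unroll(jnp.arange(5.0) ** 2)   # R = [0, 1, 4, 9, 16]
#   vv.get_acceleration()                  # -> [2., 2., 2.]
#   vv.get_velocity()                      # -> [2., 4., 6.]  (= 2t at t = 1, 2, 3)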
class States:
    def __init__(self, state=None, const_size=True):
        # Assign these unconditionally: the original set them only in the
        # state-is-None branch, so the else branch crashed reading
        # self.const_size (and makearrays() crashed on self.isarrays).
        self.isarrays = False
        self.const_size = const_size
        if state is None:
            self.position = []
            self.velocity = []
            self.force = []
            if self.const_size:
                self.mass = None
            else:
                self.mass = []
        else:
            self.position = [state.position]
            self.velocity = [state.velocity]
            self.force = [state.force]
            if self.const_size:
                self.mass = state.mass
            else:
                self.mass = [state.mass]
def add(self, state):
self.position += [state.position]
self.velocity += [state.velocity]
self.force += [state.force]
if self.const_size:
if self.mass is None:
self.mass = state.mass
else:
self.mass += [state.mass]
def fromlist(self, states, const_size=True):
out = States(const_size=const_size)
for state in states:
out.add(state)
return out
def makearrays(self):
if not(self.isarrays):
self.position = jnp.array(self.position)
self.velocity = jnp.array(self.velocity)
self.force = jnp.array(self.force)
self.mass = jnp.array([self.mass])
self.isarrays = True
def get_array(self):
self.makearrays()
return self.position, self.velocity, self.force
def get_mass(self):
self.makearrays()
return self.mass
def get_kin(self):
self.makearrays()
if self.const_size:
            # makearrays() already wrapped mass in a leading axis, so plain
            # broadcasting works; reshape(1, shape-tuple) was invalid.
            acceleration = self.force / self.mass
else:
acceleration = self.force/self.mass
return self.position, self.velocity, acceleration
class States_modified:
    def __init__(self, state=None, const_size=True):
        # As in States above, assign these before branching so the else branch
        # can safely read self.const_size.
        self.isarrays = False
        self.const_size = const_size
        if state is None:
            self.position = []
            self.velocity = []
            self.force = []
            self.change_position = []
            self.change_velocity = []
            if self.const_size:
                self.mass = None
            else:
                self.mass = []
        else:
            self.position = [state.position]
            self.velocity = [state.velocity]
            self.force = [state.force]
            self.change_position = [state.change_position]
            self.change_velocity = [state.change_velocity]
            if self.const_size:
                self.mass = state.mass
            else:
                self.mass = [state.mass]
def add(self, state):
self.position += [state.position]
self.velocity += [state.velocity]
self.force += [state.force]
self.change_position += [state.change_position]
self.change_velocity += [state.change_velocity]
if self.const_size:
if self.mass is None:
self.mass = state.mass
else:
self.mass += [state.mass]
def fromlist(self, states, const_size=True):
out = States_modified(const_size=const_size)
for state in states:
out.add(state)
return out
def makearrays(self):
if not(self.isarrays):
self.position = jnp.array(self.position)
self.velocity = jnp.array(self.velocity)
self.force = jnp.array(self.force)
self.mass = jnp.array([self.mass])
self.change_position = jnp.array(self.change_position)
self.change_velocity = jnp.array(self.change_velocity)
self.isarrays = True
def get_array(self):
self.makearrays()
return self.position, self.velocity, self.force, self.change_position, self.change_velocity
def get_mass(self):
self.makearrays()
return self.mass
def get_kin(self):
self.makearrays()
if self.const_size:
            # Broadcasting over the leading axis added by makearrays(); the
            # original reshape(1, shape-tuple) call was invalid.
            acceleration = self.force / self.mass
else:
acceleration = self.force/self.mass
return self.position, self.velocity, acceleration
def reload(list_of_modules):
for module in list_of_modules:
try:
print("Reload: ", module.__name__)
importlib.reload(module)
        except Exception:
            print("Reimport failed for:", module.__name__)
def timeit(stmt, setup="", number=5):
from timeit import timeit
return timeit(stmt=stmt, setup=setup, number=number)
def factorial(n):
if n == 0:
return 1
else:
return n*factorial(n-1)
def nCk(n, k):
return factorial(n)//factorial(k)//factorial(n-k)
| 9,647 | 31.928328 | 99 | py |
benchmarking_graph | benchmarking_graph-main/src/graph.py | # Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of Graph Neural Network models."""
import functools
from typing import Any, Callable, Iterable, Mapping, Optional, Union
import jax
import jax.numpy as jnp
import jax.tree_util as tree
import numpy as np
from frozendict import frozendict
from jax import vmap
from jraph._src import graph as gn_graph
from jraph._src import utils
from .models import SquarePlus, forward_pass
jax.tree_util.register_pytree_node(
frozendict,
flatten_func=lambda s: (tuple(s.values()), tuple(s.keys())),
unflatten_func=lambda k, xs: frozendict(zip(k, xs)))
# As of 04/2020 pytype doesn't support recursive types.
# pytype: disable=not-supported-yet
ArrayTree = Union[jnp.ndarray,
Iterable['ArrayTree'], Mapping[Any, 'ArrayTree']]
# All features will be an ArrayTree.
NodeFeatures = EdgeFeatures = SenderFeatures = ReceiverFeatures = Globals = ArrayTree
# Signature:
# (edges of each node to be aggregated, segment ids, number of segments) ->
# aggregated edges
AggregateEdgesToNodesFn = Callable[
[EdgeFeatures, jnp.ndarray, int], NodeFeatures]
# Signature:
# (nodes of each graph to be aggregated, segment ids, number of segments) ->
# aggregated nodes
AggregateNodesToGlobalsFn = Callable[[NodeFeatures, jnp.ndarray, int],
Globals]
# Signature:
# (edges of each graph to be aggregated, segment ids, number of segments) ->
# aggregated edges
AggregateEdgesToGlobalsFn = Callable[[EdgeFeatures, jnp.ndarray, int],
Globals]
# Signature:
# (edge features, sender node features, receiver node features, globals) ->
# attention weights
AttentionLogitFn = Callable[
[EdgeFeatures, SenderFeatures, ReceiverFeatures, Globals], ArrayTree]
# Signature:
# (edge features, weights) -> edge features for node update
AttentionReduceFn = Callable[[EdgeFeatures, ArrayTree], EdgeFeatures]
# Signature:
# (edges to be normalized, segment ids, number of segments) ->
# normalized edges
AttentionNormalizeFn = Callable[[EdgeFeatures, jnp.ndarray, int], EdgeFeatures]
# Signature:
# (edge features, sender node features, receiver node features, globals) ->
# updated edge features
GNUpdateEdgeFn = Callable[
[EdgeFeatures, SenderFeatures, ReceiverFeatures, Globals], EdgeFeatures]
# Signature:
# (node features, outgoing edge features, incoming edge features,
# globals) -> updated node features
GNUpdateNodeFn = Callable[
[NodeFeatures, SenderFeatures, ReceiverFeatures, Globals], NodeFeatures]
GNUpdateGlobalFn = Callable[[NodeFeatures, EdgeFeatures, Globals], Globals]
# Signature:
# (node features, outgoing edge features, incoming edge features,
# globals) -> updated node features
# V: Potential energy of edge
GN_to_V_Fn = Callable[[EdgeFeatures, NodeFeatures], float]
GN_to_T_Fn = Callable[[NodeFeatures], float]
def GNNet(
V_fn: GN_to_V_Fn,
initial_edge_embed_fn: Optional[GNUpdateEdgeFn],
initial_node_embed_fn: Optional[GNUpdateEdgeFn],
update_edge_fn: Optional[GNUpdateEdgeFn],
update_node_fn: Optional[GNUpdateNodeFn],
T_fn: GN_to_T_Fn = None,
update_global_fn: Optional[GNUpdateGlobalFn] = None,
aggregate_nodes_for_globals_fn: AggregateNodesToGlobalsFn = utils
.segment_sum,
aggregate_edges_for_globals_fn: AggregateEdgesToGlobalsFn = utils
.segment_sum,
attention_logit_fn: Optional[AttentionLogitFn] = None,
attention_normalize_fn: Optional[AttentionNormalizeFn] = utils
.segment_softmax,
attention_reduce_fn: Optional[AttentionReduceFn] = None,
N=1,):
"""Returns a method that applies a configured GraphNetwork.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
    Example usage::
      gn = GNNet(V_fn, initial_edge_embed_fn, initial_node_embed_fn,
                 update_edge_fn, update_node_fn, N=num_message_passing_steps)
      # The N message-passing rounds run inside a single call:
      graph, V, T = gn(graph)
Args:
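    V_fn: function mapping edge and node features to a scalar potential energy
      V, or None to skip the potential term.
    T_fn: function mapping node features to a scalar kinetic energy T, or None
      to skip the kinetic term.
    initial_edge_embed_fn: function computing the initial edge embeddings, or
      None to skip the embedding step.
    initial_node_embed_fn: function computing the initial node embeddings, or
      None to skip the embedding step.
    N: number of message-passing rounds performed per call.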
update_edge_fn: function used to update the edges or None to deactivate edge
updates.
update_node_fn: function used to update the nodes or None to deactivate node
updates.
update_global_fn: function used to update the globals or None to deactivate
globals updates.
aggregate_nodes_for_globals_fn: function used to aggregate the nodes for the
globals.
aggregate_edges_for_globals_fn: function used to aggregate the edges for the
globals.
attention_logit_fn: function used to calculate the attention weights or
None to deactivate attention mechanism.
attention_normalize_fn: function used to normalize raw attention logits or
None if attention mechanism is not active.
attention_reduce_fn: function used to apply weights to the edge features or
None if attention mechanism is not active.
Returns:
A method that applies the configured GraphNetwork.
"""
    def not_both_supplied(x, y):
        return (x is None) != (y is None)
if not_both_supplied(attention_reduce_fn, attention_logit_fn):
raise ValueError(('attention_logit_fn and attention_reduce_fn must both be'
' supplied.'))
def _ApplyGraphNet(graph):
"""Applies a configured GraphNetwork to a graph.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
    than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
Many popular Graph Neural Networks can be implemented as special cases of
GraphNets, for more information please see the paper.
Args:
graph: a `GraphsTuple` containing the graph.
Returns:
Updated `GraphsTuple`.
"""
# pylint: disable=g-long-lambda
nodes, edges, receivers, senders, globals_, n_node, n_edge = graph
# Equivalent to jnp.sum(n_node), but jittable
# calculate number of nodes in graph
sum_n_node = tree.tree_leaves(nodes)[0].shape[0]
# calculate number of edges in graph
sum_n_edge = senders.shape[0]
        # Check that all node arrays have the same length (the number of nodes).
if not tree.tree_all(
tree.tree_map(lambda n: n.shape[0] == sum_n_node, nodes)):
raise ValueError(
'All node arrays in nest must contain the same number of nodes.')
# Initial sent info
sent_attributes = tree.tree_map(lambda n: n[senders], nodes)
# Initial received info
received_attributes = tree.tree_map(lambda n: n[receivers], nodes)
# Here we scatter the global features to the corresponding edges,
# giving us tensors of shape [num_edges, global_feat].
        # i.e. create an array per edge for global attributes
global_edge_attributes = tree.tree_map(lambda g: jnp.repeat(
g, n_edge, axis=0, total_repeat_length=sum_n_edge), globals_)
# Here we scatter the global features to the corresponding nodes,
# giving us tensors of shape [num_nodes, global_feat].
        # i.e. create an array per node for global attributes
global_attributes = tree.tree_map(lambda g: jnp.repeat(
g, n_node, axis=0, total_repeat_length=sum_n_node), globals_)
# apply initial edge embeddings
if initial_edge_embed_fn:
edges = initial_edge_embed_fn(edges, sent_attributes, received_attributes,
global_edge_attributes)
# apply initial node embeddings
if initial_node_embed_fn:
nodes = initial_node_embed_fn(nodes, sent_attributes,
received_attributes, global_attributes)
        # Now perform message passing N times
for pass_i in range(N):
if attention_logit_fn:
logits = attention_logit_fn(edges, sent_attributes, received_attributes,
global_edge_attributes)
tree_calculate_weights = functools.partial(
attention_normalize_fn,
segment_ids=receivers,
num_segments=sum_n_node)
weights = tree.tree_map(tree_calculate_weights, logits)
edges = attention_reduce_fn(edges, weights)
if update_node_fn:
nodes = update_node_fn(
nodes, edges, senders, receivers,
global_attributes, sum_n_node)
if update_edge_fn:
senders_attributes = tree.tree_map(
lambda n: n[senders], nodes)
receivers_attributes = tree.tree_map(
lambda n: n[receivers], nodes)
edges = update_edge_fn(edges, senders_attributes, receivers_attributes,
global_edge_attributes, pass_i == N-1)
if update_global_fn:
n_graph = n_node.shape[0]
graph_idx = jnp.arange(n_graph)
# To aggregate nodes and edges from each graph to global features,
# we first construct tensors that map the node to the corresponding graph.
# For example, if you have `n_node=[1,2]`, we construct the tensor
# [0, 1, 1]. We then do the same for edges.
node_gr_idx = jnp.repeat(
graph_idx, n_node, axis=0, total_repeat_length=sum_n_node)
edge_gr_idx = jnp.repeat(
graph_idx, n_edge, axis=0, total_repeat_length=sum_n_edge)
# We use the aggregation function to pool the nodes/edges per graph.
node_attributes = tree.tree_map(
lambda n: aggregate_nodes_for_globals_fn(
n, node_gr_idx, n_graph),
nodes)
                edge_attributes = tree.tree_map(
lambda e: aggregate_edges_for_globals_fn(
e, edge_gr_idx, n_graph),
edges)
# These pooled nodes are the inputs to the global update fn.
globals_ = update_global_fn(
                    node_attributes, edge_attributes, globals_)
V = 0.0
if V_fn is not None:
V += V_fn(edges, nodes)
T = 0.0
if T_fn is not None:
T += T_fn(nodes)
# pylint: enable=g-long-lambda
return gn_graph.GraphsTuple(
nodes=nodes,
edges=edges,
receivers=receivers,
senders=senders,
globals=globals_,
n_node=n_node,
n_edge=n_edge), V, T
return _ApplyGraphNet
# Signature:
# edge features -> embedded edge features
EmbedEdgeFn = Callable[[EdgeFeatures], EdgeFeatures]
# Signature:
# node features -> embedded node features
EmbedNodeFn = Callable[[NodeFeatures], NodeFeatures]
# Signature:
# globals features -> embedded globals features
EmbedGlobalFn = Callable[[Globals], Globals]
def get_fully_connected_senders_and_receivers(
num_particles: int, self_edges: bool = False,
):
"""Returns senders and receivers for fully connected particles."""
particle_indices = np.arange(num_particles)
senders, receivers = np.meshgrid(particle_indices, particle_indices)
senders, receivers = senders.flatten(), receivers.flatten()
if not self_edges:
mask = senders != receivers
senders, receivers = senders[mask], receivers[mask]
return senders, receivers
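# Example: num_particles=3 with self_edges=False yields
#   senders   = [1, 2, 0, 2, 0, 1]
#   receivers = [0, 0, 1, 1, 2, 2]
# i.e. every ordered (sender, receiver) pair with sender != receiver.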
def cal_graph(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
num_species = 1
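    # The helpers below wrap MLP `forward_pass` calls with `vmap` so each
    # network is applied row-wise: fb embeds edges, fne/fneke embed nodes,
    # fv/fe perform the message-passing updates, ff1-ff3 read out
    # potential-energy terms, and ke reads out the kinetic-energy term.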
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
# eij = dr
eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
emb = fb(eij)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
emb_pos = jnp.hstack([emb, nodes["position"]])
emb_vel = jnp.hstack(
[fneke(ohe), jnp.sum(jnp.square(nodes["velocity"]), axis=1, keepdims=True)])
return frozendict({"node_embed": emb,
"node_pos_embed": emb_pos,
"node_vel_embed": emb_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
if useonlyedge:
def edge_node_to_V_fn(edges, nodes):
vij = ff1(edges["edge_embed"])
# print(vij, edges["eij"])
return vij.sum()
else:
def edge_node_to_V_fn(edges, nodes):
vij = ff1(edges["edge_embed"]).sum()
vi = 0
vi = vi + ff2(nodes["node_embed"]).sum()
vi = vi + ff3(nodes["node_pos_embed"]).sum()
return vij + vi
def node_to_T_fn(nodes):
return ke(nodes["node_vel_embed"]).sum()
    if not useT:
node_to_T_fn = None
Net = GNNet(N=mpass,
V_fn=edge_node_to_V_fn,
T_fn=node_to_T_fn,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
return Net(graph)
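# `cal_graph` returns (updated_graph, V, T), the learned potential and kinetic
# energies; downstream code can combine them, e.g. into a Lagrangian L = T - V.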
def GNNet_modified(
#V_fn: GN_to_V_Fn,
initial_edge_embed_fn: Optional[GNUpdateEdgeFn],
initial_node_embed_fn: Optional[GNUpdateEdgeFn],
update_edge_fn: Optional[GNUpdateEdgeFn],
update_node_fn: Optional[GNUpdateNodeFn],
        T_fn: Optional[GN_to_T_Fn] = None,
update_global_fn: Optional[GNUpdateGlobalFn] = None,
aggregate_nodes_for_globals_fn: AggregateNodesToGlobalsFn = utils
.segment_sum,
aggregate_edges_for_globals_fn: AggregateEdgesToGlobalsFn = utils
.segment_sum,
attention_logit_fn: Optional[AttentionLogitFn] = None,
attention_normalize_fn: Optional[AttentionNormalizeFn] = utils
.segment_softmax,
attention_reduce_fn: Optional[AttentionReduceFn] = None,
N=1,):
"""Returns a method that applies a configured GraphNetwork.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
    Example usage::
      gn = GNNet_modified(initial_edge_embed_fn, initial_node_embed_fn,
                          update_edge_fn, update_node_fn, T_fn=T_fn,
                          N=num_message_passing_steps)
      graph, ds = gn(graph)
Args:
update_edge_fn: function used to update the edges or None to deactivate edge
updates.
update_node_fn: function used to update the nodes or None to deactivate node
updates.
update_global_fn: function used to update the globals or None to deactivate
globals updates.
aggregate_nodes_for_globals_fn: function used to aggregate the nodes for the
globals.
aggregate_edges_for_globals_fn: function used to aggregate the edges for the
globals.
attention_logit_fn: function used to calculate the attention weights or
None to deactivate attention mechanism.
attention_normalize_fn: function used to normalize raw attention logits or
None if attention mechanism is not active.
attention_reduce_fn: function used to apply weights to the edge features or
None if attention mechanism is not active.
Returns:
A method that applies the configured GraphNetwork.
"""
    def not_both_supplied(x, y):
        return (x is None) != (y is None)
if not_both_supplied(attention_reduce_fn, attention_logit_fn):
raise ValueError(('attention_logit_fn and attention_reduce_fn must both be'
' supplied.'))
def _ApplyGraphNet(graph):
"""Applies a configured GraphNetwork to a graph.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
    than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
Many popular Graph Neural Networks can be implemented as special cases of
GraphNets, for more information please see the paper.
Args:
graph: a `GraphsTuple` containing the graph.
Returns:
Updated `GraphsTuple`.
"""
# pylint: disable=g-long-lambda
nodes, edges, receivers, senders, globals_, n_node, n_edge = graph
# Equivalent to jnp.sum(n_node), but jittable
# calculate number of nodes in graph
sum_n_node = tree.tree_leaves(nodes)[0].shape[0]
# calculate number of edges in graph
sum_n_edge = senders.shape[0]
        # Check that all node arrays have the same length (the number of nodes).
if not tree.tree_all(
tree.tree_map(lambda n: n.shape[0] == sum_n_node, nodes)):
raise ValueError(
'All node arrays in nest must contain the same number of nodes.')
# Initial sent info
sent_attributes = tree.tree_map(lambda n: n[senders], nodes)
# Initial received info
received_attributes = tree.tree_map(lambda n: n[receivers], nodes)
# Here we scatter the global features to the corresponding edges,
# giving us tensors of shape [num_edges, global_feat].
        # i.e. create an array per edge for global attributes
global_edge_attributes = tree.tree_map(lambda g: jnp.repeat(
g, n_edge, axis=0, total_repeat_length=sum_n_edge), globals_)
# Here we scatter the global features to the corresponding nodes,
# giving us tensors of shape [num_nodes, global_feat].
        # i.e. create an array per node for global attributes
global_attributes = tree.tree_map(lambda g: jnp.repeat(
g, n_node, axis=0, total_repeat_length=sum_n_node), globals_)
# apply initial edge embeddings
if initial_edge_embed_fn:
edges = initial_edge_embed_fn(edges, sent_attributes, received_attributes,
global_edge_attributes)
# apply initial node embeddings
if initial_node_embed_fn:
nodes = initial_node_embed_fn(nodes, sent_attributes,
received_attributes, global_attributes)
        # Now perform message passing N times
for pass_i in range(N):
if attention_logit_fn:
logits = attention_logit_fn(edges, sent_attributes, received_attributes,
global_edge_attributes)
tree_calculate_weights = functools.partial(
attention_normalize_fn,
segment_ids=receivers,
num_segments=sum_n_node)
weights = tree.tree_map(tree_calculate_weights, logits)
edges = attention_reduce_fn(edges, weights)
if update_node_fn:
nodes = update_node_fn(
nodes, edges, senders, receivers,
global_attributes, sum_n_node)
if update_edge_fn:
senders_attributes = tree.tree_map(
lambda n: n[senders], nodes)
receivers_attributes = tree.tree_map(
lambda n: n[receivers], nodes)
edges = update_edge_fn(edges, senders_attributes, receivers_attributes,
global_edge_attributes, pass_i == N-1)
if update_global_fn:
n_graph = n_node.shape[0]
graph_idx = jnp.arange(n_graph)
# To aggregate nodes and edges from each graph to global features,
# we first construct tensors that map the node to the corresponding graph.
# For example, if you have `n_node=[1,2]`, we construct the tensor
# [0, 1, 1]. We then do the same for edges.
node_gr_idx = jnp.repeat(
graph_idx, n_node, axis=0, total_repeat_length=sum_n_node)
edge_gr_idx = jnp.repeat(
graph_idx, n_edge, axis=0, total_repeat_length=sum_n_edge)
# We use the aggregation function to pool the nodes/edges per graph.
node_attributes = tree.tree_map(
lambda n: aggregate_nodes_for_globals_fn(
n, node_gr_idx, n_graph),
nodes)
                edge_attributes = tree.tree_map(
lambda e: aggregate_edges_for_globals_fn(
e, edge_gr_idx, n_graph),
edges)
# These pooled nodes are the inputs to the global update fn.
globals_ = update_global_fn(
                    node_attributes, edge_attributes, globals_)
        # The potential-energy branch is disabled in this variant.
        ds = 0.0
        if T_fn is not None:
            ds = T_fn(nodes)
# pylint: enable=g-long-lambda
return gn_graph.GraphsTuple(
nodes=nodes,
edges=edges,
receivers=receivers,
senders=senders,
globals=globals_,
n_node=n_node,
n_edge=n_edge), ds
return _ApplyGraphNet
def cal_graph_modified(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus, timestep=0.01):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
# eij = dr
eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
#emb = fb(eij)
emb_m = fb(jnp.array(dr))
return frozendict({"edge_embed": emb_m, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
#emb = fne(ohe)
#emb_pos = jnp.hstack([emb, nodes["position"]])
#emb_vel = jnp.hstack(
# [fneke(ohe), jnp.sum(jnp.square(nodes["velocity"]), axis=1, keepdims=True)])
emb_v = fne(jnp.hstack([ohe, nodes["position"], nodes["velocity"]]))
return frozendict({"node_embed": emb_v,
#"node_pos_embed": emb_pos,
#"node_vel_embed": emb_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
# if useonlyedge:
# def edge_node_to_V_fn(edges, nodes):
# vij = ff1(edges["edge_embed"])
# # print(vij, edges["eij"])
# return vij.sum()
# else:
# def edge_node_to_V_fn(edges, nodes):
# vij = ff1(edges["edge_embed"]).sum()
# vi = 0
# vi = vi + ff2(nodes["node_embed"]).sum()
# vi = vi + ff3(nodes["node_pos_embed"]).sum()
# return vij + vi
def node_to_T_fn(nodes):
#return ke(nodes["node_vel_embed"]).sum()
# print(nodes["node_embed"].shape[0])
timecolumn = jnp.full((nodes["node_embed"].shape[0],1),timestep)
return ke(jnp.hstack([nodes["node_embed"],timecolumn]))
# if not(useT):
# node_to_T_fn = None
Net = GNNet_modified(N=mpass,
                        T_fn=node_to_T_fn,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
return Net(graph)
def cal_graph_modified_split(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus, timestep=0.01):
fb_p_params = params["fb_p"]
fb_v_params = params["fb_v"]
fne_p_params = params["fne_p"]
fne_v_params = params["fne_v"]
fneke_params = params["fneke"]
fv_p_params = params["fv_p"]
fv_v_params = params["fv_v"]
fe_p_params = params["fe_p"]
fe_v_params = params["fe_v"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_p_params = params["ke_p"]
ke_v_params = params["ke_v"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne_p(n):
def fn(ni):
out = forward_pass(fne_p_params, ni, activation_fn=lambda x:x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fne_v(n):
def fn(ni):
out = forward_pass(fne_v_params, ni, activation_fn=lambda x:x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb_p(e):
def fn(eij):
out = forward_pass(fb_p_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fb_v(e):
def fn(eij):
out = forward_pass(fb_v_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv_p(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_p_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fv_v(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_v_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
# def fe(e, s, r):
# def fn(hi, hj):
# c2ij = hi * hj
# out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
# return out
# out = e + vmap(fn, in_axes=(0, 0))(s, r)
# return out
def fe_p(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_p_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def fe_v(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_v_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke_p(n):
def fn(ni):
out = forward_pass(ke_p_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke_v(n):
def fn(ni):
out = forward_pass(ke_v_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
# eij = dr
eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
#emb = fb(eij)
emb_p = fb_p(jnp.array(dr))
emb_v = fb_v(jnp.array(dr))
return frozendict({"edge_embed_p": emb_p, "edge_embed_v": emb_v,"eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
#emb = fne(ohe)
#emb_pos = jnp.hstack([emb, nodes["position"]])
#emb_vel = jnp.hstack(
# [fneke(ohe), jnp.sum(jnp.square(nodes["velocity"]), axis=1, keepdims=True)])
emb_p = fne_p(jnp.hstack([ohe, nodes["position"]]))
emb_v = fne_v(jnp.hstack([ohe, nodes["velocity"]]))
return frozendict({"node_embed_p": emb_p,
"node_embed_v": emb_v,
#"node_pos_embed": emb_pos,
#"node_vel_embed": emb_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb_p = fv_p(nodes["node_embed_p"], edges["edge_embed_p"],
senders, receivers, sum_n_node)
emb_v = fv_v(nodes["node_embed_v"], edges["edge_embed_v"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed_p": emb_p, "node_embed_v": emb_v})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb_p = fe_p(edges["edge_embed_p"], senders["node_embed_p"],
receivers["node_embed_p"])
emb_v = fe_v(edges["edge_embed_v"], senders["node_embed_v"],
receivers["node_embed_v"])
if last_step:
if eorder is not None:
emb_p = (emb_p + fe_p(edges["edge_embed_p"][eorder],
receivers["node_embed_p"], senders["node_embed_p"])) / 2
emb_v = (emb_v + fe_v(edges["edge_embed_v"][eorder],
receivers["node_embed_v"], senders["node_embed_v"])) / 2
return frozendict({"edge_embed_p": emb_p,"edge_embed_v": emb_v, "eij": edges["eij"]})
# if useonlyedge:
# def edge_node_to_V_fn(edges, nodes):
# vij = ff1(edges["edge_embed"])
# # print(vij, edges["eij"])
# return vij.sum()
# else:
# def edge_node_to_V_fn(edges, nodes):
# vij = ff1(edges["edge_embed"]).sum()
# vi = 0
# vi = vi + ff2(nodes["node_embed"]).sum()
# vi = vi + ff3(nodes["node_pos_embed"]).sum()
# return vij + vi
def node_to_T_fn(nodes):
#return ke(nodes["node_vel_embed"]).sum()
# print(nodes["node_embed"].shape[0])
#timecolumn = jnp.full((nodes["node_embed"].shape[0],1),timestep)
return jnp.hstack([ke_p(nodes["node_embed_p"]), ke_v(nodes["node_embed_v"])])
# if not(useT):
# node_to_T_fn = None
Net = GNNet_modified(N=mpass,
                        T_fn=node_to_T_fn,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
return Net(graph)
def cal_graph_modified_input(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus, timestep=0.01):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
# eij = dr
eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
emb = fb(eij)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
#emb_pos = jnp.hstack([emb, nodes["position"]])
#emb_vel = jnp.hstack(
# [fneke(ohe), jnp.sum(jnp.square(nodes["velocity"]), axis=1, keepdims=True)])
#emb_v = jnp.hstack([emb, nodes["position"], nodes["velocity"]])
emb_v = jnp.hstack(
[emb, nodes["position"], jnp.sum(jnp.square(nodes["velocity"]), axis=1, keepdims=True)])
return frozendict({"node_embed": emb_v,
#"node_pos_embed": emb_pos,
#"node_vel_embed": emb_vel,
})
# type_of_node = nodes["type"]
# ohe = onehot(type_of_node)
# emb = fne(ohe)
# emb_pos = jnp.hstack([emb, nodes["position"]])
# emb_vel = jnp.hstack(
# [fneke(ohe), jnp.sum(jnp.square(nodes["velocity"]), axis=1, keepdims=True)])
# return frozendict({"node_embed": emb,
# "node_pos_embed": emb_pos,
# "node_vel_embed": emb_vel,
# })
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
# if useonlyedge:
# def edge_node_to_V_fn(edges, nodes):
# vij = ff1(edges["edge_embed"])
# # print(vij, edges["eij"])
# return vij.sum()
# else:
# def edge_node_to_V_fn(edges, nodes):
# vij = ff1(edges["edge_embed"]).sum()
# vi = 0
# vi = vi + ff2(nodes["node_embed"]).sum()
# vi = vi + ff3(nodes["node_pos_embed"]).sum()
# return vij + vi
def node_to_T_fn(nodes):
#return ke(nodes["node_vel_embed"]).sum()
# print(nodes["node_embed"].shape[0])
#timecolumn = jnp.full((nodes["node_embed"].shape[0],1),timestep)
return ke(nodes["node_embed"])
# if not(useT):
# node_to_T_fn = None
Net = GNNet_modified(N=mpass,
                        T_fn=node_to_T_fn,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
return Net(graph)
def GNNet_no_velocity(
V_fn: GN_to_V_Fn,
initial_edge_embed_fn: Optional[GNUpdateEdgeFn],
initial_node_embed_fn: Optional[GNUpdateEdgeFn],
update_edge_fn: Optional[GNUpdateEdgeFn],
update_node_fn: Optional[GNUpdateNodeFn],
        T_fn: Optional[GN_to_T_Fn] = None,
update_global_fn: Optional[GNUpdateGlobalFn] = None,
aggregate_nodes_for_globals_fn: AggregateNodesToGlobalsFn = utils
.segment_sum,
aggregate_edges_for_globals_fn: AggregateEdgesToGlobalsFn = utils
.segment_sum,
attention_logit_fn: Optional[AttentionLogitFn] = None,
attention_normalize_fn: Optional[AttentionNormalizeFn] = utils
.segment_softmax,
attention_reduce_fn: Optional[AttentionReduceFn] = None,
N=1,):
"""Returns a method that applies a configured GraphNetwork.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
    Example usage::
      gn = GNNet_no_velocity(V_fn, initial_edge_embed_fn,
                             initial_node_embed_fn, update_edge_fn,
                             update_node_fn, N=num_message_passing_steps)
      graph, V = gn(graph)
Args:
update_edge_fn: function used to update the edges or None to deactivate edge
updates.
update_node_fn: function used to update the nodes or None to deactivate node
updates.
update_global_fn: function used to update the globals or None to deactivate
globals updates.
aggregate_nodes_for_globals_fn: function used to aggregate the nodes for the
globals.
aggregate_edges_for_globals_fn: function used to aggregate the edges for the
globals.
attention_logit_fn: function used to calculate the attention weights or
None to deactivate attention mechanism.
attention_normalize_fn: function used to normalize raw attention logits or
None if attention mechanism is not active.
attention_reduce_fn: function used to apply weights to the edge features or
None if attention mechanism is not active.
Returns:
A method that applies the configured GraphNetwork.
"""
    def not_both_supplied(x, y):
        return (x is None) != (y is None)
if not_both_supplied(attention_reduce_fn, attention_logit_fn):
raise ValueError(('attention_logit_fn and attention_reduce_fn must both be'
' supplied.'))
def _ApplyGraphNet(graph):
"""Applies a configured GraphNetwork to a graph.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
    than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
Many popular Graph Neural Networks can be implemented as special cases of
GraphNets, for more information please see the paper.
Args:
graph: a `GraphsTuple` containing the graph.
Returns:
Updated `GraphsTuple`.
"""
# pylint: disable=g-long-lambda
nodes, edges, receivers, senders, globals_, n_node, n_edge = graph
# Equivalent to jnp.sum(n_node), but jittable
# calculate number of nodes in graph
sum_n_node = tree.tree_leaves(nodes)[0].shape[0]
# calculate number of edges in graph
sum_n_edge = senders.shape[0]
        # Check that all node arrays have the same length (the number of nodes).
if not tree.tree_all(
tree.tree_map(lambda n: n.shape[0] == sum_n_node, nodes)):
raise ValueError(
'All node arrays in nest must contain the same number of nodes.')
# Initial sent info
sent_attributes = tree.tree_map(lambda n: n[senders], nodes)
# Initial received info
received_attributes = tree.tree_map(lambda n: n[receivers], nodes)
# Here we scatter the global features to the corresponding edges,
# giving us tensors of shape [num_edges, global_feat].
        # i.e. create an array per edge for global attributes
global_edge_attributes = tree.tree_map(lambda g: jnp.repeat(
g, n_edge, axis=0, total_repeat_length=sum_n_edge), globals_)
# Here we scatter the global features to the corresponding nodes,
# giving us tensors of shape [num_nodes, global_feat].
        # i.e. create an array per node for global attributes
global_attributes = tree.tree_map(lambda g: jnp.repeat(
g, n_node, axis=0, total_repeat_length=sum_n_node), globals_)
# apply initial edge embeddings
if initial_edge_embed_fn:
edges = initial_edge_embed_fn(edges, sent_attributes, received_attributes,
global_edge_attributes)
# apply initial node embeddings
if initial_node_embed_fn:
nodes = initial_node_embed_fn(nodes, sent_attributes,
received_attributes, global_attributes)
        # Now perform message passing N times
for pass_i in range(N):
if attention_logit_fn:
logits = attention_logit_fn(edges, sent_attributes, received_attributes,
global_edge_attributes)
tree_calculate_weights = functools.partial(
attention_normalize_fn,
segment_ids=receivers,
num_segments=sum_n_node)
weights = tree.tree_map(tree_calculate_weights, logits)
edges = attention_reduce_fn(edges, weights)
if update_node_fn:
nodes = update_node_fn(
nodes, edges, senders, receivers,
global_attributes, sum_n_node)
if update_edge_fn:
senders_attributes = tree.tree_map(
lambda n: n[senders], nodes)
receivers_attributes = tree.tree_map(
lambda n: n[receivers], nodes)
edges = update_edge_fn(edges, senders_attributes, receivers_attributes,
global_edge_attributes, pass_i == N-1)
if update_global_fn:
n_graph = n_node.shape[0]
graph_idx = jnp.arange(n_graph)
# To aggregate nodes and edges from each graph to global features,
# we first construct tensors that map the node to the corresponding graph.
# For example, if you have `n_node=[1,2]`, we construct the tensor
# [0, 1, 1]. We then do the same for edges.
node_gr_idx = jnp.repeat(
graph_idx, n_node, axis=0, total_repeat_length=sum_n_node)
edge_gr_idx = jnp.repeat(
graph_idx, n_edge, axis=0, total_repeat_length=sum_n_edge)
# We use the aggregation function to pool the nodes/edges per graph.
node_attributes = tree.tree_map(
lambda n: aggregate_nodes_for_globals_fn(
n, node_gr_idx, n_graph),
nodes)
                edge_attributes = tree.tree_map(
lambda e: aggregate_edges_for_globals_fn(
e, edge_gr_idx, n_graph),
edges)
# These pooled nodes are the inputs to the global update fn.
globals_ = update_global_fn(
                    node_attributes, edge_attributes, globals_)
        V = 0.0
        if V_fn is not None:
            V = V_fn(edges, nodes)
# T = 0.0
# if T_fn is not None:
# T += T_fn(nodes)
# pylint: enable=g-long-lambda
return gn_graph.GraphsTuple(
nodes=nodes,
edges=edges,
receivers=receivers,
senders=senders,
globals=globals_,
n_node=n_node,
n_edge=n_edge), V#, T
return _ApplyGraphNet
def cal_graph_no_velocity(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
# eij = dr
eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
emb = fb(eij)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
emb_pos = jnp.hstack([emb, nodes["position"]])
emb_vel = jnp.hstack(
[fneke(ohe), jnp.sum(jnp.square(nodes["velocity"]), axis=1, keepdims=True)])
return frozendict({"node_embed": emb,
"node_pos_embed": emb_pos,
"node_vel_embed": emb_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
if useonlyedge:
def edge_node_to_V_fn(edges, nodes):
#vij = ff1(edges["edge_embed"])
vi = ff2(nodes["node_embed"])
# print(vij, edges["eij"])
return vi#.sum()
else:
def edge_node_to_V_fn(edges, nodes):
#vij = ff1(edges["edge_embed"])#.sum()
vi = 0
vi = vi + ff2(nodes["node_embed"])#.sum()
vi = vi + ff3(nodes["node_pos_embed"])#.sum()
return vi
def node_to_T_fn(nodes):
return ke(nodes["node_vel_embed"]).sum()
    if not useT:
node_to_T_fn = None
Net = GNNet_no_velocity(N=mpass,
V_fn=edge_node_to_V_fn,
#T_fn=node_to_T_fn,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
return Net(graph)
def cal_graph_no_velocity_input(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
# eij = dr
eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
#emb = fb(eij)
emb = fb(dr)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
emb_pos = jnp.hstack([emb, nodes["position"]])
emb_vel = jnp.hstack(
[fneke(ohe), jnp.sum(jnp.square(nodes["velocity"]), axis=1, keepdims=True)])
return frozendict({"node_embed": emb,
"node_pos_embed": emb_pos,
"node_vel_embed": emb_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
if useonlyedge:
def edge_node_to_V_fn(edges, nodes):
#vij = ff1(edges["edge_embed"])
vi = ff2(nodes["node_embed"])
# print(vij, edges["eij"])
return vi#.sum()
else:
def edge_node_to_V_fn(edges, nodes):
#vij = ff1(edges["edge_embed"])#.sum()
vi = 0
vi = vi + ff2(nodes["node_embed"])#.sum()
vi = vi + ff3(nodes["node_pos_embed"])#.sum()
return vi
def node_to_T_fn(nodes):
return ke(nodes["node_vel_embed"]).sum()
    if not useT:
node_to_T_fn = None
Net = GNNet_no_velocity(N=mpass,
V_fn=edge_node_to_V_fn,
#T_fn=node_to_T_fn,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
return Net(graph) | 63,572 | 35.620392 | 100 | py |
benchmarking_graph | benchmarking_graph-main/src/fgn1.py | from functools import partial
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from jax import grad, jit, lax, random
from jax_md.nn import GraphNetEncoder
from jraph import GraphMapFeatures, GraphNetwork, GraphsTuple
from src.models import SquarePlus, forward_pass, initialize_mlp
class GraphEncodeNet():
def __init__(self, N, embedding_fn, model_fn, final_fn):
self.N = N
self._encoder = GraphMapFeatures(
embedding_fn('EdgeEncoder'),
embedding_fn('NodeEncoder'),
embedding_fn('GlobalEncoder'))
self._propagation_network = GraphNetwork(
model_fn('EdgeFunction'),
model_fn('NodeFunction'),
model_fn('GlobalFunction'), aggregate_edges_for_globals_fn=lambda *x: jnp.array([0.0]))
self._final = GraphNetwork(
final_fn('EdgeFunction'),
final_fn('NodeFunction'),
final_fn('GlobalFunction'), aggregate_edges_for_globals_fn=lambda *x: jnp.array([0.0]))
def __call__(self, graph):
output = self._encoder(graph)
for _ in range(self.N):
output = self._propagation_network(output)
output = self._final(output)
return output
def cal(params, graph, mpass=1):
ee_params = params["ee_params"]
ne_params = params["ne_params"]
e_params = params["e_params"]
n_params = params["n_params"]
g_params = params["g_params"]
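    # Pipeline sketch: encode raw node/edge features into fixed-size
    # embeddings, run `mpass` rounds of message passing, then read out a
    # per-graph global scalar from the final node embeddings.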
def node_em(nodes):
out = jnp.hstack([v for k, v in nodes.items()])
def fn(out):
return forward_pass(ne_params, out, activation_fn=SquarePlus)
out = jax.vmap(fn)(out)
return {"embed": out}
def edge_em(edges):
out = edges["dij"]
out = jax.vmap(lambda p, x: forward_pass(p, x.reshape(-1)),
in_axes=(None, 0))(ee_params, out)
return {"embed": out}
embedding = {
"EdgeEncoder": edge_em,
"NodeEncoder": node_em,
"GlobalEncoder": None,
}
def embedding_fn(arg): return embedding[arg]
def edge_fn(edges, sent_attributes, received_attributes, global_):
out = jnp.hstack([edges["embed"], sent_attributes["embed"],
received_attributes["embed"]])
out = jax.vmap(forward_pass, in_axes=(None, 0))(e_params, out)
return {"embed": out}
def node_fn(nodes, sent_attributes, received_attributes, global_):
out = jnp.hstack([nodes["embed"], sent_attributes["embed"],
received_attributes["embed"]])
out = jax.vmap(forward_pass, in_axes=(None, 0))(n_params, out)
return {"embed": out}
model = {
"EdgeFunction": edge_fn,
"NodeFunction": node_fn,
"GlobalFunction": None,
}
def model_fn(arg): return model[arg]
final = {
"EdgeFunction": lambda *x: x[0],
"NodeFunction": lambda *x: x[0],
"GlobalFunction": lambda node_attributes, edge_attribtutes, globals_:
forward_pass(g_params, node_attributes["embed"].reshape(-1)),
# "GlobalFunction": lambda node_attributes, edge_attribtutes, globals_:
# node_attributes["embed"].sum()
}
def final_fn(arg): return final[arg]
net = GraphEncodeNet(mpass, embedding_fn, model_fn, final_fn)
graph = net(graph)
return graph
def cal_energy(params, graph, **kwargs):
graph = cal(params, graph, **kwargs)
return graph.globals.sum()
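# Hypothetical usage: since `cal_energy` returns a scalar, forces can be
# obtained by differentiating it with respect to positions via `jax.grad`,
# assuming the positions enter the graph's node features.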
def cal_acceleration(params, graph, **kwargs):
graph = cal(params, graph, **kwargs)
acc_params = params["acc_params"]
out = jax.vmap(forward_pass, in_axes=(None, 0))(
acc_params, graph.nodes["embed"])
return out
def acceleration_node(R, V, params, **kwargs):
    inp = jnp.hstack([R.flatten(), V.flatten()])
out = jax.vmap(forward_pass, in_axes=(None, 0))(params, inp)
return out
def cal_l(params, graph, **kwargs):
graph = cal(params, graph, **kwargs)
L_params = params["l_params"]
out = jax.vmap(forward_pass, in_axes=(None, 0))(
L_params, graph.nodes["embed"])
return out.sum()
# def cal_zdot(params, graph, **kwargs):
# graph = cal(params, graph, **kwargs)
# zdot_params = params["zdot_params"]
# out = jax.vmap(forward_pass, in_axes=(None, 0))(
# zdot_params, graph.nodes["embed"])
# return out
| 4,353 | 30.781022 | 99 | py |
benchmarking_graph | benchmarking_graph-main/src/nve.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Tuple, TypeVar, Union
import jax.numpy as np
from jax import random
from jax_md import dataclasses, interpolate, quantity, simulate, space, util
static_cast = util.static_cast
# Types
Array = util.Array
f32 = util.f32
f64 = util.f64
ShiftFn = space.ShiftFn
T = TypeVar('T')
InitFn = Callable[..., T]
ApplyFn = Callable[[T], T]
Simulator = Tuple[InitFn, ApplyFn]
NVEState = simulate.NVEState
# NVEState2 = simulate.NVEState2
Schedule = Union[Callable[..., float], float]
# pylint: disable=invalid-name
class NVEStates():
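    """An indexable, iterable view over a batched trajectory of NVEState records."""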
def __init__(self, states):
self.position = states.position
self.velocity = states.velocity
self.force = states.force
self.mass = states.mass
self.index = 0
def __len__(self):
return len(self.position)
def __getitem__(self, key):
if isinstance(key, int):
return NVEState(self.position[key], self.velocity[key],
self.force[key], self.mass[key])
else:
return NVEState(self.position[key],
self.velocity[key],
self.force[key],
self.mass[key])
def __iter__(self,):
return (self.__getitem__(i) for i in range(len(self)))
# class NVEStates2():
# def __init__(self, states):
# self.position = states.position
# self.velocity = states.velocity
# # self.force = states.force
# self.mass = states.mass
# self.index = 0
# def __len__(self):
# return len(self.position)
# def __getitem__(self, key):
# if isinstance(key, int):
# return NVEState2(self.position[key], self.velocity[key], self.mass[key])
# else:
# return NVEState2(self.position[key], self.velocity[key], self.mass[key])
# def __iter__(self,):
# return (self.__getitem__(i) for i in range(len(self)))
def nve(energy_or_force_fn: Callable[..., Array],
shift_fn: ShiftFn,
dt: float) -> Simulator:
"""Simulates a system in the NVE ensemble.
Samples from the microcanonical ensemble in which the number of particles
(N), the system volume (V), and the energy (E) are held constant. We use a
standard velocity verlet integration scheme.
Args:
energy_or_force: A function that produces either an energy or a force from
a set of particle positions specified as an ndarray of shape
[n, spatial_dimension].
shift_fn: A function that displaces positions, R, by an amount dR. Both R
and dR should be ndarrays of shape [n, spatial_dimension].
dt: Floating point number specifying the timescale (step size) of the
simulation.
quant: Either a quantity.Energy or a quantity.Force specifying whether
energy_or_force is an energy or force respectively.
Returns:
See above.
"""
force_fn = energy_or_force_fn
dt_2 = 0.5 * dt ** 2
def init_fun(R: Array,
V: Array,
mass=f32(1.0),
**kwargs) -> NVEState:
mass = quantity.canonicalize_mass(mass)
return NVEState(R, V, force_fn(R, V, **kwargs), mass)
def apply_fun(state: NVEState, **kwargs) -> NVEState:
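        # Velocity Verlet update:
        #   R(t+dt) = R(t) + V(t)*dt + 0.5*A(t)*dt^2
        #   V(t+dt) = V(t) + 0.5*(A(t) + A(t+dt))*dt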
R, V, F, mass = dataclasses.astuple(state)
A = F / mass
dR = V * dt + A * dt_2
R, V = shift_fn(R, dR, V)
F = force_fn(R, V, **kwargs)
A_prime = F / mass
V = V + f32(0.5) * (A + A_prime) * dt
return NVEState(R, V, F, mass)
return init_fun, apply_fun
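# Minimal usage sketch (with hypothetical `force_fn`, `shift_fn`, `R0`, `V0`):
#   init_fn, apply_fn = nve(force_fn, shift_fn, dt=1.0e-3)
#   state = init_fn(R0, V0)
#   for _ in range(n_steps):
#       state = apply_fn(state)
# Note that `shift_fn` here is expected to return the updated (R, V) pair.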
def nve4(energy_or_force_fn: Callable[..., Array],
shift_fn: ShiftFn,
dt: float) -> Simulator:
"""Simulates a system in the NVE ensemble.
Samples from the microcanonical ensemble in which the number of particles
(N), the system volume (V), and the energy (E) are held constant. We use a
standard velocity verlet integration scheme.
Args:
energy_or_force: A function that produces either an energy or a force from
a set of particle positions specified as an ndarray of shape
[n, spatial_dimension].
shift_fn: A function that displaces positions, R, by an amount dR. Both R
and dR should be ndarrays of shape [n, spatial_dimension].
dt: Floating point number specifying the timescale (step size) of the
simulation.
quant: Either a quantity.Energy or a quantity.Force specifying whether
energy_or_force is an energy or force respectively.
Returns:
See above.
"""
force_fn = energy_or_force_fn
dt_2 = 0.5 * dt ** 2
def init_fun(R: Array,
V: Array,
mass=f32(1.0),
**kwargs) -> NVEState:
mass = quantity.canonicalize_mass(mass)
return NVEState(R, V, force_fn(R, V, **kwargs), mass)
def apply_fun(state: NVEState, **kwargs) -> NVEState:
R, V, F, mass = dataclasses.astuple(state)
A = F / mass
dR = V * dt + A * dt_2
R, V = shift_fn(R, dR, V)
F = force_fn(R, V, **kwargs)
A_prime = F / mass
V = V + f32(0.5) * (A + A_prime) * dt
return NVEState(R, V, F, mass)
return init_fun, apply_fun
def nve2(params, change_R_V, dt: float) -> Simulator:
"""Simulates a system in the NVE ensemble.
Samples from the microcanonical ensemble in which the number of particles
(N), the system volume (V), and the energy (E) are held constant. We use a
standard velocity verlet integration scheme.
Args:
energy_or_force: A function that produces either an energy or a force from
a set of particle positions specified as an ndarray of shape
[n, spatial_dimension].
shift_fn: A function that displaces positions, R, by an amount dR. Both R
and dR should be ndarrays of shape [n, spatial_dimension].
dt: Floating point number specifying the timescale (step size) of the
simulation.
quant: Either a quantity.Energy or a quantity.Force specifying whether
energy_or_force is an energy or force respectively.
Returns:
See above.
"""
def init_fun(R: Array, V: Array, mass=f32(1.0), **kwargs) -> NVEState:
mass = quantity.canonicalize_mass(mass)
return NVEState(R, V, V, mass)
def apply_fun(state: NVEState, **kwargs) -> NVEState:
R, V, F, mass = dataclasses.astuple(state)
change = change_R_V(R, V, params)
        change_ = np.split(change, 2, axis=1)
dR = change_[0]
dV = change_[1]
return NVEState(R + dR, V + dV, V, mass)
return init_fun, apply_fun
def nve3(params, change_Acc, dt: float) -> Simulator:
"""Simulates a system in the NVE ensemble.
Samples from the microcanonical ensemble in which the number of particles
(N), the system volume (V), and the energy (E) are held constant. We use a
standard velocity verlet integration scheme.
Args:
energy_or_force: A function that produces either an energy or a force from
a set of particle positions specified as an ndarray of shape
[n, spatial_dimension].
shift_fn: A function that displaces positions, R, by an amount dR. Both R
and dR should be ndarrays of shape [n, spatial_dimension].
dt: Floating point number specifying the timescale (step size) of the
simulation.
quant: Either a quantity.Energy or a quantity.Force specifying whether
energy_or_force is an energy or force respectively.
Returns:
See above.
"""
dt_2 = 0.5 * dt ** 2
def init_fun(R: Array, V: Array, mass=f32(1.0), **kwargs) -> NVEState:
mass = quantity.canonicalize_mass(mass)
return NVEState(R, V, change_Acc(R,V,params), mass)
def apply_fun(state: NVEState, **kwargs) -> NVEState:
R, V, F, mass = dataclasses.astuple(state)
A = F / mass
dR = V * dt + A * dt_2
R = R + dR
#R, V = shift_fn(R, dR, V)
F = change_Acc(R, V, params)
A_prime = F / mass
V = V + f32(0.5) * (A + A_prime) * dt
return NVEState(R, V, F, mass)
# R, V, F, mass = dataclasses.astuple(state)
# A = F/mass
# A_prime = change_Acc(R, V, params)
# R = R + V * dt + f32(0.5) * A * dt * dt
# V = V + f32(0.5) * (A + A_prime) * dt
# return NVEState(R, V, A_prime * mass, mass)
return init_fun, apply_fun
| 9,149 | 35.454183 | 86 | py |
benchmarking_graph | benchmarking_graph-main/src/graph1.py | # Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of Graph Neural Network models."""
import functools
from typing import Any, Callable, Iterable, Mapping, Optional, Union
import jax
import jax.numpy as jnp
import jax.tree_util as tree
import numpy as np
from frozendict import frozendict
from jax import vmap
from jraph._src import graph as gn_graph
from jraph._src import utils
from .models import SquarePlus, forward_pass
jax.tree_util.register_pytree_node(
frozendict,
flatten_func=lambda s: (tuple(s.values()), tuple(s.keys())),
unflatten_func=lambda k, xs: frozendict(zip(k, xs)))
# As of 04/2020 pytype doesn't support recursive types.
# pytype: disable=not-supported-yet
ArrayTree = Union[jnp.ndarray,
Iterable['ArrayTree'], Mapping[Any, 'ArrayTree']]
# All features will be an ArrayTree.
NodeFeatures = EdgeFeatures = SenderFeatures = ReceiverFeatures = Globals = ArrayTree
# Signature:
# (edges of each node to be aggregated, segment ids, number of segments) ->
# aggregated edges
AggregateEdgesToNodesFn = Callable[
[EdgeFeatures, jnp.ndarray, int], NodeFeatures]
# Signature:
# (nodes of each graph to be aggregated, segment ids, number of segments) ->
# aggregated nodes
AggregateNodesToGlobalsFn = Callable[[NodeFeatures, jnp.ndarray, int],
Globals]
# Signature:
# (edges of each graph to be aggregated, segment ids, number of segments) ->
# aggregated edges
AggregateEdgesToGlobalsFn = Callable[[EdgeFeatures, jnp.ndarray, int],
Globals]
# Signature:
# (edge features, sender node features, receiver node features, globals) ->
# attention weights
AttentionLogitFn = Callable[
[EdgeFeatures, SenderFeatures, ReceiverFeatures, Globals], ArrayTree]
# Signature:
# (edge features, weights) -> edge features for node update
AttentionReduceFn = Callable[[EdgeFeatures, ArrayTree], EdgeFeatures]
# Signature:
# (edges to be normalized, segment ids, number of segments) ->
# normalized edges
AttentionNormalizeFn = Callable[[EdgeFeatures, jnp.ndarray, int], EdgeFeatures]
# Signature:
# (edge features, sender node features, receiver node features, globals) ->
# updated edge features
GNUpdateEdgeFn = Callable[
[EdgeFeatures, SenderFeatures, ReceiverFeatures, Globals], EdgeFeatures]
# Signature:
# (node features, outgoing edge features, incoming edge features,
# globals) -> updated node features
GNUpdateNodeFn = Callable[
[NodeFeatures, SenderFeatures, ReceiverFeatures, Globals], NodeFeatures]
GNUpdateGlobalFn = Callable[[NodeFeatures, EdgeFeatures, Globals], Globals]
# Signature:
# (node features, outgoing edge features, incoming edge features,
# globals) -> updated node features
# V: Potential energy of edge
GN_to_V_Fn = Callable[[EdgeFeatures, NodeFeatures], float]
GN_to_T_Fn = Callable[[NodeFeatures], float]
def GNNet(
V_fn: GN_to_V_Fn,
initial_edge_embed_fn: Optional[GNUpdateEdgeFn],
initial_node_embed_fn: Optional[GNUpdateEdgeFn],
update_edge_fn: Optional[GNUpdateEdgeFn],
update_node_fn: Optional[GNUpdateNodeFn],
T_fn: GN_to_T_Fn = None,
update_global_fn: Optional[GNUpdateGlobalFn] = None,
aggregate_nodes_for_globals_fn: AggregateNodesToGlobalsFn = utils
.segment_sum,
aggregate_edges_for_globals_fn: AggregateEdgesToGlobalsFn = utils
.segment_sum,
attention_logit_fn: Optional[AttentionLogitFn] = None,
attention_normalize_fn: Optional[AttentionNormalizeFn] = utils
.segment_softmax,
attention_reduce_fn: Optional[AttentionReduceFn] = None,
N=1,):
"""Returns a method that applies a configured GraphNetwork.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
Example usage::
gn = GNNet(V_fn, initial_edge_embed_fn, initial_node_embed_fn,
update_edge_fn, update_node_fn, N=num_message_passing_steps)
graph, V, T = gn(graph)
Args:
V_fn: function mapping the final edge and node features to a scalar
potential energy, or None to skip the potential-energy output.
initial_edge_embed_fn: function used to compute the initial edge
embeddings, or None to skip.
initial_node_embed_fn: function used to compute the initial node
embeddings, or None to skip.
update_edge_fn: function used to update the edges or None to deactivate edge
updates.
update_node_fn: function used to update the nodes or None to deactivate node
updates.
T_fn: function mapping the final node features to a scalar kinetic
energy, or None to skip the kinetic-energy output.
update_global_fn: function used to update the globals or None to deactivate
globals updates.
aggregate_nodes_for_globals_fn: function used to aggregate the nodes for the
globals.
aggregate_edges_for_globals_fn: function used to aggregate the edges for the
globals.
attention_logit_fn: function used to calculate the attention weights or
None to deactivate attention mechanism.
attention_normalize_fn: function used to normalize raw attention logits or
None if attention mechanism is not active.
attention_reduce_fn: function used to apply weights to the edge features or
None if attention mechanism is not active.
N: number of message-passing rounds performed inside a single call.
Returns:
A method that applies the configured GraphNetwork.
"""
def not_both_supplied(x, y): return (
x != y) and ((x is None) or (y is None))
if not_both_supplied(attention_reduce_fn, attention_logit_fn):
raise ValueError(('attention_logit_fn and attention_reduce_fn must both be'
' supplied.'))
def _ApplyGraphNet(graph):
"""Applies a configured GraphNetwork to a graph.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
Many popular Graph Neural Networks can be implemented as special cases of
GraphNets, for more information please see the paper.
Args:
graph: a `GraphsTuple` containing the graph.
Returns:
Updated `GraphsTuple`.
"""
# pylint: disable=g-long-lambda
nodes, edges, receivers, senders, globals_, n_node, n_edge = graph
# Equivalent to jnp.sum(n_node), but jittable
# calculate number of nodes in graph
sum_n_node = tree.tree_leaves(nodes)[0].shape[0]
# calculate number of edges in graph
sum_n_edge = senders.shape[0]
# check that all node arrays have the same length (= number of nodes)
if not tree.tree_all(
tree.tree_map(lambda n: n.shape[0] == sum_n_node, nodes)):
raise ValueError(
'All node arrays in nest must contain the same number of nodes.')
# Initial sent info
sent_attributes = tree.tree_map(lambda n: n[senders], nodes)
# Initial received info
received_attributes = tree.tree_map(lambda n: n[receivers], nodes)
# Here we scatter the global features to the corresponding edges,
# giving us tensors of shape [num_edges, global_feat].
# i.e create an array per edge for global attributes
global_edge_attributes = tree.tree_map(lambda g: jnp.repeat(
g, n_edge, axis=0, total_repeat_length=sum_n_edge), globals_)
# Here we scatter the global features to the corresponding nodes,
# giving us tensors of shape [num_nodes, global_feat].
# i.e create an array per node for global attributes
global_attributes = tree.tree_map(lambda g: jnp.repeat(
g, n_node, axis=0, total_repeat_length=sum_n_node), globals_)
# apply initial edge embeddings
if initial_edge_embed_fn:
edges = initial_edge_embed_fn(edges, sent_attributes, received_attributes,
global_edge_attributes)
# apply initial node embeddings
if initial_node_embed_fn:
nodes = initial_node_embed_fn(nodes, sent_attributes,
received_attributes, global_attributes)
# Now perform message passing for N times
for pass_i in range(N):
if attention_logit_fn:
logits = attention_logit_fn(edges, sent_attributes, received_attributes,
global_edge_attributes)
tree_calculate_weights = functools.partial(
attention_normalize_fn,
segment_ids=receivers,
num_segments=sum_n_node)
weights = tree.tree_map(tree_calculate_weights, logits)
edges = attention_reduce_fn(edges, weights)
if update_node_fn:
nodes = update_node_fn(
nodes, edges, senders, receivers,
global_attributes, sum_n_node)
if update_edge_fn:
senders_attributes = tree.tree_map(
lambda n: n[senders], nodes)
receivers_attributes = tree.tree_map(
lambda n: n[receivers], nodes)
edges = update_edge_fn(edges, senders_attributes, receivers_attributes,
global_edge_attributes, pass_i == N-1)
if update_global_fn:
n_graph = n_node.shape[0]
graph_idx = jnp.arange(n_graph)
# To aggregate nodes and edges from each graph to global features,
# we first construct tensors that map the node to the corresponding graph.
# For example, if you have `n_node=[1,2]`, we construct the tensor
# [0, 1, 1]. We then do the same for edges.
node_gr_idx = jnp.repeat(
graph_idx, n_node, axis=0, total_repeat_length=sum_n_node)
edge_gr_idx = jnp.repeat(
graph_idx, n_edge, axis=0, total_repeat_length=sum_n_edge)
# We use the aggregation function to pool the nodes/edges per graph.
node_attributes = tree.tree_map(
lambda n: aggregate_nodes_for_globals_fn(
n, node_gr_idx, n_graph),
nodes)
edge_attributes = tree.tree_map(
lambda e: aggregate_edges_for_globals_fn(
e, edge_gr_idx, n_graph),
edges)
# These pooled nodes are the inputs to the global update fn.
globals_ = update_global_fn(
node_attributes, edge_attributes, globals_)
V = 0.0
if V_fn is not None:
V += V_fn(edges, nodes)
T = 0.0
if T_fn is not None:
T += T_fn(nodes)
# pylint: enable=g-long-lambda
return gn_graph.GraphsTuple(
nodes=nodes,
edges=edges,
receivers=receivers,
senders=senders,
globals=globals_,
n_node=n_node,
n_edge=n_edge), V, T
return _ApplyGraphNet
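# Note: unlike a stock jraph GraphNetwork, the callable returned by GNNet
# yields a 3-tuple (updated GraphsTuple, V, T), where V and T are the scalar
# potential and kinetic energies from V_fn / T_fn (0.0 when the fn is None).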
# Signature:
# edge features -> embedded edge features
EmbedEdgeFn = Callable[[EdgeFeatures], EdgeFeatures]
# Signature:
# node features -> embedded node features
EmbedNodeFn = Callable[[NodeFeatures], NodeFeatures]
# Signature:
# globals features -> embedded globals features
EmbedGlobalFn = Callable[[Globals], Globals]
def get_fully_connected_senders_and_receivers(
num_particles: int, self_edges: bool = False,
):
"""Returns senders and receivers for fully connected particles."""
particle_indices = np.arange(num_particles)
senders, receivers = np.meshgrid(particle_indices, particle_indices)
senders, receivers = senders.flatten(), receivers.flatten()
if not self_edges:
mask = senders != receivers
senders, receivers = senders[mask], receivers[mask]
return senders, receivers
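# Worked example: for num_particles=3 with self_edges=False this returns
# senders = [1, 2, 0, 2, 0, 1] and receivers = [0, 0, 1, 1, 2, 2],
# i.e. all N*(N-1) = 6 directed edges of the fully connected graph.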
def cal_graph(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
# eij = dr
eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
emb = fb(eij)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
emb_pos = jnp.hstack([emb, nodes["position"]])
emb_vel = jnp.hstack(
[fneke(ohe), jnp.sum(jnp.square(nodes["velocity"]), axis=1, keepdims=True)])
return frozendict({"node_embed": emb,
"node_pos_embed": emb_pos,
"node_vel_embed": emb_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
if useonlyedge:
def edge_node_to_V_fn(edges, nodes):
vij = ff1(edges["edge_embed"])
# print(vij, edges["eij"])
return vij.sum()
else:
def edge_node_to_V_fn(edges, nodes):
vij = ff1(edges["edge_embed"]).sum()
vi = 0
vi = vi + ff2(nodes["node_embed"]).sum()
vi = vi + ff3(nodes["node_pos_embed"]).sum()
return vij + vi
def node_to_T_fn(nodes):
return ke(nodes["node_vel_embed"]).sum()
if not(useT):
node_to_T_fn = None
Net = GNNet(N=mpass,
V_fn=edge_node_to_V_fn,
T_fn=node_to_T_fn,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
return Net(graph)
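# Usage sketch: downstream models treat cal_graph as a learned Lagrangian,
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# L = T - V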
def mcgnode_cal_force_q_qdot(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
mass_params = params["mass"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def mass(n):
def fn(ni):
out = forward_pass(mass_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
# eij = dr
eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
emb = fb(eij)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
emb_pos_vel = jnp.hstack([emb, nodes["position"],nodes["velocity"]])
return frozendict({"node_embed": emb,
"node_pos_vel_embed": emb_pos_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
def edge_node_to_force(edges, nodes, sen, rec, sum_n_node):
ai = 0
fij = ff1(edges["edge_embed"])
fi1 = jax.ops.segment_sum(fij, rec, sum_n_node)
fi2 = jax.ops.segment_sum(-fij, sen, sum_n_node)
ai = ai+ (fi1+fi2)
ai = ai + ff3(nodes["node_pos_vel_embed"])
return ai
def node_to_M_fn(nodes):
return mass(nodes["node_embed"])
Net = GNNet(N=mpass,
V_fn=None,
T_fn=None,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
graph, V, T = Net(graph)
return jnp.hstack([edge_node_to_force( graph.edges,graph.nodes,graph.senders,graph.receivers,graph.n_node), node_to_M_fn(graph.nodes)])
def cdgnode_cal_force_q_qdot(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
mass_params = params["mass"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def mass(n):
def fn(ni):
out = forward_pass(mass_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
# eij = dr
eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
emb = fb(eij)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
emb_pos_vel = jnp.hstack([emb, nodes["position"],nodes["velocity"]])
return frozendict({"node_embed": emb,
"node_pos_vel_embed": emb_pos_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
def edge_node_to_force(edges, nodes, sen, rec, sum_n_node):
ai = 0
ai = ai + ff2(nodes["node_embed"])
ai = ai + ff3(nodes["node_pos_vel_embed"])
return ai
def node_to_M_fn(nodes):
return mass(nodes["node_embed"])
Net = GNNet(N=mpass,
V_fn=None,
T_fn=None,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
graph, V, T = Net(graph)
return jnp.hstack([edge_node_to_force( graph.edges,graph.nodes,graph.senders,graph.receivers,graph.n_node), node_to_M_fn(graph.nodes)])
def gnode_cal_force_q_qdot(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
mass_params = params["mass"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def mass(n):
def fn(ni):
out = forward_pass(mass_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
# eij = dr
eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
emb = fb(eij)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
emb_pos_vel = jnp.hstack([emb, nodes["position"],nodes["velocity"]])
return frozendict({"node_embed": emb_pos_vel,
"mass_embed":emb,
# "node_pos_vel_embed": emb_pos_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
def edge_node_to_force(edges, nodes, sen, rec, sum_n_node):
ai = 0
ai = ai + ff2(nodes["node_embed"])
# ai = ai + ff3(nodes["node_pos_vel_embed"])
return ai
def node_to_M_fn(nodes):
return mass(nodes["mass_embed"])
Net = GNNet(N=mpass,
V_fn=None,
T_fn=None,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
graph, V, T = Net(graph)
return jnp.hstack([edge_node_to_force( graph.edges,graph.nodes,graph.senders,graph.receivers,graph.n_node), node_to_M_fn(graph.nodes)])
def a_gnode_cal_force_q_qdot(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
mass_params = params["mass"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def mass(n):
def fn(ni):
out = forward_pass(mass_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
eij = jnp.array(dr)
# eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
emb = fb(eij)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
emb_pos_vel = jnp.hstack([emb, nodes["position"],nodes["velocity"]])
return frozendict({"node_embed": emb_pos_vel,
"mass_embed":emb,
# "node_pos_vel_embed": emb_pos_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
def edge_node_to_force(edges, nodes, sen, rec, sum_n_node):
ai = 0
ai = ai + ff2(nodes["node_embed"])
# ai = ai + ff3(nodes["node_pos_vel_embed"])
return ai
def node_to_M_fn(nodes):
return mass(nodes["mass_embed"])
Net = GNNet(N=mpass,
V_fn=None,
T_fn=None,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
graph, V, T = Net(graph)
# return jnp.hstack([edge_node_to_force( graph.edges,graph.nodes,graph.senders,graph.receivers,graph.n_node), node_to_M_fn(graph.nodes)])
return edge_node_to_force( graph.edges,graph.nodes,graph.senders,graph.receivers,graph.n_node)
def a_cdgnode_cal_force_q_qdot(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
mass_params = params["mass"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def mass(n):
def fn(ni):
out = forward_pass(mass_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
eij = jnp.array(dr)
# eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
emb = fb(eij)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
emb_pos_vel = jnp.hstack([emb, nodes["position"],nodes["velocity"]])
return frozendict({"node_embed": emb,
# "node_pos_vel_embed": emb_pos_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
def edge_node_to_force(edges, nodes, sen, rec, sum_n_node):
ai = 0
ai = ai + ff2(nodes["node_embed"])
# ai = ai + ff3(nodes["node_pos_vel_embed"])
return ai
def node_to_M_fn(nodes):
return mass(nodes["node_embed"])
Net = GNNet(N=mpass,
V_fn=None,
T_fn=None,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
graph, V, T = Net(graph)
# return jnp.hstack([edge_node_to_force( graph.edges,graph.nodes,graph.senders,graph.receivers,graph.n_node), node_to_M_fn(graph.nodes)])
return edge_node_to_force(graph.edges,graph.nodes,graph.senders,graph.receivers,graph.n_node)
def a_mcgnode_cal_force_q_qdot(params, graph, eorder=None, mpass=1,
useT=True, useonlyedge=False, act_fn=SquarePlus):
fb_params = params["fb"]
fne_params = params["fne"]
fneke_params = params["fneke"]
fv_params = params["fv"]
fe_params = params["fe"]
ff1_params = params["ff1"]
ff2_params = params["ff2"]
ff3_params = params["ff3"]
ke_params = params["ke"]
mass_params = params["mass"]
num_species = 1
def onehot(n):
def fn(n):
out = jax.nn.one_hot(n, num_species)
return out
out = vmap(fn)(n.reshape(-1,))
return out
def fne(n):
def fn(ni):
out = forward_pass(fne_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fneke(n):
def fn(ni):
out = forward_pass(fneke_params, ni, activation_fn=lambda x: x)
return out
out = vmap(fn, in_axes=(0))(n)
return out
def fb(e):
def fn(eij):
out = forward_pass(fb_params, eij, activation_fn=act_fn)
return out
out = vmap(fn, in_axes=(0))(e)
return out
def fv(n, e, s, r, sum_n_node):
c1ij = jnp.hstack([n[r], e])
out = vmap(lambda x: forward_pass(fv_params, x))(c1ij)
return n + jax.ops.segment_sum(out, r, sum_n_node)
def fe(e, s, r):
def fn(hi, hj):
c2ij = hi * hj
out = forward_pass(fe_params, c2ij, activation_fn=act_fn)
return out
out = e + vmap(fn, in_axes=(0, 0))(s, r)
return out
def ff1(e):
def fn(eij):
out = forward_pass(ff1_params, eij, activation_fn=act_fn)
return out
out = vmap(fn)(e)
return out
def ff2(n):
def fn(ni):
out = forward_pass(ff2_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ff3(n):
def fn(ni):
out = forward_pass(ff3_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def ke(n):
def fn(ni):
out = forward_pass(ke_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
def mass(n):
def fn(ni):
out = forward_pass(mass_params, ni, activation_fn=act_fn)
return out
out = vmap(fn)(n)
return out
# ================================================================================
def initial_edge_emb_fn(edges, senders, receivers, globals_):
del edges, globals_
dr = (senders["position"] - receivers["position"])
eij = jnp.array(dr)
# eij = jnp.sqrt(jnp.square(dr).sum(axis=1, keepdims=True))
emb = fb(eij)
return frozendict({"edge_embed": emb, "eij": eij})
def initial_node_emb_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
type_of_node = nodes["type"]
ohe = onehot(type_of_node)
emb = fne(ohe)
emb_pos_vel = jnp.hstack([emb, nodes["position"],nodes["velocity"]])
return frozendict({"node_embed": emb,
# "node_pos_vel_embed": emb_pos_vel,
})
def update_node_fn(nodes, edges, senders, receivers, globals_, sum_n_node):
del globals_
emb = fv(nodes["node_embed"], edges["edge_embed"],
senders, receivers, sum_n_node)
n = dict(nodes)
n.update({"node_embed": emb})
return frozendict(n)
def update_edge_fn(edges, senders, receivers, globals_, last_step):
del globals_
emb = fe(edges["edge_embed"], senders["node_embed"],
receivers["node_embed"])
if last_step:
if eorder is not None:
emb = (emb + fe(edges["edge_embed"][eorder],
receivers["node_embed"], senders["node_embed"])) / 2
return frozendict({"edge_embed": emb, "eij": edges["eij"]})
def edge_node_to_force(edges, nodes, sen, rec, sum_n_node):
ai = 0
fij = ff1(edges["edge_embed"])
fi1 = jax.ops.segment_sum(fij, rec, sum_n_node)
fi2 = jax.ops.segment_sum(-fij, sen, sum_n_node)
ai = ai+ (fi1+fi2)
# ai = ai + ff3(nodes["node_pos_vel_embed"])
return ai
# def node_to_M_fn(nodes):
# return mass(nodes["node_embed"])
Net = GNNet(N=mpass,
V_fn=None,
T_fn=None,
initial_edge_embed_fn=initial_edge_emb_fn,
initial_node_embed_fn=initial_node_emb_fn,
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
graph, V, T = Net(graph)
# return jnp.hstack([edge_node_to_force( graph.edges,graph.nodes,graph.senders,graph.receivers,graph.n_node), node_to_M_fn(graph.nodes)])
return edge_node_to_force(graph.edges,graph.nodes,graph.senders,graph.receivers,graph.n_node)
| 47,723 | 33.408075 | 141 | py |
benchmarking_graph | benchmarking_graph-main/src/lnn.py | from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
from jax import grad, jit, vmap
from numpy.core.fromnumeric import reshape
from .models import ReLU, SquarePlus, forward_pass
def MAP(input_fn):
"""Map vmap for first input.
:param input_fn: function to map
:type input_fn: function
"""
def temp_g(x, *args, **kwargs):
def temp_f(x):
return input_fn(x, *args, **kwargs)
return vmap(temp_f, in_axes=0)(x)
return temp_g
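# Example (illustrative; assumes a per-sample function `pair_fn(x, params)`):
# batched_fn = MAP(pair_fn)
# energies = batched_fn(xs, params)  # xs carries a leading batch axis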
def nonan(input_fn):
"""Apply nonan macro.
:param input_fn: input function
:type input_fn: function
"""
def out_fn(*args, **kwargs):
return jnp.nan_to_num(input_fn(*args, **kwargs))
out_fn.__doc__ = input_fn.__doc__
return out_fn
def describe_params(params_):
"""Print parameters.
:param params_: Parameters
:type params_: dict or list
:return: description of parameters.
:rtype: string
"""
if isinstance(params_, dict):
str_ = ""
for k, params in params_.items():
str_ = str_ + f"{k}\n" + \
"\n".join([f"\tLayer {ind}\n\tW: {p[0].shape}, b: {p[1].shape}"
for ind, p in enumerate(params)])
return str_
else:
return "\n".join([f"Layer {ind}\n\tW: {p[0].shape}, b: {p[1].shape}"
for ind, p in enumerate(params_)])
def FFLNN(x, v, params):
x_ = x.reshape(-1,)
return _T(v) - forward_pass(params, x_)[0]
def LNN(x, v, params):
"""
x: Vector
v: Vector
"""
x_ = x.reshape(-1, )
v_ = v.reshape(-1, )
return forward_pass(params, jnp.vstack([x_, v_]))[0]
def _V(x, params):
pass
def _T(v, mass=jnp.array([1.0])):
if len(mass) != len(v):
mass = mass[0]*jnp.ones((len(v)))
out = mass*jnp.square(v).sum(axis=1)
return 0.5*out.sum()
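# Worked example: two unit-mass particles moving at unit speed along x,
# _T(jnp.array([[1.0, 0.0], [1.0, 0.0]]))  ->  1.0  (= 0.5*1 + 0.5*1)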
def _L(x, v, params):
pass
def lagrangian(x, v, params):
"""
lagrangian calls lnn._L
x: Vector
v: Vector
"""
return _L(x, v, params)
def calM(x, v, params):
return jax.hessian(lagrangian, 1)(x, v, params)
jcalM = jit(calM)
def calMinv(x, v, params):
return jnp.linalg.pinv(calM(x, v, params))
jcalMinv = jit(calMinv)
def acceleration(x, v, params):
Dim = x.shape[1]
N = x.shape[0]*Dim
M_1 = jcalMinv(x, v, params).reshape(N, N)
dx_L = jax.grad(lagrangian, 0)(x, v, params).reshape(N, 1)
dxdv_L = jax.jacobian(jax.jacobian(lagrangian, 1),
0)(x, v, params).reshape(N, N)
out = M_1 @ (dx_L - dxdv_L @ v.reshape(N, 1))
return out.reshape(-1, Dim)
def accelerationTV(x, v, params):
Dim = x.shape[1]
N = x.shape[0]
M_1 = jnp.linalg.pinv(jax.hessian(_T, 0)(v).reshape(N*Dim, N*Dim))
dx_L = jax.grad(lagrangian, 0)(x, v, params).reshape(-1, 1)
out = M_1 @ (dx_L)
return out.reshape(-1, Dim)
def accelerationFull(n, Dim, lagrangian=lagrangian, non_conservative_forces=None, external_force=None, constraints=None):
""" ̈q = M⁻¹(-C ̇q + Π + Υ - Aᵀ(AM⁻¹Aᵀ)⁻¹ ( AM⁻¹ (-C ̇q + Π + Υ + F ) + Adot ̇q ) + F )
:param T: [description], defaults to _T
:type T: [type], optional
:param lagrangian: [description], defaults to lagrangian
:type lagrangian: [type], optional
"""
def inv(x, *args, **kwargs):
return jnp.linalg.pinv(x, *args, **kwargs)
if non_conservative_forces == None:
def non_conservative_forces(x, v, params): return 0
if external_force == None:
def external_force(x, v, params): return 0
if constraints == None:
def constraints(x, v, params): return jnp.zeros((1, n*Dim))
eye = jnp.eye(n*Dim)
def dL_dv(R, V, params):
return jax.grad(lagrangian, 1)(R.reshape(n, Dim),
V.reshape(n, Dim), params).flatten()
def d2L_dv2(R, V, params):
return jax.jacobian(dL_dv, 1)(R, V, params)
# return eye*jnp.diag(jax.jacobian(dL_dv, 1)(R, V, params))
def fn(x, v, params):
N = n*Dim
# M⁻¹ = (∂²L/∂²v)⁻¹
M = d2L_dv2(x.flatten(), v.flatten(), params)
M_1 = inv(M)
# Π = ∂L/∂x
Π = jax.grad(lagrangian, 0)(x, v, params).reshape(
N, 1)
# C = ∂²L/∂v∂x
C = jax.jacobian(jax.jacobian(lagrangian, 1),
0)(x, v, params).reshape(N, N)
Υ = non_conservative_forces(x, v, params)
F = external_force(x, v, params)
A = constraints(x.reshape(-1), v.reshape(-1), params)
Aᵀ = A.T
AM_1 = A @ M_1
v = v.reshape(N, 1)
Ax = jax.jacobian(constraints, 0)(x.reshape(-1), v.reshape(-1), None)
Adot = Ax @ v.reshape(-1)
xx = (AM_1 @ (-C @ v + Π + Υ + F) + Adot @ v)
tmp = Aᵀ @ inv(AM_1 @ Aᵀ) @ xx
out = M_1 @ (-C @ v + Π + Υ - tmp + F)
return out.reshape(-1, Dim)
return fn
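# Usage sketch (hypothetical names; `my_lagrangian(x, v, params)` and the
# constraint Jacobian `my_constraints(x, v, params)` come from the caller):
# acc_fn = accelerationFull(n, dim, lagrangian=my_lagrangian,
#                           constraints=my_constraints)
# a = acc_fn(x, v, params)  # (n, dim) array of accelerations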
def accelerationModified(x, v, params):
Dim = x.shape[1]
N = x.shape[0]
M_1 = forward_pass(params["M_1"], v.reshape(-1, ))
M_1 = M_1.reshape(N*Dim, N*Dim)
dx_L = jax.grad(lagrangian, 0)(x, v, params["PEF"]).reshape(-1, )
dxdv_L = jax.jacobian(jax.jacobian(lagrangian, 1), 0)(
x, v, params["PEF"]).reshape(N*Dim, N*Dim)
F = (dx_L - dxdv_L @ v.reshape(-1, ))
out = M_1 @ F
return out.reshape(-1, Dim)
def force(x, v, params):
dx_L = jax.grad(lagrangian, 0)(x, v, params)
dxdv_L = jax.jacobian(jax.jacobian(lagrangian, 1), 0)(x, v, params)
out = dx_L - dxdv_L @ v
return out
def prediction_fn(X, params):
x, v = jnp.split(X, 2)
return acceleration(x, v, params)
# Make a batched version of the `predict` function
batch_prediction = vmap(prediction_fn, in_axes=(None, 0), out_axes=0)
# PEFs
# =============================================
def useNN(norm=True):
"""Create NNP function.
:param norm: if take norm of input, defaults to True
:type norm: bool, optional
:return: NNP function
:rtype: function
"""
if norm:
def f(x, params=None, cutoff=None):
x_ = jnp.linalg.norm(x, keepdims=True)
return jnp.where(x_ < cutoff, forward_pass(params, x_, activation_fn=SquarePlus), forward_pass(params, cutoff, activation_fn=SquarePlus))
return f
else:
def f(x, params=None, cutoff=None):
if cutoff is None:
return forward_pass(params, x, activation_fn=SquarePlus)
else:
return jnp.where(x[-1] < cutoff, forward_pass(params, x, activation_fn=SquarePlus),
forward_pass(params, jax.ops.index_update(x, -1, cutoff), activation_fn=SquarePlus))
return f
def NNP(*args, **kwargs):
"""FFNN potential with cutoff.
:param x: Inter-particle distance
:type x: float
:param params: NN parameters
:type params: NN parameters
:param cutoff: potential cutoff, defaults to None
:type cutoff: float, optional
:return: energy
:rtype: float
"""
return useNN()(*args, **kwargs)
def SPRING(x, stiffness=1.0, length=1.0):
"""Linear spring, v=0.5kd^2.
:param x: Inter-particle distance
:type x: float
:param stiffness: Spring stiffness constant, defaults to 1.0
:type stiffness: float, optional
:param length: Equilibrium length, defaults to 1.0
:type length: float, optional
:return: energy
:rtype: float
"""
x_ = jnp.linalg.norm(x, keepdims=True)
return 0.5*stiffness*(x_ - length)**2
def SPRING4(x, stiffness=1.0, length=1.0):
"""Non-linear spring, v=0.5kd^4.
:param x: Inter-particle distance
:type x: float
:param stiffness: Spring stiffness constant, defaults to 1.0
:type stiffness: float, optional
:param length: Equilibrium length, defaults to 1.0
:type length: float, optional
:return: energy
:rtype: float
"""
x_ = jnp.linalg.norm(x, keepdims=True)
return 0.5*stiffness*(x_ - length)**4
@ nonan
def GRAVITATIONAL(x, Gc=1.0):
"""Gravitational energy, Gc/r.
:param x: Inter-particle distance.
:type x: float
:param Gc: Gravitational constant, defaults to 1.0
:type Gc: float, optional
:return: energy
:rtype: float
"""
x_ = jnp.linalg.norm(x, keepdims=True)
return -Gc/x_
@ nonan
def VANDERWALLS(x, C=4.0):
"""Van Der Walls energy, C/r^12.
:param x: Interatomic distance.
:type x: float
:param C: C, defaults to 4.0
:type C: float, optional
:return: energy
:rtype: float
"""
x_ = jnp.linalg.norm(x, keepdims=True)
return C/x_**12
@ nonan
def x_6(x):
"""x^6
:param x: value
:type x: float
:return: value
:rtype: float
"""
return 1.0/x**6
@ nonan
def x_3(x):
"""x^3
:param x: value
:type x: float
:return: value
:rtype: float
"""
return 1.0/x**3
def LJ(x, sigma=1.0, epsilon=1.0):
"""Lennard-Jones (12-6) interatomic potential function.
:param x: Interatomic distance
:type x: float
:param sigma: sigma, defaults to 1.0
:type sigma: float, optional
:param epsilon: epsilon, defaults to 1.0
:type epsilon: float, optional
:return: energy
:rtype: float
"""
x_ = jnp.sum(jnp.square(x), keepdims=True)
r = x_3(x_)*sigma**6
return 4.0*epsilon*(r**2 - r)
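# Sanity check: the 12-6 potential is minimized at r = 2**(1/6)*sigma with
# depth -epsilon; with the defaults,
# LJ(jnp.array([2.0**(1.0/6.0), 0.0]))  ->  array([-1.])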
# =============================================
def t1(displacement=lambda a, b: a-b):
"""Create transformation function using displacement function.
:param displacement: Displacement function to calculate Euclidean displacement, defaults to lambda a, b: a - b
:type displacement: Function, optional
"""
def f(R):
Dim = R.shape[1]
# dd = displacement(R.reshape(-1, 1, Dim), R.reshape(1, -1, Dim))
dd = vmap(vmap(displacement, in_axes=(0, None)),
in_axes=(None, 0))(R, R)
indexs = jax.numpy.tril_indices(R.shape[0], k=-1)
# R1, R2 = R[:20], R
# dd = vmap(vmap(displacement, in_axes=(0, None)),
# in_axes=(None, 0))(R1, R2)
# indexs = jax.numpy.triu_indices(R1.shape[0], 1, R2.shape[0])
out = vmap(lambda i, j, dd: dd[i, j], in_axes=(
0, 0, None))(indexs[0], indexs[1], dd)
return out
return f
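# Example: for R of shape (N, Dim), t1()(R) returns the N*(N-1)/2 pairwise
# displacements below the diagonal; N=3, Dim=2 gives an array of shape (3, 2).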
def t2(q):
"""Apply transformation q -> q - q.mean(axis=0).
:param q: Input array
:type q: Array
:return: Modified array
:rtype: Array
"""
q -= q.mean(axis=0, keepdims=True)
return q
def t3(q):
"""No transformation.
:param q: Input array.
:type q: Array
:return: Same as input.
:rtype: Array
"""
return q
# ================================
def cal_energy_parameters(params, states):
kineticenergy = jnp.array([_T(state.velocity) for state in states])
totallagrangian = jnp.array([lagrangian(state.position.reshape(-1,), state.velocity.reshape(-1,), params)
for state in states])
hamiltonian = 2*kineticenergy - totallagrangian
return totallagrangian, hamiltonian, kineticenergy
def linear_mom_fn(states):
return jnp.array([jnp.sqrt(jnp.square(state.velocity.sum(axis=0)).sum()) for state in states])
def angular_mom_fn(states):
return jnp.array([jnp.sqrt(jnp.square(jnp.cross(state.position, state.velocity).sum(axis=0)).sum()) for state in states])
| 11,352 | 26.030952 | 149 | py |
benchmarking_graph | benchmarking_graph-main/src/Pendulum-LGNN-post-rk.py | ################################################
################## IMPORT ######################
################################################
# from fcntl import F_SEAL_SEAL
import json
import sys
import os
from datetime import datetime
from functools import partial, wraps
from statistics import mode
import fire
import jax
import jax.numpy as jnp
import numpy as np
from jax import jit, random, value_and_grad, vmap
from jax.experimental import optimizers
from jax_md import space
from pyexpat import model
#from shadow.plot import *
import matplotlib.pyplot as plt
#from sklearn.metrics import r2_score
# from scipy.stats import gmean
from psystems.npendulum import (PEF, edge_order, get_init, hconstraints,
pendulum_connections)
MAINPATH = ".." # nopep8
sys.path.append(MAINPATH) # nopep8
import jraph
import src
from jax.config import config
from src import lnn
from src.graph import *
from src.lnn import acceleration, accelerationFull, accelerationTV
from src.md import *
from src.models import MSE, initialize_mlp
from src.nve import NVEStates, nve
from src.utils import *
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True)
# jax.config.update('jax_platform_name', 'gpu')
def namestr(obj, namespace):
return [name for name in namespace if namespace[name] is obj]
def pprint(*args, namespace=globals()):
for arg in args:
print(f"{namestr(arg, namespace)[0]}: {arg}")
def main(N=2, dim=2, dt=1.0e-5, useN=2, stride=1000, ifdrag=0, seed=100, rname=0, saveovito=1, trainm=1, runs=100, semilog=1, maxtraj=100, plotthings=False, redo=0):
print("Configs: ")
pprint(dt, stride, ifdrag,
namespace=locals())
PSYS = f"{N}-Pendulum"
TAG = f"lgnn"
out_dir = f"../results"
def _filename(name, tag=TAG, trained=None):
if tag == "data":
part = f"_{ifdrag}."
else:
part = f"_{ifdrag}_{trainm}."
if trained is not None:
psys = f"{trained}-{PSYS.split('-')[1]}"
else:
psys = PSYS
name = ".".join(name.split(".")[:-1]) + \
part + name.split(".")[-1]
rstring = datetime.now().strftime("%m-%d-%Y_%H-%M-%S") if rname else "0"
filename_prefix = f"{out_dir}/{psys}-{tag}/{rstring}/"
file = f"{filename_prefix}/{name}"
os.makedirs(os.path.dirname(file), exist_ok=True)
filename = f"{filename_prefix}/{name}".replace("//", "/")
print("===", filename, "===")
return filename
def displacement(a, b):
return a - b
def shift(R, dR, V):
return R+dR, V
def OUT(f):
@wraps(f)
def func(file, *args, tag=TAG, trained=None, **kwargs):
return f(_filename(file, tag=tag, trained=trained),
*args, **kwargs)
return func
def _fileexist(f):
if redo:
return False
else:
return os.path.isfile(f)
loadmodel = OUT(src.models.loadmodel)
savemodel = OUT(src.models.savemodel)
loadfile = OUT(src.io.loadfile)
savefile = OUT(src.io.savefile)
save_ovito = OUT(src.io.save_ovito)
fileexist = OUT(_fileexist)
################################################
################## CONFIG ######################
################################################
np.random.seed(seed)
key = random.PRNGKey(seed)
# dataset_states = loadfile(f"model_states.pkl", tag="data")[0]
# model_states = dataset_states[0]
# R = model_states.position[0]
# V = model_states.velocity[0]
# print(
# f"Total number of training data points: {len(dataset_states)}x{model_states.position.shape[0]}")
# N, dim = model_states.position.shape[-2:]
R, V = get_init(N, dim=dim, angles=(-90, 90))
species = jnp.zeros(N, dtype=int)
masses = jnp.ones(N)
################################################
################## SYSTEM ######################
################################################
pot_energy_orig = PEF
kin_energy = partial(lnn._T, mass=masses)
def Lactual(x, v, params):
return kin_energy(v) - pot_energy_orig(x)
def constraints(x, v, params):
return jax.jacobian(lambda x: hconstraints(x.reshape(-1, dim)), 0)(x)
def external_force(x, v, params):
F = 0*R
F = jax.ops.index_update(F, (1, 1), -1.0)
return F.reshape(-1, 1)
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return -0.1*v.reshape(-1, 1)
acceleration_fn_orig = lnn.accelerationFull(N, dim,
lagrangian=Lactual,
non_conservative_forces=drag,
constraints=constraints,
external_force=None)
def force_fn_orig(R, V, params, mass=None):
if mass is None:
return acceleration_fn_orig(R, V, params)
else:
return acceleration_fn_orig(R, V, params)*mass.reshape(-1, 1)
def get_forward_sim(params=None, force_fn=None, runs=10):
@jit
def fn(R, V):
return predition4(R, V, params, force_fn, shift, dt, masses, stride=stride, runs=runs)
return fn
sim_orig = get_forward_sim(
params=None, force_fn=force_fn_orig, runs=maxtraj*runs)
def simGT():
print("Simulating ground truth ...")
_traj = sim_orig(R, V)
metadata = {"key": f"maxtraj={maxtraj}, runs={runs}"}
savefile("gt_trajectories.pkl",
_traj, metadata=metadata)
return _traj
# if fileexist("gt_trajectories.pkl"):
# print("Loading from saved.")
# full_traj, metadata = loadfile("gt_trajectories.pkl")
# full_traj = NVEStates(full_traj)
# if metadata["key"] != f"maxtraj={maxtraj}, runs={runs}":
# print("Metadata doesnot match.")
# full_traj = NVEStates(simGT())
# else:
# full_traj = NVEStates(simGT())
################################################
################### ML Model ###################
################################################
senders, receivers = pendulum_connections(N)
eorder = edge_order(N)
# def L_energy_fn(params, graph):
# g, V, T = cal_graph(params, graph, eorder=eorder, useT=True)
# return T - V
if trainm:
print("kinetic energy: learnable")
def L_energy_fn(params, graph):
g, V, T = cal_graph(params, graph, eorder=eorder,
useT=True)
return T - V
else:
print("kinetic energy: 0.5mv^2")
kin_energy = partial(lnn._T, mass=masses)
def L_energy_fn(params, graph):
g, V, T = cal_graph(params, graph, eorder=eorder,
useT=True)
return kin_energy(graph.nodes["velocity"]) - V
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species,
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([N]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def energy_fn(species):
senders, receivers = [np.array(i)
for i in pendulum_connections(R.shape[0])]
state_graph = jraph.GraphsTuple(nodes={
"position": R,
"velocity": V,
"type": species
},
edges={},
senders=senders,
receivers=receivers,
n_node=jnp.array([R.shape[0]]),
n_edge=jnp.array([senders.shape[0]]),
globals={})
def apply(R, V, params):
state_graph.nodes.update(position=R)
state_graph.nodes.update(velocity=V)
return L_energy_fn(params, state_graph)
return apply
apply_fn = energy_fn(species)
v_apply_fn = vmap(apply_fn, in_axes=(None, 0))
def Lmodel(x, v, params): return apply_fn(x, v, params["L"])
def nndrag(v, params):
return - jnp.abs(models.forward_pass(params, v.reshape(-1), activation_fn=models.SquarePlus)) * v
if ifdrag == 0:
print("Drag: 0.0")
def drag(x, v, params):
return 0.0
elif ifdrag == 1:
print("Drag: -0.1*v")
def drag(x, v, params):
return vmap(nndrag, in_axes=(0, None))(v.reshape(-1), params["drag"]).reshape(-1, 1)
acceleration_fn_model = accelerationFull(N, dim,
lagrangian=Lmodel,
constraints=constraints,
non_conservative_forces=drag)
def force_fn_model(R, V, params, mass=None):
if mass is None:
return acceleration_fn_model(R, V, params)
else:
return acceleration_fn_model(R, V, params)*mass.reshape(-1, 1)
params = loadfile(f"trained_model.dil", trained=useN)[0]
sim_model = get_forward_sim(
params=params, force_fn=force_fn_model, runs=runs)
################################################
############## forward simulation ##############
################################################
def norm(a):
a2 = jnp.square(a)
n = len(a2)
a3 = a2.reshape(n, -1)
return jnp.sqrt(a3.sum(axis=1))
def RelErr(ya, yp):
return norm(ya-yp) / (norm(ya) + norm(yp))
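# RelErr is a symmetric relative error: it is 0 when ya == yp (for nonzero
# trajectories) and, by the triangle inequality, bounded above by 1.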
def Err(ya, yp):
return ya-yp
def AbsErr(*args):
return jnp.abs(Err(*args))
def cal_energy_fn(lag=None, params=None):
@jit
def fn(states):
KE = vmap(kin_energy)(states.velocity)
L = vmap(lag, in_axes=(0, 0, None)
)(states.position, states.velocity, params)
PE = -(L - KE)
return jnp.array([PE, KE, L, KE+PE]).T
return fn
Es_fn = cal_energy_fn(lag=Lactual, params=None)
Es_pred_fn = cal_energy_fn(lag=Lmodel, params=params)
    def net_force_fn(force=None, params=None):
        @jit
        def fn(states):
            return vmap(force, in_axes=(0, 0, None))(states.position, states.velocity, params)
        return fn

    net_force_orig_fn = net_force_fn(force=force_fn_orig)
    net_force_model_fn = net_force_fn(
        force=force_fn_model, params=params)
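
    # nexp accumulates per-trajectory results: predicted/actual positions,
    # their relative rollout error (Zerr), the relative total-energy error
    # (Herr), and the raw energy curves (E).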
    nexp = {
        "z_pred": [],
        "z_actual": [],
        "Zerr": [],
        "Herr": [],
        "E": [],
    }

    trajectories = []

    sim_orig2 = get_forward_sim(
        params=None, force_fn=force_fn_orig, runs=runs)
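
    # Evaluation loop: roll out ground truth and model from maxtraj random
    # initial conditions and compare the two trajectories.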
    for ind in range(maxtraj):
        print(f"Simulating trajectory {ind}/{maxtraj}")

        # R = full_traj[_ind].position
        # V = full_traj[_ind].velocity
        # start_ = _ind+1
        # stop_ = start_+runs

        R, V = get_init(N, dim=dim, angles=(-90, 90))

        # R = dataset_states[ind].position[0]
        # V = dataset_states[ind].velocity[0]

        actual_traj = sim_orig2(R, V)  # full_traj[start_:stop_]
        pred_traj = sim_model(R, V)

        if saveovito:
            save_ovito(f"pred_{ind}.data", [
                state for state in NVEStates(pred_traj)], lattice="")
            save_ovito(f"actual_{ind}.data", [
                state for state in NVEStates(actual_traj)], lattice="")

        trajectories += [(actual_traj, pred_traj)]
        # checkpoint after every trajectory so partial results survive crashes
        savefile("trajectories.pkl", trajectories)
        if plotthings:
            for key, traj in {"actual": actual_traj, "pred": pred_traj}.items():
                print(f"plotting energy ({key})...")

                Es = Es_fn(traj)
                Es_pred = Es_pred_fn(traj)
                # the learned Lagrangian is only defined up to a constant,
                # so anchor the predicted energies to the true ones at t=0
                Es_pred = Es_pred - Es_pred[0] + Es[0]

                fig, axs = plt.subplots(1, 2, figsize=(20, 5))
                axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
                axs[1].plot(Es_pred, "--", label=["PE", "KE", "L", "TE"])
                plt.legend(bbox_to_anchor=(1, 1), loc=2)
                axs[0].set_facecolor("w")

                plt.xlabel("Time step")
                plt.ylabel("Energy")
                title = f"(LGNN) {N}-Pendulum Exp rk {ind}"
                plt.title(title)
                plt.savefig(_filename(title.replace(" ", "-")+f"_{key}.png"))

                net_force_orig = net_force_orig_fn(traj)
                net_force_model = net_force_model_fn(traj)

                plt.clf()
                fig, axs = plt.subplots(1+R.shape[0], 1, figsize=(20,
                                                                  R.shape[0]*5))
                for i, ax in zip(range(R.shape[0]+1), axs):
                    if i == 0:
                        ax.text(0.6, 0.8, "Summed over all particles",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig.sum(axis=1), lw=6, label=[
                                r"$F_x$", r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(net_force_model.sum(axis=1), "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")
                    else:
                        ax.text(0.6, 0.8, f"For particle {i}",
                                transform=ax.transAxes, color="k")
                        ax.plot(net_force_orig[:, i-1, :], lw=6, label=[r"$F_x$",
                                r"$F_y$", r"$F_z$"][:R.shape[1]], alpha=0.5)
                        ax.plot(net_force_model[:, i-1, :], "--", color="k")
                        ax.plot([], "--", c="k", label="Predicted")

                    ax.legend(loc=2, bbox_to_anchor=(1, 1),
                              labelcolor="markerfacecolor")
                    ax.set_ylabel("Net force")
                    ax.set_xlabel("Time step")
                    ax.set_title(f"{N}-Pendulum Exp {ind}")
                plt.savefig(_filename(f"net_force_Exp_rk_{ind}_{key}.png"))
        Es = Es_fn(actual_traj)
        Eshat = Es_fn(pred_traj)
        # total energy is the last column of [PE, KE, L, TE]
        H = Es[:, -1]
        Hhat = Eshat[:, -1]

        nexp["Herr"] += [RelErr(H, Hhat)]
        nexp["E"] += [Es, Eshat]

        nexp["z_pred"] += [pred_traj.position]
        nexp["z_actual"] += [actual_traj.position]
        nexp["Zerr"] += [RelErr(actual_traj.position,
                                pred_traj.position)]

        plt.clf()
        fig, axs = plt.subplots(1, 2, figsize=(20, 5))
        axs[0].plot(Es, label=["PE", "KE", "L", "TE"], lw=6, alpha=0.5)
        axs[1].plot(Eshat, "--", label=["PE", "KE", "L", "TE"])
        plt.legend(bbox_to_anchor=(1, 1), loc=2)
        axs[0].set_facecolor("w")
        plt.xlabel("Time step")
        plt.ylabel("Energy")

        title = f"LGNN {N}-Pendulum Exp {ind} Lmodel rk"
        axs[1].set_title(title)
        title = f"LGNN {N}-Pendulum Exp {ind} Lactual rk"
        axs[0].set_title(title)

        plt.savefig(_filename(title.replace(" ", "-")+".png"))

    savefile("error_parameter.pkl", nexp)
    def make_plots(nexp, key, yl="Err", xl="Time", key2=None):
        print(f"Plotting err for {key}")

        plt.clf()
        fig, axs = plt.subplots(1, 1)

        # build the filename suffix once, not once per trajectory
        filepart = f"{key}" if key2 is None else f"{key}_{key2}"

        for i in range(len(nexp[key])):
            y = nexp[key][i].flatten()
            if key2 is None:
                x = range(len(y))
            else:
                x = nexp[key2][i].flatten()
            if semilog:
                plt.semilogy(x, y)
            else:
                plt.plot(x, y)

        plt.ylabel(yl)
        plt.xlabel(xl)
        plt.savefig(_filename(f"RelError-rk_{filepart}.png"))

        plt.clf()
        fig, axs = plt.subplots(1, 1)

        # relative errors span orders of magnitude, so aggregate in log space
        mean_ = jnp.log(jnp.array(nexp[key])).mean(axis=0)
        std_ = jnp.log(jnp.array(nexp[key])).std(axis=0)

        up_b = jnp.exp(mean_ + 2*std_)
        low_b = jnp.exp(mean_ - 2*std_)
        y = jnp.exp(mean_)

        x = range(len(mean_))
        if semilog:
            plt.semilogy(x, y)
        else:
            plt.plot(x, y)
        plt.fill_between(x, low_b, up_b, alpha=0.5)
        plt.ylabel(yl)
        plt.xlabel("Time")
        plt.savefig(_filename(f"RelError_std-rk_{key}.png"))
make_plots(nexp, "Zerr",
yl=r"$\frac{||\hat{z}-z||_2}{||\hat{z}||_2+||z||_2}$")
make_plots(nexp, "Herr",
yl=r"$\frac{||H(\hat{z})-H(z)||_2}{||H(\hat{z})||_2+||H(z)||_2}$")
gmean_zerr = jnp.exp( jnp.log(jnp.array(nexp["Zerr"])).mean(axis=0) )
gmean_herr = jnp.exp( jnp.log(jnp.array(nexp["Herr"])).mean(axis=0) )
np.savetxt("../zerr/lgnn-rk.txt", gmean_zerr, delimiter = "\n")
np.savetxt("../herr/lgnn-rk.txt", gmean_herr, delimiter = "\n")
fire.Fire(main)
| 17,099 | 31.447818 | 166 | py |