repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
Meta-RL-Harlow | Meta-RL-Harlow-master/models/ep_lstm_cell.py | from typing import (
Tuple,
List,
Optional,
Dict,
Callable,
Union,
cast,
)
from collections import namedtuple
from dataclasses import dataclass
import numpy as np
import torch as T
from torch import nn
from torch import Tensor
from torch.nn import functional as F
# from models.ep_lstm import EpLSTMCell_Builder
# constants
# Number of gates in the episodic LSTM cell: input (I), forget (F),
# cell candidate (G), output (O), plus the extra reinstatement gate (R)
# that writes retrieved episodic memory into the cell state.
N_GATES = 5
# Named view over the per-gate chunks of a fused weight/bias tensor.
GateSpans = namedtuple('GateSpans', ['I', 'F', 'G', 'O', 'R'])
# Lookup so configs can name activation functions as strings.
ACTIVATIONS = {
'sigmoid': nn.Sigmoid(),
'tanh': nn.Tanh(),
'hard_tanh': nn.Hardtanh(),
'relu': nn.ReLU(),
}
class EpLSTMCell(nn.Module):
    """Episodic LSTM cell.

    A standard LSTM cell extended with a fifth "reinstatement" gate R that
    writes an external episodic memory ``mt`` into the cell state:

        ct = ft * c_{t-1} + it * gt + rt * tanh(mt)

    Gate order everywhere is (I, F, G, O, R); see ``GateSpans``/``N_GATES``.
    (Previously the gate count ``5`` was hard-coded throughout even though
    ``N_GATES`` is declared above; this now uses the constant consistently.)
    """

    def __repr__(self):
        return (
            f'{self.__class__.__name__}('
            + ', '.join(
                [
                    f'in: {self.Dx}',
                    f'hid: {self.Dh}',
                    f'rdo: {self.recurrent_dropout_p} @{self.recurrent_dropout_mode}',
                    f'vdo: {self.vertical_dropout_p}',
                ]
            )
            + ')'
        )

    def __init__(
        self,
        input_size: int,
        args,
    ):
        """
        :param input_size: dimensionality of the input vectors ``xt``.
        :param args: config namespace providing ``hidden_size``,
            ``recurrent_dropout``, ``vertical_dropout``,
            ``recurrent_dropout_mode``, ``tied_forget_gate`` and
            ``recurrent_activation`` (a key of ``ACTIVATIONS`` or a module).
        """
        super().__init__()
        self._args = args
        self.Dx = input_size
        self.Dh = args.hidden_size
        # Fused projections: one matmul yields all N_GATES gate pre-activations.
        self.recurrent_kernel = nn.Linear(self.Dh, self.Dh * N_GATES)
        self.input_kernel = nn.Linear(self.Dx, self.Dh * N_GATES)
        self.recurrent_dropout_p = args.recurrent_dropout or 0.0
        self.vertical_dropout_p = args.vertical_dropout or 0.0
        self.recurrent_dropout_mode = args.recurrent_dropout_mode
        self.recurrent_dropout = nn.Dropout(self.recurrent_dropout_p)
        self.vertical_dropout = nn.Dropout(self.vertical_dropout_p)
        self.tied_forget_gate = args.tied_forget_gate
        if isinstance(args.recurrent_activation, str):
            self.fun_rec = ACTIVATIONS[args.recurrent_activation]
        else:
            self.fun_rec = args.recurrent_activation
        self.reset_parameters_()

    # @T.jit.ignore
    def get_recurrent_weights(self):
        # type: () -> Tuple[GateSpans, GateSpans]
        """Return recurrent weights/biases split per gate (I, F, G, O, R)."""
        W = self.recurrent_kernel.weight.chunk(N_GATES, 0)
        b = self.recurrent_kernel.bias.chunk(N_GATES, 0)
        W = GateSpans(W[0], W[1], W[2], W[3], W[4])
        b = GateSpans(b[0], b[1], b[2], b[3], b[4])
        return W, b

    # @T.jit.ignore
    def get_input_weights(self):
        # type: () -> Tuple[GateSpans, GateSpans]
        """Return input weights/biases split per gate (I, F, G, O, R)."""
        W = self.input_kernel.weight.chunk(N_GATES, 0)
        b = self.input_kernel.bias.chunk(N_GATES, 0)
        W = GateSpans(W[0], W[1], W[2], W[3], W[4])
        b = GateSpans(b[0], b[1], b[2], b[3], b[4])
        return W, b

    @T.jit.ignore
    def reset_parameters_(self):
        """Orthogonal recurrent / Xavier input init; forget-gate bias set to 1."""
        rw, rb = self.get_recurrent_weights()
        iw, ib = self.get_input_weights()
        nn.init.zeros_(self.input_kernel.bias)
        nn.init.zeros_(self.recurrent_kernel.bias)
        nn.init.ones_(rb.F)
        #^ forget bias: start out remembering (standard LSTM trick)
        for W in rw:
            nn.init.orthogonal_(W)
        for W in iw:
            nn.init.xavier_uniform_(W)

    # @T.jit.export
    @T.jit.ignore
    def get_init_state(self, input: Tensor) -> Tuple[Tensor, Tensor]:
        """Zero (h, c) state sized from ``input``'s batch dim ([t b i] layout)."""
        batch_size = input.shape[1]
        zeros = T.zeros(batch_size, self.Dh, device=input.device)
        return (zeros, zeros)

    def apply_input_kernel(self, xt: Tensor) -> List[Tensor]:
        """Project the (vertically dropped-out) input into N_GATES gate chunks."""
        xto = self.vertical_dropout(xt)
        out = self.input_kernel(xto).chunk(N_GATES, 1)
        return out

    def apply_recurrent_kernel(self, h_tm1: Tensor):
        """Project h_{t-1} into gate chunks, honouring the dropout mode.

        'gal_tied'  : one dropout mask shared across all gates (Gal-style).
        'gal_gates' : an independent dropout mask per gate (N_GATES matmuls).
        otherwise   : no recurrent dropout applied here.
        """
        #^ h_tm1 : [b h]
        mode = self.recurrent_dropout_mode
        if mode == 'gal_tied':
            hto = self.recurrent_dropout(h_tm1)
            out = self.recurrent_kernel(hto)
            #^ out : [b N_GATES*h]
            outs = out.chunk(N_GATES, -1)
        elif mode == 'gal_gates':
            outs = []
            WW, bb = self.get_recurrent_weights()
            for i in range(N_GATES):
                hto = self.recurrent_dropout(h_tm1)
                outs.append(F.linear(hto, WW[i], bb[i]))
        else:
            outs = self.recurrent_kernel(h_tm1).chunk(N_GATES, -1)
        return outs

    def forward(self, xt, mt, state):
        # type: (Tensor, Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        """One step: input ``xt`` [b i], episodic memory ``mt``, state (h, c)."""
        (h_tm1, c_tm1) = state
        Xi, Xf, Xg, Xo, Xr = self.apply_input_kernel(xt)
        Hi, Hf, Hg, Ho, Hr = self.apply_recurrent_kernel(h_tm1)
        ft = self.fun_rec(Xf + Hf)
        ot = self.fun_rec(Xo + Ho)
        if self.tied_forget_gate:
            it = 1.0 - ft  # couple input/forget: write exactly what is forgotten
        else:
            it = self.fun_rec(Xi + Hi)
        gt = T.tanh(Xg + Hg)
        if self.recurrent_dropout_mode == 'semeniuta':
            #* https://arxiv.org/abs/1603.05118 -- dropout on the cell update only
            gt = self.recurrent_dropout(gt)
        rt = self.fun_rec(Xr + Hr)
        # Reinstatement gate rt writes the retrieved episodic memory into the cell.
        ct = (ft * c_tm1) + (it * gt) + (rt * T.tanh(mt))
        ht = ot * T.tanh(ct)
        return ht, (ht, ct)

    # @T.jit.export
    @T.jit.ignore
    def loop(self, inputs, memories, state_t0, mask=None):
        # type: (Tensor, Tensor, Tuple[Tensor, Tensor], Optional[List[Tensor]]) -> Tuple[List[Tensor], Tuple[Tensor, Tensor]]
        '''
        Unroll the cell over time: ``inputs``/``memories`` are length-t
        sequences of per-step tensors; returns the per-step hidden states
        and the final (h, c) state.
        '''
        #^ inputs : t * [b i]
        #^ memories : t * [b i]
        #^ state_t0[i] : [b s]
        #^ out : [t b h]
        state = state_t0
        outs = []
        for xt, mt in zip(inputs, memories):
            ht, state = self(xt, mt, state)
            outs.append(ht)
        return outs, state
| 5,570 | 28.47619 | 125 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/a3c_conv_lstm.py | import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
# Checkpoint URLs for small CIFAR-pretrained conv nets; only 'cifar100'
# is consumed below to warm-start the encoder.
model_urls = {
'cifar10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth',
'cifar100': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar100-3a55a987.pth',
}
def make_layers(cfg, batch_norm=False, in_channels=3):
    """Build a VGG-style feature stack from a config list.

    Each entry of ``cfg`` is either ``'M'`` (2x2 max-pool, stride 4),
    an int ``out_channels`` (3x3 conv, padding 1), or a tuple
    ``(out_channels, padding)``.  Convs are followed by an optional
    non-affine BatchNorm and an ELU.
    """
    modules = []
    channels = in_channels
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=4))
            continue
        if isinstance(spec, tuple):
            out_channels, padding = spec
        else:
            out_channels, padding = spec, 1
        modules.append(nn.Conv2d(channels, out_channels, kernel_size=3, padding=padding))
        if batch_norm:
            modules.append(nn.BatchNorm2d(out_channels, affine=False))
        modules.append(nn.ELU())  # ELU replaced the original ReLU
        channels = out_channels
    return nn.Sequential(*modules)
class Encoder(nn.Module):
    """Three-stage conv feature extractor (channels n, 2n, 4n, each stage
    being two convs followed by a max-pool), built via ``make_layers``
    with batch norm enabled."""

    def __init__(self, n_channel):
        super(Encoder, self).__init__()
        cfg = []
        for mult in (1, 2, 4):
            cfg += [mult * n_channel, mult * n_channel, 'M']
        self.features = make_layers(cfg, batch_norm=True)

    def forward(self, inputs):
        return self.features(inputs)
class A3C_ConvLSTM(nn.Module):
    """A3C agent: conv encoder (optionally CIFAR-100 pre-trained and frozen)
    feeding a single LSTM over (features, prev action, prev reward)."""

    def __init__(self, config, num_actions, pretrained=True):
        super(A3C_ConvLSTM, self).__init__()
        self.encoder = Encoder(config["conv-nchannels"])
        if pretrained:
            self._load_pretrained_encoder()
        # LSTM input: 2048 flattened conv features + prev reward (1) + one-hot prev action.
        self.working_memory = nn.LSTM(2048 + 1 + num_actions, config["mem-units"])
        self.actor = nn.Linear(config["mem-units"], num_actions)
        self.critic = nn.Linear(config["mem-units"], 1)
        # orthogonal head init: small gain for the policy, unit gain for the value
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)

    def _load_pretrained_encoder(self):
        """Load the first 24 CIFAR-100 checkpoint tensors into the encoder and freeze it."""
        m = model_zoo.load_url(model_urls['cifar100'], map_location=T.device('cpu'))
        weights = m.state_dict() if isinstance(m, nn.Module) else m
        weights = {
            k: v for i, (k, v) in enumerate(weights.items())
            if i < 24
        }
        self.encoder.load_state_dict(weights)
        for p in self.encoder.parameters():
            p.requires_grad = False

    def forward(self, obs, p_input, mem_state=None):
        """Return (action_logits, value_estimate, new_mem_state)."""
        if mem_state is None:
            mem_state = self.get_init_states(layer=1)
        feats = self.encoder(obs).flatten(1)
        mem_input = T.cat((feats, *p_input), dim=-1).unsqueeze(0)
        h_t, mem_state = self.working_memory(mem_input, mem_state)
        return self.actor(h_t), self.critic(h_t), mem_state

    def get_init_states(self, layer, device='cuda'):
        """Zero (h, c) start state; ``layer`` is accepted for API symmetry."""
        hsize = self.working_memory.hidden_size
        h0 = T.zeros(1, 1, hsize).float().to(device)
        c0 = T.zeros(1, 1, hsize).float().to(device)
        return (h0, c0)
class A3C_ConvStackedLSTM(nn.Module):
    """Stacked-LSTM A3C agent on the (optionally pre-trained, frozen) conv
    encoder; layer 1 sees (features, prev reward), layer 2 sees
    (features, layer-1 output, prev action)."""

    def __init__(self, config, num_actions, pretrained=True):
        super(A3C_ConvStackedLSTM, self).__init__()
        self.encoder = Encoder(config["conv-nchannels"])
        if pretrained:
            # Load the first 24 CIFAR-100 checkpoint tensors, then freeze.
            m = model_zoo.load_url(model_urls['cifar100'], map_location=T.device('cpu'))
            weights = m.state_dict() if isinstance(m, nn.Module) else m
            weights = {
                k: v for i, (k, v) in enumerate(weights.items())
                if i < 24
            }
            self.encoder.load_state_dict(weights)
            for p in self.encoder.parameters():
                p.requires_grad = False
        self.actor = nn.Linear(128, num_actions)
        self.critic = nn.Linear(128, 1)
        self.lstm_1 = nn.LSTM(2048 + 1, config["mem-units"])
        self.lstm_2 = nn.LSTM(2048 + config["mem-units"] + num_actions, 128)
        # orthogonal head init: small gain for the policy, unit gain for the value
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)

    def forward(self, obs, p_input, state_1=None, state_2=None):
        """Return (action_logits, value_estimate, new_state_1, new_state_2)."""
        p_action, p_reward = p_input
        if state_1 is None:
            state_1 = self.get_init_states(layer=1)
        if state_2 is None:
            state_2 = self.get_init_states(layer=2)
        feats = self.encoder(obs).flatten(1)
        in_1 = T.cat((feats, p_reward), dim=-1)
        if in_1.dim() == 2:
            in_1 = in_1.unsqueeze(0)
        out_1, state_1 = self.lstm_1(in_1, state_1)
        in_2 = T.cat((feats, out_1.squeeze(0), p_action), dim=-1)
        if in_2.dim() == 2:
            in_2 = in_2.unsqueeze(0)
        out_2, state_2 = self.lstm_2(in_2, state_2)
        return self.actor(out_2), self.critic(out_2), state_1, state_2

    def get_init_states(self, layer, device='cuda'):
        """Zero (h, c) start state for the requested LSTM layer (1 or 2)."""
        hsize = self.lstm_1.hidden_size if layer == 1 else self.lstm_2.hidden_size
        h0 = T.zeros(1, 1, hsize).float().to(device)
        c0 = T.zeros(1, 1, hsize).float().to(device)
        return (h0, c0)

    def save_featmaps(self, obs, path, layer=5):
        """Dump intermediate encoder feature maps (stages 0..layer) to ``path``."""
        featmaps = self.encoder.features[:layer + 1](obs)
        np.save(path, featmaps)
| 6,009 | 33.94186 | 104 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/a3c_dnd_lstm.py | """
A DND-based LSTM based on ...
Ritter, et al. (2018).
Been There, Done That: Meta-Learning with Episodic Recall.
Proceedings of the International Conference on Machine Learning (ICML).
"""
import torch as T
import torch.nn as nn
import torch.nn.functional as F
from models.dnd import DND
from models.ep_lstm import EpLSTM
class A2C_DND_LSTM(nn.Module):
    """Advantage actor-critic agent whose episodic LSTM is coupled to a
    differentiable neural dictionary (DND) for long-term recall.

    Ritter et al. (2018), "Been There, Done That: Meta-Learning with
    Episodic Recall", ICML.
    """

    def __init__(self,
                 input_dim,
                 hidden_dim,
                 num_actions,
                 dict_len,
                 kernel='l2',
                 bias=True
                 ):
        super(A2C_DND_LSTM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.bias = bias
        # long-term memory: key -> value episodic store
        self.dnd = DND(dict_len, hidden_dim, kernel)
        # short-term memory: episodic LSTM consuming (input, retrieved memory)
        self.ep_lstm = EpLSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=1,
            batch_first=False
        )
        # learnable initial LSTM state
        self.h0 = nn.Parameter(T.randn(1, self.ep_lstm.hidden_size).float())
        self.c0 = nn.Parameter(T.randn(1, self.ep_lstm.hidden_size).float())
        # policy and value heads
        self.actor = nn.Linear(hidden_dim, num_actions)
        self.critic = nn.Linear(hidden_dim, 1)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-init LSTM weights, initial states, and the actor/critic heads."""
        self.ep_lstm.reset_parameters()
        T.nn.init.normal_(self.h0)
        T.nn.init.normal_(self.c0)
        # orthogonal head init: small gain for the policy, unit gain for the value
        T.nn.init.orthogonal_(self.actor.weight, gain=0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight, gain=1.0)
        self.critic.bias.data.fill_(0)

    def forward(self, data, cue, mem_state):
        """One A2C step.

        ``data`` is (state, prev_action, prev_reward, timestep); ``cue`` is
        the DND lookup key; ``mem_state`` is (h, c), or None to use the
        learned initial state.
        """
        state, p_action, p_reward, timestep = data
        x_t = T.cat((state, p_action, p_reward, timestep), dim=-1)
        if mem_state is None:
            mem_state = (self.h0, self.c0)
        m_t = self.dnd.get_memory(cue)
        _, (h_t, c_t) = self.ep_lstm((x_t.unsqueeze(1), m_t.unsqueeze(1)), mem_state)
        return self.actor(h_t), self.critic(h_t), (h_t, c_t)

    def pick_action(self, action_distribution):
        """action selection by sampling from a multinomial.
        Parameters
        ----------
        action_distribution : 1d T.tensor
            action distribution, pi(a|s)
        Returns
        -------
        T.tensor(int), T.tensor(float)
            sampled action, log_prob(sampled action)
        """
        dist = T.distributions.Categorical(action_distribution)
        a_t = dist.sample()
        return a_t, dist.log_prob(a_t)

    def get_init_states(self):
        """Learned initial (h, c) state."""
        return (self.h0, self.c0)

    def turn_off_encoding(self):
        self.dnd.encoding_off = True

    def turn_on_encoding(self):
        self.dnd.encoding_off = False

    def turn_off_retrieval(self):
        self.dnd.retrieval_off = True

    def turn_on_retrieval(self):
        self.dnd.retrieval_off = False

    def reset_memory(self):
        self.dnd.reset_memory()

    def save_memory(self, mem_key, mem_val):
        self.dnd.save_memory(mem_key, mem_val)

    def retrieve_memory(self, query_key):
        return self.dnd.get_memory(query_key)

    def get_all_mems(self):
        """Return the DND's stored (keys, values) as parallel lists."""
        n_mems = len(self.dnd.keys)
        K = [self.dnd.keys[i] for i in range(n_mems)]
        V = [self.dnd.vals[i] for i in range(n_mems)]
        return K, V
| 3,612 | 27.448819 | 85 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/a3c_lstm_simple.py | import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from models.rgu import RGUnit
# Recurrent-core registry: maps the ``cell_type`` config string to a
# constructor taking (input_size, hidden_size).
CELLS = {
'lstm': nn.LSTM,
'gru': nn.GRU,
'rgu': RGUnit
}
class A3C_LSTM(nn.Module):
    """A3C agent for 1D Harlow: small MLP encoder + a recurrent core chosen
    from ``CELLS`` (lstm / gru / rgu)."""

    def __init__(self, input_dim, hidden_size, num_actions, cell_type="lstm"):
        super(A3C_LSTM, self).__init__()
        # NOTE(review): the encoder input width is hard-coded to 9 and
        # ``input_dim`` is never used -- confirm against callers.
        self.encoder = nn.Sequential(
            nn.Linear(9, 64),
            nn.ReLU(),
            nn.Linear(64, 128),
            nn.ReLU(),
        )
        rnn_ctor = CELLS[cell_type]
        self.cell_type = cell_type
        # core input: 128 encoded features + one-hot prev action + prev reward
        self.working_memory = rnn_ctor(128 + num_actions + 1, hidden_size)
        self.actor = nn.Linear(hidden_size, num_actions)
        self.critic = nn.Linear(hidden_size, 1)
        # orthogonal head init: small gain for the policy, unit gain for the value
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)

    def forward(self, obs, p_input, mem_state=None):
        """Return (action_logits, value_estimate, new_mem_state)."""
        if mem_state is None:
            mem_state = self.get_init_states()
        feats = self.encoder(obs)
        mem_input = T.cat((feats, *p_input), dim=-1)
        if mem_input.dim() == 2:
            mem_input = mem_input.unsqueeze(0)  # add the time dimension
        h_t, mem_state = self.working_memory(mem_input, mem_state)
        return self.actor(h_t), self.critic(h_t), mem_state

    def get_init_states(self, device='cpu'):
        """Zero start state: (h, c) for lstm/rgu cells, h alone for gru."""
        shape = (1, 1, self.working_memory.hidden_size)
        h0 = T.zeros(*shape).float().to(device)
        c0 = T.zeros(*shape).float().to(device)
        return (h0, c0) if self.cell_type in ["lstm", "rgu"] else h0
class A3C_StackedLSTM(nn.Module):
    """A3C agent with two stacked LSTMs: layer 1 sees only the encoded
    observation; layer 2 sees layer-1 output plus prev reward and action."""

    def __init__(self,
                 input_dim,
                 hidden_dim,
                 num_actions,
                 device="cpu",
                 ):
        super(A3C_StackedLSTM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.device = device
        feat_dim = 128
        # NOTE(review): the encoder input width is hard-coded to 9 and
        # ``input_dim`` is never used -- confirm against callers.
        self.encoder = nn.Sequential(
            nn.Linear(9, 64),
            nn.ReLU(),
            nn.Linear(64, feat_dim),
            nn.ReLU(),
        )
        # two-layer recurrent core
        self.lstm_1 = nn.LSTM(feat_dim, hidden_dim)
        self.lstm_2 = nn.LSTM(hidden_dim + 1 + num_actions, hidden_dim)
        self.actor = nn.Linear(hidden_dim, num_actions)
        self.critic = nn.Linear(hidden_dim, 1)
        self.reset_parameters()

    def reset_parameters(self):
        """Orthogonal policy/value head init (gains 0.01 / 1.0), zero biases."""
        T.nn.init.orthogonal_(self.actor.weight, gain=0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight, gain=1.0)
        self.critic.bias.data.fill_(0)

    def forward(self, obs, p_input, state_1, state_2):
        """Return (action_logits, value_estimate, new_state_1, new_state_2)."""
        p_action, p_reward = p_input
        feats = self.encoder(obs)
        _, (h_t1, c_t1) = self.lstm_1(feats.unsqueeze(1), state_1)
        x_t2 = T.cat((h_t1.squeeze(0), p_reward, p_action), dim=-1).unsqueeze(1)
        _, (h_t2, c_t2) = self.lstm_2(x_t2, state_2)
        return self.actor(h_t2), self.critic(h_t2), (h_t1, c_t1), (h_t2, c_t2)

    def get_init_states(self, layer=1):
        """Zero (h, c) start state for the requested LSTM layer (1 or 2)."""
        size = self.lstm_1.hidden_size if layer == 1 else self.lstm_2.hidden_size
        h0 = T.zeros(1, 1, size).float().to(self.device)
        c0 = T.zeros(1, 1, size).float().to(self.device)
        return (h0, c0)
| 3,913 | 29.341085 | 88 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/densenet_lstm.py | import numpy as np
import torch as T
import torchvision
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
    """Frozen DenseNet-161 trunk producing 2208-d average-pooled features."""

    def __init__(self, freeze=True):
        super(Encoder, self).__init__()
        base = torchvision.models.densenet161(pretrained=True)
        self.features = T.nn.Sequential(*list(base.children())[:-1])
        # NOTE(review): parameters are frozen unconditionally -- the
        # ``freeze`` argument is never consulted.
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, x):
        out = self.features(x)
        out = F.relu(out, inplace=True)
        out = F.avg_pool2d(out, kernel_size=7)
        return out.view(out.size(0), -1)
class DenseNet_StackedLSTM(nn.Module):
    """Stacked-LSTM A3C agent on frozen DenseNet-161 features (2208-d);
    layer 1 sees (features, prev reward), layer 2 sees
    (features, layer-1 output, prev action)."""

    def __init__(self, config, num_actions, pretrained=True):
        super(DenseNet_StackedLSTM, self).__init__()
        self.encoder = Encoder(freeze=True)
        self.actor = nn.Linear(256, num_actions)
        self.critic = nn.Linear(256, 1)
        self.lstm_1 = nn.LSTM(2208 + 1, config["mem-units"])
        self.lstm_2 = nn.LSTM(2208 + config["mem-units"] + num_actions, 256)
        # orthogonal head init: small gain for the policy, unit gain for the value
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)

    def forward(self, obs, p_input, state_1=None, state_2=None):
        """Return (action_logits, value_estimate, new_state_1, new_state_2)."""
        p_action, p_reward = p_input
        if state_1 is None:
            state_1 = self.get_init_states(layer=1)
        if state_2 is None:
            state_2 = self.get_init_states(layer=2)
        feats = self.encoder(obs)
        in_1 = T.cat((feats, p_reward), dim=-1)
        if in_1.dim() == 2:
            in_1 = in_1.unsqueeze(0)
        out_1, state_1 = self.lstm_1(in_1, state_1)
        in_2 = T.cat((feats, out_1.squeeze(0), p_action), dim=-1)
        if in_2.dim() == 2:
            in_2 = in_2.unsqueeze(0)
        out_2, state_2 = self.lstm_2(in_2, state_2)
        return self.actor(out_2), self.critic(out_2), state_1, state_2

    def get_init_states(self, layer, device='cuda'):
        """Zero (h, c) start state for the requested LSTM layer (1 or 2)."""
        hsize = self.lstm_1.hidden_size if layer == 1 else self.lstm_2.hidden_size
        h0 = T.zeros(1, 1, hsize).float().to(device)
        c0 = T.zeros(1, 1, hsize).float().to(device)
        return (h0, c0)

    def save_featmaps(self, obs, path, layer=5):
        """Dump intermediate encoder feature maps (stages 0..layer) to ``path``."""
        featmaps = self.encoder.features[:layer + 1](obs)
        np.save(path, featmaps)
| 2,587 | 31.759494 | 82 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/a3c_lstm.py | import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class A3C_LSTM(nn.Module):
    """A3C agent: small conv encoder feeding one LSTM over
    (features, prev action, prev reward)."""

    def __init__(self, config, num_actions):
        super(A3C_LSTM, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=(8, 8), stride=(4, 4)),
            nn.Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2)),
            nn.Flatten(),
            nn.Linear(7200, 256),
            nn.ReLU(),
        )
        self.actor = nn.Linear(config["mem-units"], num_actions)
        self.critic = nn.Linear(config["mem-units"], 1)
        # LSTM input: 256 conv features + one-hot prev action + prev reward
        self.working_memory = nn.LSTM(256 + num_actions + 1, config["mem-units"])
        # orthogonal head init: small gain for the policy, unit gain for the value
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)

    def forward(self, obs, p_input, mem_state=None):
        """Return (action_logits, value_estimate, new_mem_state)."""
        if mem_state is None:
            mem_state = self.get_init_states()
        feats = self.encoder(obs)
        mem_input = T.cat((feats, *p_input), dim=-1).unsqueeze(0)
        h_t, mem_state = self.working_memory(mem_input, mem_state)
        return self.actor(h_t), self.critic(h_t), mem_state

    def get_init_states(self, device='cpu'):
        """Zero (h, c) start state."""
        size = self.working_memory.hidden_size
        h0 = T.zeros(1, 1, size).float().to(device)
        c0 = T.zeros(1, 1, size).float().to(device)
        return (h0, c0)
class A3C_StackedLSTM(nn.Module):
    """A3C agent with a conv encoder and two stacked LSTMs; layer 1 gets
    (features, prev reward), layer 2 gets (features, layer-1 out, prev action)."""

    def __init__(self, config, num_actions):
        super(A3C_StackedLSTM, self).__init__()
        # NOTE(review): the original shape comments implied 84x84 inputs
        # (-> 2592 features) but Linear(7200, 256) implies 132x132 -- confirm.
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=(8, 8), stride=(4, 4)),
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2)),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(7200, 256),
            nn.ReLU(),
        )
        self.actor = nn.Linear(128, num_actions)
        self.critic = nn.Linear(128, 1)
        self.lstm_1 = nn.LSTM(256 + 1, config["mem-units"])
        self.lstm_2 = nn.LSTM(256 + config["mem-units"] + num_actions, 128)
        # orthogonal head init: small gain for the policy, unit gain for the value
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)

    def forward(self, obs, p_input, state_1=None, state_2=None):
        """Return (action_logits, value_estimate, new_state_1, new_state_2)."""
        p_action, p_reward = p_input
        if state_1 is None:
            state_1 = self.get_init_states(layer=1)
        if state_2 is None:
            state_2 = self.get_init_states(layer=2)
        feats = self.encoder(obs)
        in_1 = T.cat((feats, p_reward), dim=-1).unsqueeze(0)
        out_1, state_1 = self.lstm_1(in_1, state_1)
        in_2 = T.cat((feats, out_1.squeeze(0), p_action), dim=-1).unsqueeze(0)
        out_2, state_2 = self.lstm_2(in_2, state_2)
        return self.actor(out_2), self.critic(out_2), state_1, state_2

    def get_init_states(self, layer, device='cpu'):
        """Zero (h, c) start state for the requested LSTM layer (1 or 2)."""
        size = self.lstm_1.hidden_size if layer == 1 else self.lstm_2.hidden_size
        h0 = T.zeros(1, 1, size).float().to(device)
        c0 = T.zeros(1, 1, size).float().to(device)
        return (h0, c0)
Meta-RL-Harlow | Meta-RL-Harlow-master/models/rgu_cell.py | from typing import (
Tuple,
List,
Optional,
Dict,
Callable,
Union,
cast,
)
from collections import namedtuple
from abc import ABC, abstractmethod
from dataclasses import dataclass
import torch as T
from torch import nn
from torch.nn import functional as F
from torch import Tensor
import pdb
# NOTE(review): 'RGUnit' and 'ReciprocallyGated_Cell_Builder' appear to be
# defined in models/rgu.py rather than this module -- confirm __all__ intent.
__all__ = [
'RGUnit',
'ReciprocallyGated_Cell',
'ReciprocallyGated_Cell_Builder',
]
# Named view over the two gate chunks: gh gates the hidden path, gc the cell path.
GateSpans = namedtuple('GateSpans', ['gh', 'gc'])
# Lookup so configs can name activation functions as strings.
ACTIVATIONS = {
'sigmoid': nn.Sigmoid(),
'tanh': nn.Tanh(),
'hard_tanh': nn.Hardtanh(),
'relu': nn.ReLU(),
}
class ReciprocallyGated_Cell(nn.Module):
    '''
    Recurrent cell in which the hidden state and the cell state gate each
    other's updates ("reciprocal gating").

    Adapted from:
    https://papers.nips.cc/paper/7775-task-driven-convolutional-recurrent-models-of-the-visual-system
    arxiv:1807.00053
    with modifications.
    '''

    def __repr__(self):
        parts = [
            f'in: {self.Dx}',
            f'hid: {self.Dh}',
            f'rdo: {self.recurrent_dropout_p}',
            f'vdo: {self.vertical_dropout_p}',
        ]
        return f'{self.__class__.__name__}(' + ', '.join(parts) + ')'

    def __init__(
        self,
        input_size: int,
        args,
    ):
        super().__init__()
        self._args = args
        self.Dx = input_size
        self.Dh = args.hidden_size
        # NOTE(review): these kernels were previously nn.Linear layers (see
        # history); the Conv1d replacements use out_channels=2 (not 2*Dh)
        # and expect [b, Dh, L] inputs -- confirm the intended tensor layout.
        self.recurrent_kernel = nn.Conv1d(
            in_channels=self.Dh,
            out_channels=2,
            kernel_size=3,
            stride=1,
        )
        self.cell_memory_kernel = nn.Conv1d(
            in_channels=self.Dh,
            out_channels=2,
            kernel_size=3,
            stride=1,
        )
        self.input_kernel = nn.Conv1d(
            in_channels=self.Dh,
            out_channels=2,
            kernel_size=3,
            stride=1,
        )
        self.recurrent_dropout_p = args.recurrent_dropout or 0.0
        self.vertical_dropout_p = args.vertical_dropout or 0.0
        self.recurrent_dropout = nn.Dropout(self.recurrent_dropout_p)
        self.vertical_dropout = nn.Dropout(self.vertical_dropout_p)
        self.fun_gate = ACTIVATIONS[args.gate_activation]
        self.fun_main = ACTIVATIONS[args.activation]
        self.reset_parameters_()

    # @T.jit.ignore
    def get_recurrent_weights(self):
        # type: () -> Tuple[GateSpans, GateSpans]
        """Recurrent kernel weight/bias split into (gh, gc) halves."""
        W = self.recurrent_kernel.weight.chunk(2, 0)
        b = self.recurrent_kernel.bias.chunk(2, 0)
        return GateSpans(W[0], W[1]), GateSpans(b[0], b[1])

    # @T.jit.ignore
    def get_cell_memory_weights(self):
        # type: () -> Tuple[GateSpans, GateSpans]
        """Cell-memory kernel weight/bias split into (gh, gc) halves."""
        W = self.cell_memory_kernel.weight.chunk(2, 0)
        b = self.cell_memory_kernel.bias.chunk(2, 0)
        return GateSpans(W[0], W[1]), GateSpans(b[0], b[1])

    @T.jit.ignore
    def reset_parameters_(self):
        """Orthogonal recurrent init, Xavier cell-memory init; biases preset
        to 0.5 (recurrent) and 0 (cell-memory)."""
        rw, rb = self.get_recurrent_weights()
        cw, cb = self.get_cell_memory_weights()
        nn.init.zeros_(self.cell_memory_kernel.bias)
        nn.init.constant_(self.recurrent_kernel.bias, 0.5)
        for W in rw:
            nn.init.orthogonal_(W)
        for W in cw:
            nn.init.xavier_uniform_(W)

    @T.jit.export
    def get_init_state(self, input: Tensor) -> Tuple[Tensor, Tensor]:
        """Zero (h, c) state sized from ``input``'s batch dim ([t b i] layout)."""
        batch_size = input.shape[1]
        zeros = T.zeros(batch_size, self.Dh, device=input.device)
        return (zeros, zeros)

    def apply_input_kernel(self, xt: Tensor) -> List[Tensor]:
        """Input projection (with vertical dropout), split into two halves."""
        dropped = self.vertical_dropout(xt)
        return self.input_kernel(dropped).chunk(2, 1)

    def apply_recurrent_kernel(self, h_tm1: Tensor) -> List[Tensor]:
        """Hidden-state projection (with recurrent dropout), split into halves."""
        dropped = self.recurrent_dropout(h_tm1)
        return self.recurrent_kernel(dropped).chunk(2, 1)

    def apply_cell_memory_kernel(self, c_tm1: Tensor) -> List[Tensor]:
        """Cell-state projection split into two halves."""
        return self.cell_memory_kernel(c_tm1).chunk(2, 1)

    def forward(self, input, state):
        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        """One step of reciprocal gating: h and c each gate the other's update."""
        h_tm1, c_tm1 = state
        Cc, Ch = self.apply_cell_memory_kernel(c_tm1)
        Hc, Hh = self.apply_recurrent_kernel(h_tm1)
        Xc, Xh = self.apply_input_kernel(input)
        # each candidate mixes the gated input with the gated previous state
        gh = (1 - self.fun_gate(Ch)) * Xh + (1 - self.fun_gate(Hh)) * h_tm1
        gc = (1 - self.fun_gate(Hc)) * Xc + (1 - self.fun_gate(Cc)) * c_tm1
        ht = self.fun_main(gh)
        ct = self.fun_main(gc)
        return ht, (ht, ct)

    @T.jit.export
    def loop(self, inputs, state_t0, mask=None):
        # type: (List[Tensor], Tuple[Tensor, Tensor], Optional[List[Tensor]]) -> List[Tensor]
        '''
        Unroll the cell over the time dimension; returns the per-step hidden
        states and the final state.
        '''
        state = state_t0
        outs = []
        for xt in inputs:
            ht, state = self(xt, state)
            outs.append(ht)
        return outs, state
Meta-RL-Harlow | Meta-RL-Harlow-master/models/rgu.py | from typing import (
Tuple,
List,
Optional,
Dict,
Callable,
Union,
cast,
)
from collections import namedtuple
from abc import ABC, abstractmethod
from dataclasses import dataclass
import numpy as np
import torch as T
from torch import nn
from torch.nn import functional as F
from torch import Tensor
from models.rgu_cell import ReciprocallyGated_Cell
@dataclass
class ReciprocallyGated_Cell_Builder:
    """Configuration holder that manufactures ``ReciprocallyGated_Cell``s.

    One builder carries the shared hyper-parameters; ``make`` binds an
    input size to it, so the same builder can serve several layers.
    """
    # input_size: int
    hidden_size: int
    vertical_dropout: float = 0.0
    recurrent_dropout: float = 0.0
    input_kernel_initialization: str = 'xavier_uniform'
    gate_activation: str = 'sigmoid'
    activation: str = 'tanh'

    def make(self, input_size: int):
        """Build a cell with ``input_size`` inputs and this configuration."""
        return ReciprocallyGated_Cell(input_size, self)
class RGU_Layer(nn.Module):
    """Wraps a recurrent cell: handles batch-first transposition, the default
    initial state, and unrolling over time via the cell's ``loop``."""

    def __init__(
        self,
        cell,
        direction='forward',
        batch_first=False,
    ):
        super().__init__()
        # ``batch_first`` may be one bool (applied to input and output alike)
        # or an (input, output) pair.
        if isinstance(batch_first, bool):
            batch_first = (batch_first, batch_first)
        self.batch_first = batch_first
        self.direction = direction
        self.cell_ = cell

    @T.jit.ignore
    def forward(self, input, state_t0, return_state=None):
        """Run the cell over ``input`` ([t b i], or [b t i] when batch-first)."""
        if self.batch_first[0]:
            input = input.transpose(1, 0)  # [b t i] -> [t b i]
        steps = input.unbind(0)
        if state_t0 is None:
            state_t0 = self.cell_.get_init_state(input)
        sequence, state = self.cell_.loop(steps, state_t0)
        sequence = T.stack(sequence)  # t * [b h] -> [t b h]
        if self.batch_first[1]:
            sequence = sequence.transpose(1, 0)  # [t b h] -> [b t h]
        return sequence, state
class RGUnit(nn.Module):
    """Multi-layer reciprocally-gated recurrent unit.

    Builds ``num_layers`` stacked ``RGU_Layer``s from a
    ``ReciprocallyGated_Cell_Builder`` configured via ``*args, **kargs``
    (``hidden_size``, dropouts, activations).
    """

    def __init__(
        self,
        input_size: int,
        num_layers: int,
        batch_first: bool = False,
        scripted: bool = True,
        *args, **kargs,
    ):
        super().__init__()
        self._cell_builder = ReciprocallyGated_Cell_Builder(*args, **kargs)
        Dh = self._cell_builder.hidden_size

        def make(isize: int):
            cell = self._cell_builder.make(isize)
            # BUG FIX: previously RGU_Layer(cell, isize, ...) passed the
            # input size into the ``direction`` parameter by position.
            return RGU_Layer(cell, batch_first=batch_first)

        rnns = [
            make(input_size),
            *[
                make(Dh)
                for _ in range(num_layers - 1)
            ],
        ]
        self.rnn = nn.Sequential(*rnns)
        self.input_size = input_size
        self.hidden_size = self._cell_builder.hidden_size
        self.num_layers = num_layers

    def __repr__(self):
        # BUG FIX: previously referenced the nonexistent ``self.bidirectional``
        # (raising AttributeError) and never closed the parenthesis.
        return (
            f'${self.__class__.__name__}'
            + '('
            + f'in={self.input_size}, '
            + f'hid={self.hidden_size}, '
            + f'layers={self.num_layers}'
            + '; '
            + str(self._cell_builder)
            + ')'
        )

    def forward(self, inputs, state_t0=None):
        """Run all layers in sequence; returns (outputs, last layer's state).

        NOTE(review): every layer receives the same ``state_t0`` -- with
        num_layers > 1 deeper layers restart from that state; confirm intent.
        """
        for rnn in self.rnn:
            inputs, state = rnn(inputs, state_t0)
        return inputs, state

    def reset_parameters(self):
        """Re-run each cell's parameter initialization."""
        for rnn in self.rnn:
            rnn.cell_.reset_parameters_()
if __name__ == "__main__":
    # Smoke test: a single RGU layer on a dummy batch, then drop into pdb
    # to inspect the outputs interactively.
    rgu = RGUnit(
        input_size=128,
        hidden_size=256,
        num_layers=1,
        batch_first=False,
    )
    x_t = T.rand(1, 16, 128)
    state_0 = (T.zeros(1, 256), T.zeros(1, 256))
    h_t, state_t = rgu(x_t, state_0)
    import pdb
    pdb.set_trace()
Meta-RL-Harlow | Meta-RL-Harlow-master/models/resnet_lstm.py | import numpy as np
import torch as T
import torchvision
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
    """ResNet-18 trunk (randomly initialized: pretrained=False) flattened to
    one feature vector.

    NOTE(review): the output is reshaped to (1, -1), so this assumes
    batch size 1 -- confirm against callers.
    """

    def __init__(self):
        super(Encoder, self).__init__()
        base = torchvision.models.resnet18(pretrained=False)
        self.features = T.nn.Sequential(*list(base.children())[:-1])

    def forward(self, x):
        return self.features(x).view(1, -1)
class ResNet_LSTM(nn.Module):
    """A3C agent: ResNet-18 features + one LSTM over
    (features, prev action, prev reward)."""

    def __init__(self, config, num_actions, pretrained=True):
        super(ResNet_LSTM, self).__init__()
        self.encoder = Encoder()
        self.cell_type = config["cell-type"]
        self.lstm = nn.LSTM(512 + 1 + num_actions, config["mem-units"])
        self.actor = nn.Linear(config["mem-units"], num_actions)
        self.critic = nn.Linear(config["mem-units"], 1)
        # orthogonal head init: small gain for the policy, unit gain for the value
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)

    def forward(self, obs, p_input, state):
        """Return (action_logits, value_estimate, new_state)."""
        feats = self.encoder(obs)
        x_t = T.cat((feats, *p_input), dim=-1).unsqueeze(0)
        output, state_out = self.lstm(x_t, state)
        return self.actor(output), self.critic(output), state_out

    def get_init_states(self, device='cuda'):
        """Zero start state: (h, c) for an lstm cell, h alone otherwise."""
        hsize = self.lstm.hidden_size
        h0 = T.zeros(1, 1, hsize).float().to(device)
        c0 = T.zeros(1, 1, hsize).float().to(device)
        return (h0, c0) if self.cell_type == "lstm" else h0
Meta-RL-Harlow | Meta-RL-Harlow-master/Harlow_1D/train.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from datetime import datetime
from collections import namedtuple
from Harlow_1D.harlow import Harlow_1D
from models.a3c_lstm_simple import A3C_LSTM, A3C_StackedLSTM
def ensure_shared_grads(model, shared_model):
    """Copy the local worker's gradients into the shared A3C model.

    If the shared model already has gradients populated, the shared tensors
    are reused in place, so nothing needs to be (re)assigned and we bail out
    on the first such parameter.
    """
    for local_p, shared_p in zip(model.parameters(), shared_model.parameters()):
        if shared_p.grad is not None:
            return
        shared_p._grad = local_p.grad
def train(config,
          shared_model,
          optimizer,
          rank
    ):
    """A3C worker loop for the 1-D Harlow task (single recurrent agent).

    Repeatedly: sync the local agent with ``shared_model``, roll out up to
    ``n_step_update`` environment steps, compute the actor-critic loss with
    Generalized Advantage Estimation, and apply gradients to the shared
    parameters through the shared ``optimizer``.

    Args:
        config: parsed experiment config (task / agent / logging sections).
        shared_model: globally shared A3C parameters (hogwild-style).
        optimizer: optimizer bound to ``shared_model``'s parameters.
        rank: worker index; offsets the RNG seed and names log/save files.
    """
    # Per-worker deterministic seeding so workers explore differently.
    T.manual_seed(config["seed"] + rank)
    np.random.seed(config["seed"] + rank)
    T.random.manual_seed(config["seed"] + rank)
    device = config["device"]
    env = Harlow_1D()
    if config["mode"] == "vanilla":
        agent = A3C_LSTM(
            config["task"]["input-dim"],
            config["agent"]["mem-units"],
            config["task"]["num-actions"],
            config["agent"]["cell-type"]
        )
    else:
        raise ValueError(config["mode"])
    agent.to(device)
    agent.train()
    ### hyper-parameters ###
    gamma = config["agent"]["gamma"]
    gae_lambda = config["agent"]["gae-lambda"]
    val_coeff = config["agent"]["value-loss-weight"]
    entropy_coeff = config["agent"]["entropy-weight"]
    n_step_update = config["agent"]["n-step-update"]
    writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"] + f"_{rank}"))
    save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
    save_interval = config["save-interval"]
    cell_type = config["agent"]["cell-type"]
    done = True
    state = env.reset()
    # Previous action (one-hot) and previous reward are fed back as inputs.
    p_action, p_reward = [0]*config["task"]["num-actions"], 0
    print('='*50)
    print(f"Starting Worker {rank}")
    print('='*50)
    episode_reward = 0
    update_counter = 0
    total_rewards = []
    while True:
        # Pull the latest shared weights before each rollout.
        agent.load_state_dict(shared_model.state_dict())
        if done:
            rnn_state = agent.get_init_states(device)
        else:
            # Truncated BPTT: carry recurrent state across updates but cut the graph.
            if cell_type == "lstm":
                rnn_state = rnn_state[0].detach(), rnn_state[1].detach()
            elif cell_type == "gru":
                rnn_state = rnn_state.detach()
        values = []
        log_probs = []
        rewards = []
        entropies = []
        # ---- rollout of at most n_step_update environment steps ----
        for _ in range(n_step_update):
            logit, value, rnn_state = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                rnn_state
            )
            logit = logit.squeeze(0)
            prob = F.softmax(logit, dim=-1)
            log_prob = F.log_softmax(logit, dim=-1)
            # Policy entropy, kept as a bonus term in the policy loss.
            entropy = -(log_prob * prob).sum(1, keepdim=True)
            entropies += [entropy]
            # Sample an action from the current policy (no gradient through sampling).
            action = prob.multinomial(num_samples=1).detach()
            log_prob = log_prob.gather(1, action)
            state, reward, done, _ = env.step(int(action))
            episode_reward += reward
            p_action = np.eye(env.n_actions)[int(action)]
            p_reward = reward
            log_probs += [log_prob]
            values += [value]
            rewards += [reward]
            if done:
                # Episode over: log stats, periodically checkpoint, end rollout.
                state = env.reset()
                total_rewards += [episode_reward]
                avg_reward_100 = np.array(total_rewards[-100:]).mean()
                writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
                writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
                episode_reward = 0
                if env.episode_num % save_interval == 0:
                    T.save({
                        "state_dict": shared_model.state_dict(),
                        "avg_reward_100": avg_reward_100,
                    }, save_path.format(epi=env.episode_num) + ".pt")
                break
        # Bootstrap value: 0 at episode end, V(s_t) of the next state otherwise.
        R = T.zeros(1, 1).to(device)
        if not done:
            _, value, _ = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                rnn_state
            )
            R = value.detach()
        values += [R]
        policy_loss = 0
        value_loss = 0
        gae = T.zeros(1, 1).to(device)
        # Backward pass over the rollout accumulating GAE and the two losses.
        for i in reversed(range(len(rewards))):
            R = gamma * R + rewards[i]
            advantage = R - values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)
            # Generalized Advantage Estimation
            delta_t = rewards[i] + gamma * values[i + 1] - values[i]
            gae = gae * gamma * gae_lambda + delta_t
            policy_loss = policy_loss - \
                log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
        loss = policy_loss + val_coeff * value_loss
        optimizer.zero_grad()
        loss.backward()
        # Publish local gradients to the shared model, then step it.
        ensure_shared_grads(agent, shared_model)
        optimizer.step()
        update_counter += 1
        writer.add_scalar("losses/total_loss", loss.item(), update_counter)
        if env.episode_num > env.n_episodes:
            np.save(os.path.join(os.path.dirname(save_path), f"rewards_{rank}.npy"), env.reward_counter)
            break
def train_stacked(config,
          shared_model,
          optimizer,
          rank
    ):
    """A3C worker loop using the two-layer stacked-LSTM agent.

    Mirrors :func:`train` but tracks two (h, c) recurrent states, one per
    LSTM layer.

    Args:
        config: parsed experiment config (task / agent / logging sections).
        shared_model: globally shared A3C parameters.
        optimizer: optimizer bound to ``shared_model``'s parameters.
        rank: worker index; offsets the RNG seed and names log/save files.
    """
    # Per-worker deterministic seeding.
    T.manual_seed(config["seed"] + rank)
    np.random.seed(config["seed"] + rank)
    T.random.manual_seed(config["seed"] + rank)
    device = config["device"]
    env = Harlow_1D()
    agent = A3C_StackedLSTM(
        config["task"]["input-dim"],
        config["agent"]["mem-units"],
        config["task"]["num-actions"],
        device=config["device"]
    )
    agent.to(device)
    agent.train()
    ### hyper-parameters ###
    gamma = config["agent"]["gamma"]
    gae_lambda = config["agent"]["gae-lambda"]
    val_coeff = config["agent"]["value-loss-weight"]
    entropy_coeff = config["agent"]["entropy-weight"]
    n_step_update = config["agent"]["n-step-update"]
    writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"] + f"_{rank}"))
    save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
    save_interval = config["save-interval"]
    done = True
    state = env.reset()
    p_action, p_reward = [0]*config["task"]["num-actions"], 0
    print('='*50)
    print(f"Starting Worker {rank}")
    print('='*50)
    episode_reward = 0
    update_counter = 0
    total_rewards = []
    while True:
        # NOTE(review): unlike train(), this loop never calls
        # agent.load_state_dict(shared_model.state_dict()), so the local
        # weights only change through the shared optimizer's in-place updates
        # if agent and shared_model share storage - confirm this is intended.
        if done:
            h_t1, c_t1 = agent.get_init_states(layer=1)
            h_t2, c_t2 = agent.get_init_states(layer=2)
        else:
            # Truncated BPTT: keep both layers' states but cut the graph.
            h_t1, c_t1 = h_t1.detach(), c_t1.detach()
            h_t2, c_t2 = h_t2.detach(), c_t2.detach()
        values = []
        log_probs = []
        rewards = []
        entropies = []
        # ---- rollout of at most n_step_update environment steps ----
        for _ in range(n_step_update):
            logit, value, (h_t1, c_t1), (h_t2, c_t2) = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                (h_t1, c_t1), (h_t2, c_t2)
            )
            logit = logit.squeeze(0)
            prob = F.softmax(logit, dim=-1)
            log_prob = F.log_softmax(logit, dim=-1)
            entropy = -(log_prob * prob).sum(1, keepdim=True)
            entropies += [entropy]
            action = prob.multinomial(num_samples=1).detach()
            log_prob = log_prob.gather(1, action)
            state, reward, done, _ = env.step(int(action))
            episode_reward += reward
            p_action = np.eye(env.n_actions)[int(action)]
            p_reward = reward
            log_probs += [log_prob]
            values += [value]
            rewards += [reward]
            if done:
                # Episode over: log stats, periodically checkpoint, end rollout.
                state = env.reset()
                total_rewards += [episode_reward]
                avg_reward_100 = np.array(total_rewards[-100:]).mean()
                writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
                writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
                episode_reward = 0
                if env.episode_num % save_interval == 0:
                    T.save({
                        "state_dict": shared_model.state_dict(),
                        "avg_reward_100": avg_reward_100,
                        "update_counter": update_counter,
                    }, save_path.format(epi=env.episode_num) + ".pt")
                break
        # Bootstrap value: 0 at episode end, V(s_t) otherwise.
        R = T.zeros(1, 1).to(device)
        if not done:
            _, value, _, _ = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                (h_t1, c_t1), (h_t2, c_t2)
            )
            R = value.detach()
        values += [R]
        policy_loss = 0
        value_loss = 0
        gae = T.zeros(1, 1).to(device)
        for i in reversed(range(len(rewards))):
            R = gamma * R + rewards[i]
            advantage = R - values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)
            # Generalized Advantage Estimation
            delta_t = rewards[i] + gamma * values[i + 1] - values[i]
            gae = gae * gamma * gae_lambda + delta_t
            policy_loss = policy_loss - \
                log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
        loss = policy_loss + val_coeff * value_loss
        optimizer.zero_grad()
        loss.backward()
        ensure_shared_grads(agent, shared_model)
        optimizer.step()
        update_counter += 1
        writer.add_scalar("losses/total_loss", loss.item(), update_counter)
        if env.episode_num > env.n_episodes:
            np.save(os.path.join(os.path.dirname(save_path), f"rewards_{rank}.npy"), env.reward_counter)
            break
def train_episodic(config,
          shared_model,
          optimizer,
          rank
    ):
    """A3C worker loop for the episodic-memory (DND) agent variant.

    Same structure as :func:`train`, plus the agent writes (features, cell
    state) pairs into its differentiable neural dictionary each step.

    Args:
        config: parsed experiment config (task / agent / logging sections).
        shared_model: globally shared A3C parameters.
        optimizer: optimizer bound to ``shared_model``'s parameters.
        rank: worker index; offsets the RNG seed, names log/save files, and
            gates checkpointing/reward dumps (rank % 4 / rank % 2 below).
    """
    # Per-worker deterministic seeding.
    T.manual_seed(config["seed"] + rank)
    np.random.seed(config["seed"] + rank)
    T.random.manual_seed(config["seed"] + rank)
    device = config["device"]
    env = Harlow_1D()
    if config["mode"] == "vanilla":
        agent = A3C_LSTM(
            config["task"]["input-dim"],
            config["agent"]["mem-units"],
            config["task"]["num-actions"],
        )
    elif config["mode"] == "episodic":
        # NOTE(review): A3C_DND_LSTM is not in this module's imports (only
        # A3C_LSTM and A3C_StackedLSTM are imported above), so this branch
        # raises NameError as written - confirm the intended import.
        agent =A3C_DND_LSTM(
            config["task"]["input-dim"],
            config["agent"]["mem-units"],
            config["task"]["num-actions"],
            config["agent"]["dict-len"],
            config["agent"]["dict-kernel"]
        )
    else:
        raise ValueError(config["mode"])
    agent.to(device)
    agent.train()
    ### hyper-parameters ###
    gamma = config["agent"]["gamma"]
    gae_lambda = config["agent"]["gae-lambda"]
    val_coeff = config["agent"]["value-loss-weight"]
    entropy_coeff = config["agent"]["entropy-weight"]
    n_step_update = config["agent"]["n-step-update"]
    writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"] + f"_{rank}"))
    save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
    save_interval = config["save-interval"]
    done = True
    state = env.reset()
    # Hard-coded 3-action one-hot here (other loops size it from the config).
    p_action, p_reward = [0,0,0], 0
    print('='*50)
    print(f"Starting Trainer {rank}")
    print('='*50)
    episode_reward = 0
    update_counter = 0
    total_rewards = []
    # Enable both writing to and reading from the episodic dictionary.
    agent.turn_on_encoding()
    agent.turn_on_retrieval()
    # agent.turn_off_encoding()
    # agent.turn_off_retrieval()
    while True:
        # Pull the latest shared weights before each rollout.
        agent.load_state_dict(shared_model.state_dict())
        if done:
            ht, ct = agent.get_init_states(device)
        else:
            ht, ct = ht.detach(), ct.detach()
        values = []
        log_probs = []
        rewards = []
        entropies = []
        # ---- rollout of at most n_step_update environment steps ----
        for _ in range(n_step_update):
            # NOTE(review): the returned rnn_state is never unpacked back into
            # (ht, ct), so every step of this inner loop reuses the same
            # recurrent state and save_memory() always stores the initial ct -
            # verify whether `ht, ct = rnn_state` was intended here.
            logit, value, rnn_state, feats = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                (ht, ct)
            )
            logit = logit.squeeze(0)
            prob = F.softmax(logit, dim=-1)
            log_prob = F.log_softmax(logit, dim=-1)
            entropy = -(log_prob * prob).sum(1, keepdim=True)
            entropies += [entropy]
            action = prob.multinomial(num_samples=1).detach()
            log_prob = log_prob.gather(1, action)
            state, reward, done, _ = env.step(int(action))
            # if reward > 0:
            # Write the current (features, cell state) pair into the DND.
            agent.save_memory(feats, ct)
            episode_reward += reward
            p_action = np.eye(env.n_actions)[int(action)]
            p_reward = reward
            log_probs += [log_prob]
            values += [value]
            rewards += [reward]
            if done:
                # Episode over: log stats, periodically checkpoint, end rollout.
                state = env.reset()
                total_rewards += [episode_reward]
                avg_reward_100 = np.array(total_rewards[-100:]).mean()
                writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
                writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
                episode_reward = 0
                # Only every 4th worker writes checkpoints.
                if env.episode_num % save_interval == 0 and rank % 4 == 0:
                    T.save({
                        "state_dict": shared_model.state_dict(),
                        "avg_reward_100": avg_reward_100,
                        "update_counter": update_counter
                    }, save_path.format(epi=env.episode_num) + ".pt")
                break
        # Bootstrap value: 0 at episode end, V(s_t) otherwise.
        R = T.zeros(1, 1).to(device)
        if not done:
            _, value, _, _ = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                (ht, ct)
            )
            R = value.detach()
        values += [R]
        policy_loss = 0
        value_loss = 0
        gae = T.zeros(1, 1).to(device)
        for i in reversed(range(len(rewards))):
            R = gamma * R + rewards[i]
            advantage = R - values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)
            # Generalized Advantage Estimation
            delta_t = rewards[i] + gamma * values[i + 1] - values[i]
            gae = gae * gamma * gae_lambda + delta_t
            policy_loss = policy_loss - \
                log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
        loss = policy_loss + val_coeff * value_loss
        optimizer.zero_grad()
        loss.backward()
        ensure_shared_grads(agent, shared_model)
        optimizer.step()
        update_counter += 1
        writer.add_scalar("losses/total_loss", loss.item(), update_counter)
        if env.episode_num > env.n_episodes:
            # Only even-ranked workers dump the per-episode reward counter.
            if rank % 2 == 0:
                np.save(os.path.join(os.path.dirname(save_path), f"{rank}_rewards.npy"), env.reward_counter)
            break
| 15,561 | 30.502024 | 108 | py |
FEAT | FEAT-master/pretrain.py | import argparse
import os
import os.path as osp
import shutil
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from model.models.classifier import Classifier
from model.dataloader.samplers import CategoriesSampler
from model.utils import pprint, set_gpu, ensure_path, Averager, Timer, count_acc, euclidean_metric
from tensorboardX import SummaryWriter
from tqdm import tqdm
# pre-train model, compute validation acc after 500 epoches
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--max_epoch', type=int, default=500)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--dataset', type=str, default='MiniImageNet', choices=['MiniImageNet', 'TieredImagenet', 'CUB'])
parser.add_argument('--backbone_class', type=str, default='Res12', choices=['ConvNet', 'Res12'])
parser.add_argument('--schedule', type=int, nargs='+', default=[75, 150, 300], help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--query', type=int, default=15)
parser.add_argument('--resume', type=bool, default=False)
args = parser.parse_args()
args.orig_imsize = -1
pprint(vars(args))
save_path1 = '-'.join([args.dataset, args.backbone_class, 'Pre'])
save_path2 = '_'.join([str(args.lr), str(args.gamma), str(args.schedule)])
args.save_path = osp.join(save_path1, save_path2)
if not osp.exists(save_path1):
os.mkdir(save_path1)
ensure_path(args.save_path)
if args.dataset == 'MiniImageNet':
# Handle MiniImageNet
from model.dataloader.mini_imagenet import MiniImageNet as Dataset
elif args.dataset == 'CUB':
from model.dataloader.cub import CUB as Dataset
elif args.dataset == 'TieredImagenet':
from model.dataloader.tiered_imagenet import tieredImageNet as Dataset
else:
raise ValueError('Non-supported Dataset.')
trainset = Dataset('train', args, augment=True)
train_loader = DataLoader(dataset=trainset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)
args.num_class = trainset.num_class
valset = Dataset('val', args)
val_sampler = CategoriesSampler(valset.label, 200, valset.num_class, 1 + args.query) # test on 16-way 1-shot
val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler, num_workers=8, pin_memory=True)
args.way = valset.num_class
args.shot = 1
# construct model
model = Classifier(args)
if 'Conv' in args.backbone_class:
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.0005)
elif 'Res' in args.backbone_class:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, nesterov=True, weight_decay=0.0005)
else:
raise ValueError('No Such Encoder')
criterion = torch.nn.CrossEntropyLoss()
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
if args.ngpu > 1:
model.encoder = torch.nn.DataParallel(model.encoder, device_ids=list(range(args.ngpu)))
model = model.cuda()
criterion = criterion.cuda()
    # Save only the model weights (closure over `model` and `args` from the
    # surrounding __main__ script) as <save_path>/<name>.pth.
    def save_model(name):
        torch.save(dict(params=model.state_dict()), osp.join(args.save_path, name + '.pth'))
    # Save a full resumable checkpoint (weights, optimizer, training log and
    # counters - all captured from the surrounding __main__ script's scope).
    # When `is_best`, also copy it to model_best.pth.tar.
    def save_checkpoint(is_best, filename='checkpoint.pth.tar'):
        state = {'epoch': epoch + 1,
                 'args': args,
                 'state_dict': model.state_dict(),
                 'trlog': trlog,
                 'val_acc_dist': trlog['max_acc_dist'],
                 'val_acc_sim': trlog['max_acc_sim'],
                 'optimizer' : optimizer.state_dict(),
                 'global_count': global_count}
        torch.save(state, osp.join(args.save_path, filename))
        if is_best:
            shutil.copyfile(osp.join(args.save_path, filename), osp.join(args.save_path, 'model_best.pth.tar'))
if args.resume == True:
# load checkpoint
state = torch.load(osp.join(args.save_path, 'model_best.pth.tar'))
init_epoch = state['epoch']
resumed_state = state['state_dict']
# resumed_state = {'module.'+k:v for k,v in resumed_state.items()}
model.load_state_dict(resumed_state)
trlog = state['trlog']
optimizer.load_state_dict(state['optimizer'])
initial_lr = optimizer.param_groups[0]['lr']
global_count = state['global_count']
else:
init_epoch = 1
trlog = {}
trlog['args'] = vars(args)
trlog['train_loss'] = []
trlog['val_loss_dist'] = []
trlog['val_loss_sim'] = []
trlog['train_acc'] = []
trlog['val_acc_sim'] = []
trlog['val_acc_dist'] = []
trlog['max_acc_dist'] = 0.0
trlog['max_acc_dist_epoch'] = 0
trlog['max_acc_sim'] = 0.0
trlog['max_acc_sim_epoch'] = 0
initial_lr = args.lr
global_count = 0
timer = Timer()
writer = SummaryWriter(logdir=args.save_path)
for epoch in range(init_epoch, args.max_epoch + 1):
# refine the step-size
if epoch in args.schedule:
initial_lr *= args.gamma
for param_group in optimizer.param_groups:
param_group['lr'] = initial_lr
model.train()
tl = Averager()
ta = Averager()
for i, batch in enumerate(train_loader, 1):
global_count = global_count + 1
if torch.cuda.is_available():
data, label = [_.cuda() for _ in batch]
label = label.type(torch.cuda.LongTensor)
else:
data, label = batch
label = label.type(torch.LongTensor)
logits = model(data)
loss = criterion(logits, label)
acc = count_acc(logits, label)
writer.add_scalar('data/loss', float(loss), global_count)
writer.add_scalar('data/acc', float(acc), global_count)
if (i-1) % 100 == 0:
print('epoch {}, train {}/{}, loss={:.4f} acc={:.4f}'.format(epoch, i, len(train_loader), loss.item(), acc))
tl.add(loss.item())
ta.add(acc)
optimizer.zero_grad()
loss.backward()
optimizer.step()
tl = tl.item()
ta = ta.item()
# do not do validation in first 500 epoches
if epoch > 100 or (epoch-1) % 5 == 0:
model.eval()
vl_dist = Averager()
va_dist = Averager()
vl_sim = Averager()
va_sim = Averager()
print('[Dist] best epoch {}, current best val acc={:.4f}'.format(trlog['max_acc_dist_epoch'], trlog['max_acc_dist']))
print('[Sim] best epoch {}, current best val acc={:.4f}'.format(trlog['max_acc_sim_epoch'], trlog['max_acc_sim']))
# test performance with Few-Shot
label = torch.arange(valset.num_class).repeat(args.query)
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
with torch.no_grad():
for i, batch in tqdm(enumerate(val_loader, 1)):
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data, _ = batch
data_shot, data_query = data[:valset.num_class], data[valset.num_class:] # 16-way test
logits_dist, logits_sim = model.forward_proto(data_shot, data_query, valset.num_class)
loss_dist = F.cross_entropy(logits_dist, label)
acc_dist = count_acc(logits_dist, label)
loss_sim = F.cross_entropy(logits_sim, label)
acc_sim = count_acc(logits_sim, label)
vl_dist.add(loss_dist.item())
va_dist.add(acc_dist)
vl_sim.add(loss_sim.item())
va_sim.add(acc_sim)
vl_dist = vl_dist.item()
va_dist = va_dist.item()
vl_sim = vl_sim.item()
va_sim = va_sim.item()
writer.add_scalar('data/val_loss_dist', float(vl_dist), epoch)
writer.add_scalar('data/val_acc_dist', float(va_dist), epoch)
writer.add_scalar('data/val_loss_sim', float(vl_sim), epoch)
writer.add_scalar('data/val_acc_sim', float(va_sim), epoch)
print('epoch {}, val, loss_dist={:.4f} acc_dist={:.4f} loss_sim={:.4f} acc_sim={:.4f}'.format(epoch, vl_dist, va_dist, vl_sim, va_sim))
if va_dist > trlog['max_acc_dist']:
trlog['max_acc_dist'] = va_dist
trlog['max_acc_dist_epoch'] = epoch
save_model('max_acc_dist')
save_checkpoint(True)
if va_sim > trlog['max_acc_sim']:
trlog['max_acc_sim'] = va_sim
trlog['max_acc_sim_epoch'] = epoch
save_model('max_acc_sim')
save_checkpoint(True)
trlog['train_loss'].append(tl)
trlog['train_acc'].append(ta)
trlog['val_loss_dist'].append(vl_dist)
trlog['val_acc_dist'].append(va_dist)
trlog['val_loss_sim'].append(vl_sim)
trlog['val_acc_sim'].append(va_sim)
save_model('epoch-last')
print('ETA:{}/{}'.format(timer.measure(), timer.measure(epoch / args.max_epoch)))
writer.close()
# NOTE(review): removed a leftover debugger breakpoint
# (`import pdb; pdb.set_trace()`) that halted every run after training
# completed; it served no purpose outside interactive debugging.
FEAT | FEAT-master/train_fsl.py | import numpy as np
import torch
from model.trainer.fsl_trainer import FSLTrainer
from model.utils import (
pprint, set_gpu,
get_command_line_parser,
postprocess_args,
)
# from ipdb import launch_ipdb_on_exception
if __name__ == '__main__':
    # Parse CLI options, then derive dependent fields and the save path.
    parser = get_command_line_parser()
    args = postprocess_args(parser.parse_args())
    # with launch_ipdb_on_exception():
    pprint(vars(args))
    set_gpu(args.gpu)
    # Train, evaluate the best checkpoint on the test split, and persist results.
    trainer = FSLTrainer(args)
    trainer.train()
    trainer.evaluate_test()
    trainer.final_record()
    print(args.save_path)
| 561 | 20.615385 | 48 | py |
FEAT | FEAT-master/model/data_parallel.py | from torch.nn.parallel import DataParallel
import torch
from torch.nn.parallel._functions import Scatter
from torch.nn.parallel.parallel_apply import parallel_apply
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.

    Tensors are split with ``Scatter.apply`` using the explicit
    ``chunk_sizes``; tuples/lists/dicts are scattered element-wise and
    re-assembled per device; any other object is replicated by reference,
    one copy per entry in ``target_gpus``.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            # Catch only real errors; a bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, masking the debug path below.
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            except Exception:
                # Diagnostic dump before aborting the process.
                # NOTE(review): quit() kills the whole process here; consider
                # re-raising instead so callers can handle the failure.
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                quit()
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Non-container, non-tensor: replicate the same reference per device.
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
    r"""Scatter positional and keyword arguments across devices, padding the
    shorter of the two sequences so both provide one entry per replica."""
    scattered_args = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
    scattered_kw = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
    # Pad with empty args/kwargs so the two sequences line up one-to-one.
    while len(scattered_args) < len(scattered_kw):
        scattered_args.append(())
    while len(scattered_kw) < len(scattered_args):
        scattered_kw.append({})
    return tuple(scattered_args), tuple(scattered_kw)
class BalancedDataParallel(DataParallel):
    # DataParallel variant where the first visible device receives a custom
    # (typically smaller) chunk of the batch, `gpu0_bsz`; the remaining
    # samples are split evenly across the other devices.
    def __init__(self, gpu0_bsz, *args, **kwargs):
        # Number of samples to place on device 0 (0 = device 0 holds no data).
        self.gpu0_bsz = gpu0_bsz
        super().__init__(*args, **kwargs)
    def forward(self, *inputs, **kwargs):
        # No devices configured: run the wrapped module directly.
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        # When device 0 gets no data, scatter only to the remaining devices.
        if self.gpu0_bsz == 0:
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        # Replicate on ALL devices; replica 0 may be dropped just below.
        replicas = self.replicate(self.module, self.device_ids)
        if self.gpu0_bsz == 0:
            replicas = replicas[1:]
        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device)
    def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, device_ids)
    def scatter(self, inputs, kwargs, device_ids):
        # Build uneven chunk sizes: gpu0_bsz for device 0, the remainder split
        # evenly with leftovers handed out one-by-one to later devices.
        bsz = inputs[0].size(self.dim)
        num_dev = len(self.device_ids)
        gpu0_bsz = self.gpu0_bsz
        # NOTE(review): divides by (num_dev - 1); with a single device this
        # raises ZeroDivisionError - confirm callers always configure >= 2 GPUs.
        bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
        if gpu0_bsz < bsz_unit:
            chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
            delta = bsz - sum(chunk_sizes)
            for i in range(delta):
                chunk_sizes[i + 1] += 1
            if gpu0_bsz == 0:
                chunk_sizes = chunk_sizes[1:]
        else:
            # Device 0's share is not smaller than the rest: fall back to the
            # stock even split from DataParallel.
            return super().scatter(inputs, kwargs, device_ids)
        return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
| 3,764 | 40.373626 | 84 | py |
FEAT | FEAT-master/model/utils.py | import os
import shutil
import time
import pprint
import torch
import argparse
import numpy as np
def one_hot(indices, depth):
    """
    Returns a one-hot tensor.
    This is a PyTorch equivalent of Tensorflow's tf.one_hot.
    Parameters:
      indices:  a (n_batch, m) Tensor or (m) Tensor.
      depth: a scalar. Represents the depth of the one hot dimension.
    Returns: a (n_batch, m, depth) Tensor or (m, depth) Tensor.
    """
    encoded_indicies = torch.zeros(indices.size() + torch.Size([depth]))
    if indices.is_cuda:
        encoded_indicies = encoded_indicies.cuda()
    index = indices.view(indices.size()+torch.Size([1]))
    # Scatter along the LAST dimension (the depth axis) so both the 1-D (m,)
    # and the documented 2-D (n_batch, m) cases produce proper one-hot rows.
    # The previous hard-coded dim=1 only matched the 1-D case and scattered
    # along the wrong axis for batched input.
    encoded_indicies = encoded_indicies.scatter_(-1, index, 1)
    return encoded_indicies
def set_gpu(x):
    """Restrict CUDA to the given device id string (e.g. "0" or "0,1")."""
    os.environ['CUDA_VISIBLE_DEVICES'] = x
    print('using gpu:', x)
def ensure_path(dir_path, scripts_to_save=None):
    # Create a fresh experiment directory, optionally snapshotting source files.
    #
    # If `dir_path` already exists, interactively asks whether to wipe it
    # (anything other than 'n' deletes and recreates it; answering 'n' keeps
    # the existing directory untouched). Each entry of `scripts_to_save` is
    # copied (file or whole directory tree) into <dir_path>/scripts.
    #
    # NOTE(review): uses os.mkdir, so the PARENT of dir_path must already
    # exist - confirm callers guarantee that.
    if os.path.exists(dir_path):
        # Blocking interactive prompt - unsuitable for non-interactive runs.
        if input('{} exists, remove? ([y]/n)'.format(dir_path)) != 'n':
            shutil.rmtree(dir_path)
            os.mkdir(dir_path)
    else:
        os.mkdir(dir_path)
    print('Experiment dir : {}'.format(dir_path))
    if scripts_to_save is not None:
        script_path = os.path.join(dir_path, 'scripts')
        if not os.path.exists(script_path):
            os.makedirs(script_path)
        for src_file in scripts_to_save:
            dst_file = os.path.join(dir_path, 'scripts', os.path.basename(src_file))
            print('copy {} to {}'.format(src_file, dst_file))
            # Directories are copied recursively; plain files are copied as-is.
            if os.path.isdir(src_file):
                shutil.copytree(src_file, dst_file)
            else:
                shutil.copyfile(src_file, dst_file)
class Averager():
    """Incrementally tracks the running mean of a stream of numbers."""

    def __init__(self):
        # n = number of observations, v = current mean (0 before any add()).
        self.n = 0
        self.v = 0

    def add(self, x):
        """Fold one more observation into the running mean."""
        count = self.n + 1
        self.v = (self.n * self.v + x) / count
        self.n = count

    def item(self):
        """Return the current mean."""
        return self.v
def count_acc(logits, label):
    """Mean top-1 classification accuracy of `logits` against integer `label`."""
    pred = torch.argmax(logits, dim=1)
    hits = (pred == label)
    # Cast through the matching tensor type (GPU when available) before averaging.
    caster = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    return hits.type(caster).mean().item()
def euclidean_metric(a, b):
    """Negative squared Euclidean distance between every row of `a` (n, d)
    and every row of `b` (m, d); returns an (n, m) logits matrix."""
    # Broadcast to (n, m, d) and reduce over the feature axis.
    diff = a.unsqueeze(1) - b.unsqueeze(0)
    return -diff.pow(2).sum(dim=2)
class Timer():
    """Wall-clock timer reporting human-readable elapsed time since creation."""

    def __init__(self):
        # Origin timestamp (seconds since epoch).
        self.o = time.time()

    def measure(self, p=1):
        """Elapsed time divided by `p`, formatted as 's', 'm', or 'h'.

        Passing p = fraction_done yields an ETA for the whole run.
        """
        elapsed = int((time.time() - self.o) / p)
        if elapsed >= 3600:
            return '{:.1f}h'.format(elapsed / 3600)
        if elapsed >= 60:
            return '{}m'.format(round(elapsed / 60))
        return '{}s'.format(elapsed)
# Shared pretty-printer instance used by the module-level helper below.
_utils_pp = pprint.PrettyPrinter()
def pprint(x):
    # NOTE(review): this helper shadows the stdlib `pprint` module imported
    # above; after this definition, `pprint.PrettyPrinter` is no longer
    # reachable from this module's namespace.
    _utils_pp.pprint(x)
def compute_confidence_interval(data):
    """
    Compute 95% confidence interval
    :param data: An array of mean accuracy (or mAP) across a number of sampled episodes.
    :return: (mean, half-width) - the 95% confidence interval for this data.
    """
    arr = np.asarray(data, dtype=np.float64)
    mean = np.mean(arr)
    # 1.96 = z-score for a two-sided 95% interval under a normal assumption.
    half_width = 1.96 * (np.std(arr) / np.sqrt(arr.size))
    return mean, half_width
def postprocess_args(args):
    """Derive dependent fields from the parsed CLI namespace.

    Sets ``args.num_classes``, builds the two-level experiment tag, creates
    the model-level checkpoint directory under ``args.save_dir`` if missing,
    and stores the final run directory in ``args.save_path``.
    Mutates and returns the same namespace.
    """
    args.num_classes = args.way
    # Level 1: dataset / model / backbone / episode shape.
    model_tag = '-'.join([args.dataset, args.model_class, args.backbone_class,
                          '{:02d}w{:02d}s{:02}q'.format(args.way, args.shot, args.query)])
    # Level 2: optimization hyper-parameters.
    episode_size = max(args.way, args.num_classes) * (args.shot + args.query)
    hp_tag = '_'.join(['_'.join(args.step_size.split(',')),
                       str(args.gamma),
                       'lr{:.2g}mul{:.2g}'.format(args.lr, args.lr_mul),
                       str(args.lr_scheduler),
                       'T1{}T2{}'.format(args.temperature, args.temperature2),
                       'b{}'.format(args.balance),
                       'bsz{:03d}'.format(episode_size),
                       # str(time.strftime('%Y%m%d_%H%M%S'))
                       ])
    if args.init_weights is not None:
        model_tag += '-Pre'
    model_tag += '-DIS' if args.use_euclidean else '-SIM'
    if args.fix_BN:
        hp_tag += '-FBN'
    if not args.augment:
        hp_tag += '-NoAug'
    model_dir = os.path.join(args.save_dir, model_tag)
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    args.save_path = os.path.join(model_dir, hp_tag)
    return args
def get_command_line_parser():
    """Build the argparse parser shared by the FEAT training entry points."""
    arg_parser = argparse.ArgumentParser()
    # --- episode / schedule configuration ---
    arg_parser.add_argument('--max_epoch', type=int, default=200)
    arg_parser.add_argument('--episodes_per_epoch', type=int, default=100)
    arg_parser.add_argument('--num_eval_episodes', type=int, default=600)
    # --- model / backbone / dataset selection ---
    arg_parser.add_argument('--model_class', type=str, default='FEAT',
                            choices=['MatchNet', 'ProtoNet', 'BILSTM', 'DeepSet', 'GCN', 'FEAT', 'FEATSTAR', 'SemiFEAT', 'SemiProtoFEAT']) # None for MatchNet or ProtoNet
    arg_parser.add_argument('--use_euclidean', action='store_true', default=False)
    arg_parser.add_argument('--backbone_class', type=str, default='ConvNet',
                            choices=['ConvNet', 'Res12', 'Res18', 'WRN'])
    arg_parser.add_argument('--dataset', type=str, default='MiniImageNet',
                            choices=['MiniImageNet', 'TieredImageNet', 'CUB'])
    # --- few-shot episode shape (train and eval) ---
    arg_parser.add_argument('--way', type=int, default=5)
    arg_parser.add_argument('--eval_way', type=int, default=5)
    arg_parser.add_argument('--shot', type=int, default=1)
    arg_parser.add_argument('--eval_shot', type=int, default=1)
    arg_parser.add_argument('--query', type=int, default=15)
    arg_parser.add_argument('--eval_query', type=int, default=15)
    arg_parser.add_argument('--balance', type=float, default=0)
    arg_parser.add_argument('--temperature', type=float, default=1)
    arg_parser.add_argument('--temperature2', type=float, default=1) # the temperature in the
    # optimization parameters
    arg_parser.add_argument('--orig_imsize', type=int, default=-1) # -1 for no cache, and -2 for no resize, only for MiniImageNet and CUB
    arg_parser.add_argument('--lr', type=float, default=0.0001)
    arg_parser.add_argument('--lr_mul', type=float, default=10)
    arg_parser.add_argument('--lr_scheduler', type=str, default='step', choices=['multistep', 'step', 'cosine'])
    arg_parser.add_argument('--step_size', type=str, default='20')
    arg_parser.add_argument('--gamma', type=float, default=0.2)
    arg_parser.add_argument('--fix_BN', action='store_true', default=False) # means we do not update the running mean/var in BN, not to freeze BN
    arg_parser.add_argument('--augment', action='store_true', default=False)
    arg_parser.add_argument('--multi_gpu', action='store_true', default=False)
    arg_parser.add_argument('--gpu', default='0')
    arg_parser.add_argument('--init_weights', type=str, default=None)
    # usually untouched parameters
    arg_parser.add_argument('--mom', type=float, default=0.9)
    arg_parser.add_argument('--weight_decay', type=float, default=0.0005) # we find this weight decay value works the best
    arg_parser.add_argument('--num_workers', type=int, default=4)
    arg_parser.add_argument('--log_interval', type=int, default=50)
    arg_parser.add_argument('--eval_interval', type=int, default=1)
    arg_parser.add_argument('--save_dir', type=str, default='./checkpoints')
    return arg_parser
| 7,275 | 38.32973 | 166 | py |
FEAT | FEAT-master/model/trainer/base.py | import abc
import torch
import os.path as osp
from model.utils import (
ensure_path,
Averager, Timer, count_acc,
compute_confidence_interval,
)
from model.logger import Logger
class Trainer(object, metaclass=abc.ABCMeta):
    """Abstract base for few-shot trainers.

    Holds shared bookkeeping (logger, step/epoch counters, phase timers,
    best-accuracy log) and utility methods; subclasses implement the actual
    train / evaluate loops.
    """
    def __init__(self, args):
        self.args = args
        # ensure_path(
        #     self.args.save_path,
        #     scripts_to_save=['model/models', 'model/networks', __file__],
        # )
        self.logger = Logger(args, osp.join(args.save_path))

        # Global step / epoch counters maintained by the subclass loop.
        self.train_step = 0
        self.train_epoch = 0
        self.max_steps = args.episodes_per_epoch * args.max_epoch
        # Phase timers: data loading, forward, backward, optimizer step.
        self.dt, self.ft = Averager(), Averager()
        self.bt, self.ot = Averager(), Averager()
        self.timer = Timer()

        # train statistics
        self.trlog = {}
        self.trlog['max_acc'] = 0.0
        self.trlog['max_acc_epoch'] = 0
        self.trlog['max_acc_interval'] = 0.0

    @abc.abstractmethod
    def train(self):
        pass

    @abc.abstractmethod
    def evaluate(self, data_loader):
        pass

    @abc.abstractmethod
    def evaluate_test(self, data_loader):
        pass

    @abc.abstractmethod
    def final_record(self):
        pass

    def try_evaluate(self, epoch):
        # Run validation every `eval_interval` epochs; checkpoint on a new best.
        args = self.args
        if self.train_epoch % args.eval_interval == 0:
            vl, va, vap = self.evaluate(self.val_loader)
            self.logger.add_scalar('val_loss', float(vl), self.train_epoch)
            self.logger.add_scalar('val_acc', float(va), self.train_epoch)
            print('epoch {}, val, loss={:.4f} acc={:.4f}+{:.4f}'.format(epoch, vl, va, vap))

            if va >= self.trlog['max_acc']:
                self.trlog['max_acc'] = va
                self.trlog['max_acc_interval'] = vap
                self.trlog['max_acc_epoch'] = self.train_epoch
                self.save_model('max_acc')

    def try_logging(self, tl1, tl2, ta, tg=None):
        # Emit console + scalar logs every `log_interval` steps.
        # tl1/tl2/ta are Averagers (total loss, loss, accuracy); tg is an
        # optional gradient-norm value.
        args = self.args
        if self.train_step % args.log_interval == 0:
            print('epoch {}, train {:06g}/{:06g}, total loss={:.4f}, loss={:.4f} acc={:.4f}, lr={:.4g}'
                  .format(self.train_epoch,
                          self.train_step,
                          self.max_steps,
                          tl1.item(), tl2.item(), ta.item(),
                          self.optimizer.param_groups[0]['lr']))
            self.logger.add_scalar('train_total_loss', tl1.item(), self.train_step)
            self.logger.add_scalar('train_loss', tl2.item(), self.train_step)
            self.logger.add_scalar('train_acc', ta.item(), self.train_step)
            if tg is not None:
                self.logger.add_scalar('grad_norm', tg.item(), self.train_step)
            # Per-phase timing report (data / forward / backward / optimizer).
            print('data_timer: {:.2f} sec, ' \
                  'forward_timer: {:.2f} sec,' \
                  'backward_timer: {:.2f} sec, ' \
                  'optim_timer: {:.2f} sec'.format(
                        self.dt.item(), self.ft.item(),
                        self.bt.item(), self.ot.item())
                  )
            self.logger.dump()

    def save_model(self, name):
        # Persist only the model weights as <save_path>/<name>.pth.
        torch.save(
            dict(params=self.model.state_dict()),
            osp.join(self.args.save_path, name + '.pth')
        )

    def __str__(self):
        return "{}({})".format(
            self.__class__.__name__,
            self.model.__class__.__name__
        )
| 3,407 | 33.77551 | 103 | py |
FEAT | FEAT-master/model/trainer/fsl_trainer.py | import time
import os.path as osp
import numpy as np
import torch
import torch.nn.functional as F
from model.trainer.base import Trainer
from model.trainer.helpers import (
get_dataloader, prepare_model, prepare_optimizer,
)
from model.utils import (
pprint, ensure_path,
Averager, Timer, count_acc, one_hot,
compute_confidence_interval,
)
from tensorboardX import SummaryWriter
from collections import deque
from tqdm import tqdm
class FSLTrainer(Trainer):
    """Few-shot-learning trainer: episodic training, validation and testing."""
    def __init__(self, args):
        super().__init__(args)
        self.train_loader, self.val_loader, self.test_loader = get_dataloader(args)
        self.model, self.para_model = prepare_model(args)
        self.optimizer, self.lr_scheduler = prepare_optimizer(self.model, args)
    def prepare_label(self):
        """Build per-episode labels.

        Returns ``(label, label_aux)``: ``label`` covers the query set
        (``way`` classes repeated ``query`` times); ``label_aux`` covers
        support + query for the auxiliary/regularization loss.
        """
        args = self.args
        # prepare one-hot label
        label = torch.arange(args.way, dtype=torch.int16).repeat(args.query)
        label_aux = torch.arange(args.way, dtype=torch.int8).repeat(args.shot + args.query)
        # cross_entropy expects int64 targets
        label = label.type(torch.LongTensor)
        label_aux = label_aux.type(torch.LongTensor)
        if torch.cuda.is_available():
            label = label.cuda()
            label_aux = label_aux.cuda()
        return label, label_aux
    def train(self):
        """Main episodic training loop over ``args.max_epoch`` epochs."""
        args = self.args
        self.model.train()
        if self.args.fix_BN:
            # freeze BatchNorm statistics in the encoder
            self.model.encoder.eval()
        # start FSL training
        label, label_aux = self.prepare_label()
        for epoch in range(1, args.max_epoch + 1):
            self.train_epoch += 1
            self.model.train()
            if self.args.fix_BN:
                self.model.encoder.eval()
            tl1 = Averager()
            tl2 = Averager()
            ta = Averager()
            start_tm = time.time()
            for batch in self.train_loader:
                self.train_step += 1
                if torch.cuda.is_available():
                    data, gt_label = [_.cuda() for _ in batch]
                else:
                    data, gt_label = batch[0], batch[1]
                data_tm = time.time()
                self.dt.add(data_tm - start_tm)  # data-loading time
                # get saved centers
                logits, reg_logits = self.para_model(data)
                if reg_logits is not None:
                    loss = F.cross_entropy(logits, label)
                    # balance weighs the contrastive/auxiliary regularizer
                    total_loss = loss + args.balance * F.cross_entropy(reg_logits, label_aux)
                else:
                    loss = F.cross_entropy(logits, label)
                    # NOTE(review): recomputes the same cross-entropy; could reuse `loss`
                    total_loss = F.cross_entropy(logits, label)
                tl2.add(loss)
                forward_tm = time.time()
                self.ft.add(forward_tm - data_tm)  # forward time
                acc = count_acc(logits, label)
                tl1.add(total_loss.item())
                ta.add(acc)
                self.optimizer.zero_grad()
                total_loss.backward()
                backward_tm = time.time()
                self.bt.add(backward_tm - forward_tm)  # backward time
                self.optimizer.step()
                optimizer_tm = time.time()
                self.ot.add(optimizer_tm - backward_tm)  # optimizer-step time
                # refresh start_tm
                start_tm = time.time()
            self.lr_scheduler.step()
            self.try_evaluate(epoch)
            print('ETA:{}/{}'.format(
                    self.timer.measure(),
                    self.timer.measure(self.train_epoch / args.max_epoch))
                )
        torch.save(self.trlog, osp.join(args.save_path, 'trlog'))
        self.save_model('epoch-last')
    def evaluate(self, data_loader):
        """Run ``args.num_eval_episodes`` validation episodes.

        Returns ``(mean_loss, mean_acc, acc_confidence_interval)``.
        """
        # restore model args
        args = self.args
        # evaluation mode
        self.model.eval()
        record = np.zeros((args.num_eval_episodes, 2)) # loss and acc
        label = torch.arange(args.eval_way, dtype=torch.int16).repeat(args.eval_query)
        label = label.type(torch.LongTensor)
        if torch.cuda.is_available():
            label = label.cuda()
        print('best epoch {}, best val acc={:.4f} + {:.4f}'.format(
                self.trlog['max_acc_epoch'],
                self.trlog['max_acc'],
                self.trlog['max_acc_interval']))
        with torch.no_grad():
            for i, batch in enumerate(data_loader, 1):
                if torch.cuda.is_available():
                    data, _ = [_.cuda() for _ in batch]
                else:
                    data = batch[0]
                logits = self.model(data)
                loss = F.cross_entropy(logits, label)
                acc = count_acc(logits, label)
                record[i-1, 0] = loss.item()
                record[i-1, 1] = acc
        # the loader must yield exactly num_eval_episodes batches
        assert(i == record.shape[0])
        vl, _ = compute_confidence_interval(record[:,0])
        va, vap = compute_confidence_interval(record[:,1])
        # train mode
        self.model.train()
        if self.args.fix_BN:
            self.model.encoder.eval()
        return vl, va, vap
    def evaluate_test(self):
        """Evaluate the best checkpoint over 10000 test episodes.

        NOTE(review): the abstract base declares evaluate_test(self, data_loader);
        this override ignores that and always uses self.test_loader — confirm.
        """
        # restore model args
        args = self.args
        # evaluation mode: load the best-on-validation checkpoint first
        self.model.load_state_dict(torch.load(osp.join(self.args.save_path, 'max_acc.pth'))['params'])
        self.model.eval()
        # 10000 matches the episode count of the test sampler in get_dataloader
        record = np.zeros((10000, 2)) # loss and acc
        label = torch.arange(args.eval_way, dtype=torch.int16).repeat(args.eval_query)
        label = label.type(torch.LongTensor)
        if torch.cuda.is_available():
            label = label.cuda()
        print('best epoch {}, best val acc={:.4f} + {:.4f}'.format(
                self.trlog['max_acc_epoch'],
                self.trlog['max_acc'],
                self.trlog['max_acc_interval']))
        with torch.no_grad():
            for i, batch in tqdm(enumerate(self.test_loader, 1)):
                if torch.cuda.is_available():
                    data, _ = [_.cuda() for _ in batch]
                else:
                    data = batch[0]
                logits = self.model(data)
                loss = F.cross_entropy(logits, label)
                acc = count_acc(logits, label)
                record[i-1, 0] = loss.item()
                record[i-1, 1] = acc
        assert(i == record.shape[0])
        vl, _ = compute_confidence_interval(record[:,0])
        va, vap = compute_confidence_interval(record[:,1])
        self.trlog['test_acc'] = va
        self.trlog['test_acc_interval'] = vap
        self.trlog['test_loss'] = vl
        print('best epoch {}, best val acc={:.4f} + {:.4f}\n'.format(
                self.trlog['max_acc_epoch'],
                self.trlog['max_acc'],
                self.trlog['max_acc_interval']))
        print('Test acc={:.4f} + {:.4f}\n'.format(
                self.trlog['test_acc'],
                self.trlog['test_acc_interval']))
        return vl, va, vap
    def final_record(self):
        """Write the best/test accuracies to a summary file.

        The file NAME itself encodes '<test_acc>+<test_acc_interval>'.
        """
        # save the best performance in a txt file
        with open(osp.join(self.args.save_path, '{}+{}'.format(self.trlog['test_acc'], self.trlog['test_acc_interval'])), 'w') as f:
            f.write('best epoch {}, best val acc={:.4f} + {:.4f}\n'.format(
                self.trlog['max_acc_epoch'],
                self.trlog['max_acc'],
                self.trlog['max_acc_interval']))
            f.write('Test acc={:.4f} + {:.4f}\n'.format(
                self.trlog['test_acc'],
                self.trlog['test_acc_interval']))
FEAT | FEAT-master/model/trainer/helpers.py | import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
from torch.utils.data import DataLoader
from model.dataloader.samplers import CategoriesSampler, RandomSampler, ClassSampler
from model.models.protonet import ProtoNet
from model.models.matchnet import MatchNet
from model.models.feat import FEAT
from model.models.featstar import FEATSTAR
from model.models.deepset import DeepSet
from model.models.bilstm import BILSTM
from model.models.graphnet import GCN
from model.models.semi_feat import SemiFEAT
from model.models.semi_protofeat import SemiProtoFEAT
class MultiGPUDataloader:
    """Wrap a dataloader so each iteration aggregates ``num_device`` batches,
    adding a leading device axis to every tensor in the batch."""
    def __init__(self, dataloader, num_device):
        self.dataloader = dataloader
        self.num_device = num_device
    def __len__(self):
        # only full groups of num_device batches are yielded
        return len(self.dataloader) // self.num_device
    def __iter__(self):
        data_iter = iter(self.dataloader)
        done = False
        while not done:
            try:
                # collect one batch per device; position i holds field i of each batch
                output_batch = ([], [])
                for _ in range(self.num_device):
                    batch = next(data_iter)
                    for i, v in enumerate(batch):
                        output_batch[i].append(v[None])  # v[None] adds the device axis
                # NOTE(review): yields a *generator* of concatenated tensors, not a
                # tuple — consumers must unpack it exactly once; confirm intended.
                yield ( torch.cat(_, dim=0) for _ in output_batch )
            except StopIteration:
                done = True
                return
def get_dataloader(args):
    """Build the train/val/test episodic dataloaders for the chosen dataset.

    Returns ``(train_loader, val_loader, test_loader)``. Training uses
    ``args.episodes_per_epoch`` episodes (scaled by GPU count under
    ``args.multi_gpu``); testing is fixed to 10000 episodes.
    """
    if args.dataset == 'MiniImageNet':
        # Handle MiniImageNet
        from model.dataloader.mini_imagenet import MiniImageNet as Dataset
    elif args.dataset == 'CUB':
        from model.dataloader.cub import CUB as Dataset
    elif args.dataset == 'TieredImageNet':
        from model.dataloader.tiered_imagenet import tieredImageNet as Dataset
    else:
        raise ValueError('Non-supported Dataset.')
    num_device = torch.cuda.device_count()
    num_episodes = args.episodes_per_epoch*num_device if args.multi_gpu else args.episodes_per_epoch
    num_workers=args.num_workers*num_device if args.multi_gpu else args.num_workers
    trainset = Dataset('train', args, augment=args.augment)
    args.num_class = trainset.num_class
    # NOTE(review): sampler reads args.num_classes while the line above sets
    # args.num_class — confirm both attributes exist on args.
    train_sampler = CategoriesSampler(trainset.label,
                                      num_episodes,
                                      max(args.way, args.num_classes),
                                      args.shot + args.query)
    train_loader = DataLoader(dataset=trainset,
                                  num_workers=num_workers,
                                  batch_sampler=train_sampler,
                                  pin_memory=True)
    #if args.multi_gpu and num_device > 1:
        #train_loader = MultiGPUDataloader(train_loader, num_device)
        #args.way = args.way * num_device
    valset = Dataset('val', args)
    val_sampler = CategoriesSampler(valset.label,
                            args.num_eval_episodes,
                            args.eval_way, args.eval_shot + args.eval_query)
    val_loader = DataLoader(dataset=valset,
                            batch_sampler=val_sampler,
                            num_workers=args.num_workers,
                            pin_memory=True)
    testset = Dataset('test', args)
    # test episode count (10000) must match FSLTrainer.evaluate_test's record size
    test_sampler = CategoriesSampler(testset.label,
                            10000, # args.num_eval_episodes,
                            args.eval_way, args.eval_shot + args.eval_query)
    test_loader = DataLoader(dataset=testset,
                            batch_sampler=test_sampler,
                            num_workers=args.num_workers,
                            pin_memory=True)
    return train_loader, val_loader, test_loader
def prepare_model(args):
    """Instantiate the few-shot model, optionally load pre-trained encoder
    weights, move it to the available device, and (optionally) wrap the
    encoder in DataParallel.

    Returns ``(model, para_model)`` — identical objects unless multi_gpu.
    """
    # NOTE(review): eval() on a config string — fine for trusted configs,
    # but never feed this untrusted input.
    model = eval(args.model_class)(args)
    # load pre-trained model (no FC weights)
    if args.init_weights is not None:
        model_dict = model.state_dict()
        pretrained_dict = torch.load(args.init_weights)['params']
        if args.backbone_class == 'ConvNet':
            # checkpoint keys lack the 'encoder.' prefix for ConvNet
            pretrained_dict = {'encoder.'+k: v for k, v in pretrained_dict.items()}
        # keep only keys that exist in the current model (drops FC weights)
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        print(pretrained_dict.keys())
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    if args.multi_gpu:
        # only the encoder is parallelized; the few-shot head stays single-device
        model.encoder = nn.DataParallel(model.encoder, dim=0)
        para_model = model.to(device)
    else:
        para_model = model.to(device)
    return model, para_model
def prepare_optimizer(model, args):
    """Build the optimizer and LR scheduler for a few-shot model.

    The encoder parameters use ``args.lr``; every other ("top") parameter
    uses ``args.lr * args.lr_mul``. Returns ``(optimizer, lr_scheduler)``.
    Raises ValueError for an unknown ``args.lr_scheduler``.
    """
    top_para = [v for k, v in model.named_parameters() if 'encoder' not in k]
    param_groups = [
        {'params': model.encoder.parameters()},
        {'params': top_para, 'lr': args.lr * args.lr_mul},
    ]
    # as in the literature, we use ADAM for ConvNet and SGD for other backbones
    if args.backbone_class == 'ConvNet':
        # weight_decay deliberately omitted for Adam
        optimizer = optim.Adam(param_groups, lr=args.lr)
    else:
        optimizer = optim.SGD(
            param_groups,
            lr=args.lr,
            momentum=args.mom,
            nesterov=True,
            weight_decay=args.weight_decay,
        )
    name = args.lr_scheduler
    if name == 'step':
        lr_scheduler = optim.lr_scheduler.StepLR(
            optimizer,
            step_size=int(args.step_size),
            gamma=args.gamma,
        )
    elif name == 'multistep':
        milestones = [int(m) for m in args.step_size.split(',')]
        lr_scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=milestones,
            gamma=args.gamma,
        )
    elif name == 'cosine':
        # eta_min fixed at 0 (a tuning parameter)
        lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            args.max_epoch,
            eta_min=0,
        )
    else:
        raise ValueError('No Such Scheduler')
    return optimizer, lr_scheduler
| 6,374 | 38.351852 | 100 | py |
FEAT | FEAT-master/model/networks/dropblock.py | import torch
import torch.nn.functional as F
from torch import nn
from torch.distributions import Bernoulli
class DropBlock(nn.Module):
    """DropBlock regularization: drops contiguous block_size x block_size
    regions of the feature map instead of independent activations."""
    def __init__(self, block_size):
        super(DropBlock, self).__init__()
        self.block_size = block_size
    def forward(self, x, gamma):
        """Apply DropBlock with seed probability ``gamma``; identity in eval mode."""
        # shape: (bsize, channels, height, width)
        if self.training:
            batch_size, channels, height, width = x.shape
            # sample block centers only where a full block fits inside the map
            bernoulli = Bernoulli(gamma)
            mask = bernoulli.sample((batch_size, channels, height - (self.block_size - 1), width - (self.block_size - 1)))
            if torch.cuda.is_available():
                mask = mask.cuda()
            block_mask = self._compute_block_mask(mask)
            # rescale kept activations so the expected magnitude is preserved
            countM = block_mask.size()[0] * block_mask.size()[1] * block_mask.size()[2] * block_mask.size()[3]
            count_ones = block_mask.sum()
            return block_mask * x * (countM / count_ones)
        else:
            return x
    def _compute_block_mask(self, mask):
        """Expand sampled centers in ``mask`` into full zeroed blocks.

        Returns a keep-mask of the padded spatial size (1 = keep, 0 = drop).
        """
        left_padding = int((self.block_size-1) / 2)
        right_padding = int(self.block_size / 2)
        batch_size, channels, height, width = mask.shape
        non_zero_idxs = mask.nonzero()
        nr_blocks = non_zero_idxs.shape[0]
        # (row, col) offsets enumerating a block_size x block_size neighborhood
        offsets = torch.stack(
            [
                torch.arange(self.block_size).view(-1, 1).expand(self.block_size, self.block_size).reshape(-1), # - left_padding,
                torch.arange(self.block_size).repeat(self.block_size), #- left_padding
            ]
        ).t()
        # prepend zero offsets for the batch and channel index columns
        offsets = torch.cat((torch.zeros(self.block_size**2, 2).long(), offsets.long()), 1)
        if torch.cuda.is_available():
            offsets = offsets.cuda()
        if nr_blocks > 0:
            # replicate every center once per offset, then mark the covered cells
            non_zero_idxs = non_zero_idxs.repeat(self.block_size ** 2, 1)
            offsets = offsets.repeat(nr_blocks, 1).view(-1, 4)
            offsets = offsets.long()
            block_idxs = non_zero_idxs + offsets
            #block_idxs += left_padding
            padded_mask = F.pad(mask, (left_padding, right_padding, left_padding, right_padding))
            padded_mask[block_idxs[:, 0], block_idxs[:, 1], block_idxs[:, 2], block_idxs[:, 3]] = 1.
        else:
            padded_mask = F.pad(mask, (left_padding, right_padding, left_padding, right_padding))
        # invert: padded_mask marks dropped cells, block_mask marks kept cells
        block_mask = 1 - padded_mask#[:height, :width]
        return block_mask
| 2,392 | 37.596774 | 129 | py |
FEAT | FEAT-master/model/networks/convnet.py | import torch.nn as nn
# Basic ConvNet with Pooling layer
def conv_block(in_channels, out_channels):
    """3x3 conv -> BatchNorm -> ReLU -> 2x2 max-pool (halves spatial size)."""
    layers = [
        nn.Conv2d(in_channels, out_channels, 3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(),
        nn.MaxPool2d(2),
    ]
    return nn.Sequential(*layers)
class ConvNet(nn.Module):
    """Four-stage convolutional backbone producing flat ``z_dim`` features.

    Each stage halves the spatial size; the trailing 5x5 max-pool in
    ``forward`` reduces 5x5 maps (i.e. 84x84 inputs) to 1x1 before
    flattening.
    """
    def __init__(self, x_dim=3, hid_dim=64, z_dim=64):
        super().__init__()
        channels = [x_dim, hid_dim, hid_dim, hid_dim, z_dim]
        stages = [conv_block(c_in, c_out) for c_in, c_out in zip(channels, channels[1:])]
        self.encoder = nn.Sequential(*stages)
    def forward(self, x):
        feats = self.encoder(x)
        feats = nn.MaxPool2d(5)(feats)
        return feats.view(feats.size(0), -1)
| 735 | 23.533333 | 59 | py |
FEAT | FEAT-master/model/networks/res12.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from model.networks.dropblock import DropBlock
# This ResNet network was designed following the practice of the following papers:
# TADAM: Task dependent adaptive metric for improved few-shot learning (Oreshkin et al., in NIPS 2018) and
# A Simple Neural Attentive Meta-Learner (Mishra et al., in ICLR 2018).
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet-12 basic block: three 3x3 convs + residual, followed by
    max-pool downsampling and optional dropout / DropBlock."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, drop_rate=0.0, drop_block=False, block_size=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.LeakyReLU(0.1)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv3x3(planes, planes)
        self.bn3 = nn.BatchNorm2d(planes)
        # downsampling is done by pooling (stride), not strided convs
        self.maxpool = nn.MaxPool2d(stride)
        self.downsample = downsample
        self.stride = stride
        self.drop_rate = drop_rate
        # counts forward passes; used to anneal the DropBlock keep rate
        self.num_batches_tracked = 0
        self.drop_block = drop_block
        self.block_size = block_size
        self.DropBlock = DropBlock(block_size=self.block_size)
    def forward(self, x):
        self.num_batches_tracked += 1
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # project the residual to match channel count
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        out = self.maxpool(out)
        if self.drop_rate > 0:
            if self.drop_block == True:
                feat_size = out.size()[2]
                # keep_rate anneals from 1.0 down to (1 - drop_rate) over 40k steps
                keep_rate = max(1.0 - self.drop_rate / (20*2000) * (self.num_batches_tracked), 1.0 - self.drop_rate)
                # gamma: per-cell seed probability from the DropBlock paper
                gamma = (1 - keep_rate) / self.block_size**2 * feat_size**2 / (feat_size - self.block_size + 1)**2
                out = self.DropBlock(out, gamma=gamma)
            else:
                out = F.dropout(out, p=self.drop_rate, training=self.training, inplace=True)
        return out
class ResNet(nn.Module):
    """ResNet-12 backbone: four single-block stages (64/160/320/640 channels),
    DropBlock enabled in the last two stages. Outputs flattened features."""
    def __init__(self, block=BasicBlock, keep_prob=1.0, avg_pool=True, drop_rate=0.1, dropblock_size=5):
        self.inplanes = 3
        super(ResNet, self).__init__()
        self.layer1 = self._make_layer(block, 64, stride=2, drop_rate=drop_rate)
        self.layer2 = self._make_layer(block, 160, stride=2, drop_rate=drop_rate)
        self.layer3 = self._make_layer(block, 320, stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)
        self.layer4 = self._make_layer(block, 640, stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)
        if avg_pool:
            # 5x5 average pool assumes 5x5 final maps (84x84 inputs) — TODO confirm
            self.avgpool = nn.AvgPool2d(5, stride=1)
        self.keep_prob = keep_prob
        self.keep_avg_pool = avg_pool
        self.dropout = nn.Dropout(p=1 - self.keep_prob, inplace=False)
        self.drop_rate = drop_rate
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, stride=1, drop_rate=0.0, drop_block=False, block_size=1):
        """Build one stage with a single residual block."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the residual matches the block's channel count
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, drop_rate, drop_block, block_size))
        self.inplanes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        if self.keep_avg_pool:
            x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x
def Res12(keep_prob=1.0, avg_pool=False, **kwargs):
    """Construct a ResNet-12 backbone (``BasicBlock`` stages).

    Extra keyword arguments are forwarded to :class:`ResNet`.
    """
    return ResNet(BasicBlock, keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)
| 4,705 | 36.349206 | 125 | py |
FEAT | FEAT-master/model/networks/WRN28.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import sys
import numpy as np
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 padded convolution WITH bias, as used throughout this WRN."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=True)
def conv_init(m):
    """Initialize a module in place by class name.

    Conv layers get Xavier-uniform weights (gain sqrt(2)) and zero bias;
    BatchNorm layers get unit weight and zero bias. Other modules are
    untouched.

    Fix: use the in-place ``init.*_`` functions — the non-underscore
    variants (``init.xavier_uniform``, ``init.constant``) are deprecated
    and removed in modern PyTorch.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class wide_basic(nn.Module):
    """Wide-ResNet basic block: BN-ReLU-conv (pre-activation) twice, with
    dropout between the convs and a 1x1 shortcut when shapes differ."""
    def __init__(self, in_planes, planes, dropout_rate, stride=1):
        super(wide_basic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        # identity shortcut unless channels or resolution change
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
            )
    def forward(self, x):
        # pre-activation ordering: BN -> ReLU -> conv
        out = self.dropout(self.conv1(F.relu(self.bn1(x))))
        out = self.conv2(F.relu(self.bn2(out)))
        out += self.shortcut(x)
        return out
class Wide_ResNet(nn.Module):
    """Wide-ResNet backbone (depth = 6n+4, width = k) returning flat features."""
    def __init__(self, depth, widen_factor, dropout_rate):
        super(Wide_ResNet, self).__init__()
        self.in_planes = 16
        assert ((depth-4)%6 ==0), 'Wide-resnet depth should be 6n+4'
        n = int((depth-4)/6)  # blocks per stage
        k = widen_factor
        print('| Wide-Resnet %dx%d' %(depth, k))
        nStages = [16, 16*k, 32*k, 64*k]
        self.conv1 = conv3x3(3,nStages[0])
        self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
        self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        """Stack ``num_blocks`` wide_basic blocks; only the first downsamples."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, dropout_rate, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn1(out))
        # 21x21 average pool assumes 84x84 inputs (84 / 2 / 2 = 21) — TODO confirm
        out = F.avg_pool2d(out, 21)
        out = out.view(out.size(0), -1)
        return out
FEAT | FEAT-master/model/networks/res18.py | import torch.nn as nn
__all__ = ['resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding (bias-free, as BatchNorm follows)."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=1, bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (bias-free channel projection)."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=1, stride=stride, bias=False,
    )
class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convs with a residual connection."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # optional projection for the residual when shape changes
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone (small-image variant: 3x3 stem, no initial pooling)
    returning globally pooled, flattened features."""
    def __init__(self, block=BasicBlock, layers=[2, 2, 2, 2], zero_init_residual=False):
        super(ResNet, self).__init__()
        self.inplanes = 64
        # 3x3 stride-1 stem instead of the standard 7x7/stride-2 — suits small inputs
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual blocks (first may downsample)."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x
def resnet10(**kwargs):
    """Constructs a ResNet-10 model (BasicBlock, one block per stage).

    Keyword arguments are forwarded to :class:`ResNet`.
    """
    model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
    return model
def resnet18(**kwargs):
    """Constructs a ResNet-18 model (BasicBlock, two blocks per stage).

    Keyword arguments are forwarded to :class:`ResNet`.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    return model
def resnet34(**kwargs):
    """Constructs a ResNet-34 model (BasicBlock, 3-4-6-3 layout).

    Keyword arguments are forwarded to :class:`ResNet`.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    return model
def resnet50(**kwargs):
    """Constructs a ResNet-50 model (Bottleneck, 3-4-6-3 layout).

    Keyword arguments are forwarded to :class:`ResNet`.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    return model
def resnet101(**kwargs):
    """Constructs a ResNet-101 model (Bottleneck, 3-4-23-3 layout).

    Keyword arguments are forwarded to :class:`ResNet`.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    return model
def resnet152(**kwargs):
    """Constructs a ResNet-152 model (Bottleneck, 3-8-36-3 layout).

    Keyword arguments are forwarded to :class:`ResNet`.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    return model
FEAT | FEAT-master/model/models/base.py | import torch
import torch.nn as nn
import numpy as np
class FewShotModel(nn.Module):
    """Base few-shot model: backbone encoder + episodic support/query split.

    Subclasses implement ``_forward`` with the actual few-shot head.
    """
    def __init__(self, args):
        super().__init__()
        self.args = args
        if args.backbone_class == 'ConvNet':
            from model.networks.convnet import ConvNet
            self.encoder = ConvNet()
        elif args.backbone_class == 'Res12':
            hdim = 640
            from model.networks.res12 import ResNet
            self.encoder = ResNet()
        elif args.backbone_class == 'Res18':
            hdim = 512
            from model.networks.res18 import ResNet
            self.encoder = ResNet()
        elif args.backbone_class == 'WRN':
            hdim = 640
            from model.networks.WRN28 import Wide_ResNet
            self.encoder = Wide_ResNet(28, 10, 0.5)  # we set the dropout=0.5 directly here, it may achieve better results by tunning the dropout
        else:
            raise ValueError('')
    def split_instances(self, data):
        """Return (support_idx, query_idx) index tensors for one episode.

        Shapes: (1, shot, way) and (1, query, way) — supports come first in
        the flat batch, followed by queries. Uses eval_* sizes at eval time.
        """
        args = self.args
        if self.training:
            return  (torch.Tensor(np.arange(args.way*args.shot)).long().view(1, args.shot, args.way),
                     torch.Tensor(np.arange(args.way*args.shot, args.way * (args.shot + args.query))).long().view(1, args.query, args.way))
        else:
            return  (torch.Tensor(np.arange(args.eval_way*args.eval_shot)).long().view(1, args.eval_shot, args.eval_way),
                     torch.Tensor(np.arange(args.eval_way*args.eval_shot, args.eval_way * (args.eval_shot + args.eval_query))).long().view(1, args.eval_query, args.eval_way))
    def forward(self, x, get_feature=False):
        """Encode ``x`` and dispatch to the subclass head.

        With ``get_feature=True`` only the encoder features are returned.
        In training mode returns (logits, logits_reg); otherwise logits.
        """
        if get_feature:
            # get feature with the provided embeddings
            return self.encoder(x)
        else:
            # feature extraction
            x = x.squeeze(0)
            instance_embs = self.encoder(x)
            num_inst = instance_embs.shape[0]
            # split support query set for few-shot data
            support_idx, query_idx = self.split_instances(x)
            if self.training:
                logits, logits_reg = self._forward(instance_embs, support_idx, query_idx)
                return logits, logits_reg
            else:
                logits = self._forward(instance_embs, support_idx, query_idx)
                return logits
    def _forward(self, x, support_idx, query_idx):
        raise NotImplementedError('Suppose to be implemented by subclass')
FEAT | FEAT-master/model/models/graphnet.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
import math
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from itertools import permutations
import scipy.sparse as sp
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    Computes ``adj @ (input @ W) + b`` with a sparse adjacency matrix.
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        """Uniform init scaled by 1/sqrt(out_features)."""
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)
    def forward(self, input, adj):
        # dense feature transform, then sparse neighborhood aggregation
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output
    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
def normalize(mx):
    """Row-normalize a sparse matrix so each row sums to 1.

    Rows that sum to zero are left as all-zero rows.
    """
    row_sums = np.array(mx.sum(1))
    inv_sums = np.power(row_sums, -1).flatten()
    inv_sums[np.isinf(inv_sums)] = 0.  # zero rows would give inf; keep them zero
    return sp.diags(inv_sums).dot(mx)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse COO tensor.

    Fix: build the tensor with ``torch.sparse_coo_tensor`` instead of the
    deprecated ``torch.sparse.FloatTensor`` constructor. The result is
    moved to GPU when CUDA is available, matching the original behavior.
    """
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    # 2 x nnz index matrix of (row, col) coordinates
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    tensor = torch.sparse_coo_tensor(indices, values, shape)
    if torch.cuda.is_available():
        tensor = tensor.cuda()
    return tensor
class GraphFunc(nn.Module):
    def __init__(self, z_dim):
        super(GraphFunc, self).__init__()
        """
        DeepSets Function

        Two-layer GCN applied per set, with a residual connection; the
        adjacency graph connects instances sharing the same label.
        """
        self.gc1 = GraphConvolution(z_dim, z_dim * 4)
        self.gc2 = GraphConvolution(z_dim * 4, z_dim)
        self.z_dim = z_dim
    def forward(self, graph_input_raw, graph_label):
        """
        set_input, seq_length, set_size, dim

        graph_input_raw: (set_length, set_size, dim) batch of sets;
        graph_label: per-instance class labels used to build the graph.
        """
        set_length, set_size, dim = graph_input_raw.shape
        assert(dim == self.z_dim)
        set_output_list = []
        for g_index in range(set_length):
            graph_input = graph_input_raw[g_index, :]
            # construct the adj matrix: edges between same-class instances
            unique_class = np.unique(graph_label)
            edge_set = []
            for c in unique_class:
                current_index = np.where(graph_label == c)[0].tolist()
                if len(current_index) > 1:
                    edge_set.append(np.array(list(permutations(current_index, 2))))
            if len(edge_set) == 0:
                # no class has 2+ members: fall back to an (effectively) empty graph
                adj = sp.coo_matrix((np.array([0]), (np.array([0]), np.array([0]))),
                                    shape=(graph_label.shape[0], graph_label.shape[0]),
                                    dtype=np.float32)
            else:
                edge_set = np.concatenate(edge_set, 0)
                adj = sp.coo_matrix((np.ones(edge_set.shape[0]), (edge_set[:, 0], edge_set[:, 1])),
                                    shape=(graph_label.shape[0], graph_label.shape[0]),
                                    dtype=np.float32)
            # symmetrize, add self-loops, then row-normalize
            adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
            adj = normalize(adj + sp.eye(adj.shape[0]))
            adj = sparse_mx_to_torch_sparse_tensor(adj)
            # do GCN process with a residual connection
            residual = graph_input
            graph_input = F.relu(self.gc1(graph_input, adj))
            graph_input = F.dropout(graph_input, 0.5, training=self.training)
            graph_input = self.gc2(graph_input, adj)
            set_output = residual + graph_input
            set_output_list.append(set_output)
        return torch.stack(set_output_list)
class GCN(FewShotModel):
    """Few-shot model whose prototypes are refined by a label-graph GCN."""
    def __init__(self, args):
        super().__init__(args)
        # hdim: embedding size of the chosen backbone
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')
        self.graph_func = GraphFunc(hdim)
    def _forward(self, instance_embs, support_idx, query_idx):
        """Compute query logits from GCN-refined class prototypes."""
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query   = instance_embs[query_idx.contiguous().view(-1)].contiguous().view(  *(query_idx.shape + (-1,)))
        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # one node per class prototype; labels 0..way-1 build the graph
        if self.training:
            graph_label = torch.arange(self.args.way).long()
        else:
            graph_label = torch.arange(self.args.eval_way).long()
        proto = self.graph_func(proto, graph_label)
        if self.args.use_euclidean:
            # negative squared Euclidean distance as logits
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            # cosine similarity as logits
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch,  Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
            logits = logits.view(-1, num_proto)
        # do not use contrastive regularization for GCN (since there are only one sinlge instances class in each auxiliary task)
        if self.training:
            return logits, None
        else:
            return logits
| 6,810 | 37.480226 | 128 | py |
FEAT | FEAT-master/model/models/deepset.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
class DeepSetsFunc(nn.Module):
    def __init__(self, z_dim):
        super(DeepSetsFunc, self).__init__()
        """
        DeepSets Function

        Each element is combined with a pooled summary of the OTHER
        elements of its set, then residually added to the input.
        """
        self.gen1 = nn.Linear(z_dim, z_dim * 4)
        self.gen2 = nn.Linear(z_dim*4, z_dim)
        self.gen3 = nn.Linear(z_dim * 2, z_dim * 4)
        self.gen4 = nn.Linear(z_dim*4, z_dim)
        self.z_dim = z_dim
    def forward(self, set_input):
        """
        set_input, seq_length, set_size, dim

        Returns a tensor of the same shape as ``set_input``.
        """
        set_length, set_size, dim = set_input.shape
        assert(dim == self.z_dim)
        # mask_one zeroes each element's own contribution (1 - identity)
        mask_one = torch.ones(set_size, set_size) - torch.eye(set_size, set_size)
        mask_one = mask_one.view(1, set_size, set_size, 1)
        if torch.cuda.is_available():
            mask_one = mask_one.cuda()
        # NOTE(review): despite the name, this is a MAX over the other elements
        combined_mean = torch.mul(set_input.unsqueeze(2), mask_one).max(1)[0] # 75 x 6 x 64, we can also try max here
        # do a bilinear transformation
        combined_mean = F.relu(self.gen1(combined_mean.view(-1, self.z_dim)))
        combined_mean = self.gen2(combined_mean)
        # concatenate each element with its set summary
        combined_mean_cat = torch.cat([set_input.contiguous().view(-1, self.z_dim), combined_mean], 1)
        # do linear transformation
        combined_mean_cat = F.relu(self.gen3(combined_mean_cat))
        combined_mean_cat = self.gen4(combined_mean_cat)
        combined_mean_cat = combined_mean_cat.view(-1, set_size, self.z_dim)
        # residual connection
        set_output = set_input + combined_mean_cat
        return set_output
class DeepSet(FewShotModel):
    """Few-shot classifier whose prototypes are adapted jointly by a
    DeepSets set function (``DeepSetsFunc``) before nearest-prototype
    (Euclidean) or cosine classification.

    In training mode, also returns regularization logits computed from the
    class centers of the set-transformed support+query "auxiliary task".
    """
    def __init__(self, args):
        super().__init__(args)
        # Embedding width is fixed by the chosen backbone.
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')
        self.set_func = DeepSetsFunc(hdim)
    def _forward(self, instance_embs, support_idx, query_idx):
        """Score query instances against set-adapted class prototypes.

        instance_embs: flat (num_instances, emb_dim) embedding matrix;
        support_idx / query_idx index into it and carry the episode layout
        in their own shapes. Returns logits, plus regularization logits
        when training.
        """
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))
        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # adapt all prototypes of a task jointly with the set function
        proto = self.set_func(proto)
        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
            logits = logits.view(-1, num_proto)
        # for regularization
        if self.training:
            # build an auxiliary task containing both support and query instances
            aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
                                  query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
            num_query = np.prod(aux_task.shape[1:3])
            aux_task = aux_task.permute([0, 2, 1, 3])
            aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
            # apply the transformation over the Aug Task
            aux_emb = self.set_func(aux_task) # T x N x (K+Kq) x d
            # compute class mean
            aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
            aux_center = torch.mean(aux_emb, 2) # T x N x d
            if self.args.use_euclidean:
                aux_task = aux_task.contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
                aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
                aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
                logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
            else:
                aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
                aux_task = aux_task.contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
                logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
            logits_reg = logits_reg.view(-1, num_proto)
            return logits, logits_reg
        else:
            return logits
| 5,338 | 43.865546 | 117 | py |
FEAT | FEAT-master/model/models/semi_protofeat.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
from model.utils import one_hot
class ScaledDotProductAttention(nn.Module):
    """Temperature-scaled dot-product attention.

    forward(q, k, v) returns a 3-tuple:
      * the attended values  softmax(q k^T / temperature) @ v,
      * the attention weights (after dropout),
      * the log-softmax of the raw scores (before dropout).
    """
    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, q, k, v):
        # Raw pairwise scores: (B, Lq, Lk), scaled down by the temperature.
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        # Log-probabilities are taken from the pre-dropout scores.
        log_attn = F.log_softmax(scores, 2)
        weights = self.dropout(self.softmax(scores))
        return torch.bmm(weights, v), weights, log_attn
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module (post-norm Transformer style).

    Projects inputs into ``n_head`` query/key/value subspaces, applies
    scaled dot-product attention per head, concatenates the heads, and
    finishes with a linear projection, dropout, a residual connection to
    the query input, and layer normalization.
    '''
    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        # n_head: number of attention heads
        # d_model: input/output feature dimensionality
        # d_k / d_v: per-head query-key and value dimensionalities
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # One Linear per projection; heads are split out later via view().
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
        # Scores are scaled by sqrt(d_k) inside the attention module.
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)
    def forward(self, q, k, v):
        # q/k/v: (batch, len, d_model); returns (batch, len_q, d_model).
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()
        # Keep the raw query for the residual connection below.
        residual = q
        # Project and split into heads: (batch, len, n_head, d_*).
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        # Fold heads into the batch dimension for bmm-based attention.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
        output, attn, log_attn = self.attention(q, k, v)
        # Un-fold heads and concatenate them along the feature dimension.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
        # Final projection + dropout, then residual and post-layer-norm.
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)
        return output
class SemiProtoFEAT(FewShotModel):
    """Semi-supervised FEAT: support embeddings are adapted by a
    transformer (attending over support + query), then prototypes are
    refined with soft assignments of an unlabeled pool (here: the query
    set) before nearest-prototype / cosine classification.
    """
    def __init__(self, args):
        super().__init__(args)
        # Embedding width is fixed by the chosen backbone.
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')
        self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)
    def get_proto(self, x_shot, x_pool):
        # get the prototypes based w/ an unlabeled pool set
        # x_shot: (num_batch, num_shot, num_way, emb_dim) labeled supports;
        # x_pool: unlabeled instances, shape[1] gives per-way pool size.
        num_batch, num_shot, num_way, emb_dim = x_shot.shape
        num_pool_shot = x_pool.shape[1]
        num_pool = num_pool_shot * num_way
        # Hard one-hot labels for the supports.
        label_support = torch.arange(num_way).repeat(num_shot).type(torch.LongTensor)
        label_support_onehot = one_hot(label_support, num_way)
        label_support_onehot = label_support_onehot.unsqueeze(0).repeat([num_batch, 1, 1])
        if torch.cuda.is_available():
            label_support_onehot = label_support_onehot.cuda()
        # Initial prototypes from the labeled supports only.
        proto_shot = x_shot.mean(dim = 1)
        # Similarity of each pool instance to each initial prototype.
        if self.args.use_euclidean:
            dis = - torch.sum((proto_shot.unsqueeze(1).expand(num_batch, num_pool, num_way, emb_dim).contiguous().view(num_batch*num_pool, num_way, emb_dim) - x_pool.view(-1, emb_dim).unsqueeze(1)) ** 2, 2) / self.args.temperature
        else:
            dis = torch.bmm(x_pool.view(num_batch, -1, emb_dim), F.normalize(proto_shot, dim=-1).permute([0,2,1])) / self.args.temperature
        dis = dis.view(num_batch, -1, num_way)
        # Soft assignments of pool instances to classes.
        z_hat = F.softmax(dis, dim=2)
        z = torch.cat([label_support_onehot, z_hat], dim = 1)               # (num_batch, n_shot + n_pool, n_way)
        h = torch.cat([x_shot.view(num_batch, -1, emb_dim), x_pool.view(num_batch, -1, emb_dim)], dim = 1)  # (num_batch, n_shot + n_pool, n_embedding)
        # Assignment-weighted mean of all instances per class.
        proto = torch.bmm(z.permute([0,2,1]), h)
        sum_z = z.sum(dim = 1).view((num_batch, -1, 1))
        proto = proto / sum_z
        return proto
    def _forward(self, instance_embs, support_idx, query_idx):
        """Transform supports with attention over the whole episode, refine
        prototypes via get_proto(), and score the queries. In training mode
        also returns the FEAT-style regularization logits."""
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))
        num_batch = support.shape[0]
        num_shot, num_way = support.shape[1], support.shape[2]
        num_query = np.prod(query_idx.shape[-2:])
        # transformation
        whole_set = torch.cat([support.view(num_batch, -1, emb_dim), query.view(num_batch, -1, emb_dim)], 1)
        support = self.slf_attn(support.view(num_batch, -1, emb_dim), whole_set, whole_set).view(num_batch, num_shot, num_way, emb_dim)
        # get mean of the support
        proto = self.get_proto(support, query) # we can also use adapted query set here to achieve better results
        # proto = support.mean(dim=1) # Ntask x NK x d
        num_proto = proto.shape[1]
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
        logits = logits.view(-1, num_proto)
        # for regularization
        if self.training:
            # auxiliary task: transform each class's support+query instances
            aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
                                  query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
            num_query = np.prod(aux_task.shape[1:3])
            aux_task = aux_task.permute([0, 2, 1, 3])
            aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
            # apply the transformation over the Aug Task
            aux_emb = self.slf_attn(aux_task, aux_task, aux_task) # T x N x (K+Kq) x d
            # compute class mean
            aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
            aux_center = torch.mean(aux_emb, 2) # T x N x d
            if self.args.use_euclidean:
                aux_task = aux_task.permute([1,0,2]).contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
                aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
                aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
                logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
            else:
                aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
                aux_task = aux_task.permute([1,0,2]).contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
                logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
            logits_reg = logits_reg.view(-1, num_proto)
            return logits, logits_reg
        else:
            return logits
| 8,560 | 45.781421 | 230 | py |
FEAT | FEAT-master/model/models/protonet.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
# Note: As in Protonet, we use Euclidean Distances here, you can change to the Cosine Similarity by replace
# TRUE in line 30 as self.args.use_euclidean
class ProtoNet(FewShotModel):
    """Prototypical-network head.

    Queries are scored by negative squared Euclidean distance to the class
    prototypes (per-class means of the support embeddings), scaled by
    ``args.temperature``.  As noted in the original file comment, the
    Euclidean branch is hard-wired via ``if True:``; replace it with
    ``self.args.use_euclidean`` to re-enable the cosine branch.
    """
    def __init__(self, args):
        super().__init__(args)
    def _forward(self, instance_embs, support_idx, query_idx):
        emb_dim = instance_embs.size(-1)
        # Re-assemble flat embeddings into the episode layout carried by the
        # index tensors' shapes.
        support = instance_embs[support_idx.flatten()].view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.flatten()].view(*(query_idx.shape + (-1,)))
        # Prototype = mean over the shot dimension: (Ntask, way, d).
        proto = support.mean(dim=1)
        n_task, n_proto = proto.shape[0], proto.shape[1]
        n_query = np.prod(query_idx.shape[-2:])
        if True:  # self.args.use_euclidean
            # Pair every query with every prototype and compare in d dims.
            query = query.view(-1, emb_dim).unsqueeze(1)  # (Ntask*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(n_task, n_query, n_proto, emb_dim)
            proto = proto.contiguous().view(n_task * n_query, n_proto, emb_dim)
            logits = -torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:  # cosine similarity: more memory efficient
            proto = F.normalize(proto, dim=-1)
            query = query.view(n_task, -1, emb_dim)  # (Ntask, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0, 2, 1])) / self.args.temperature
        logits = logits.view(-1, n_proto)
        if self.training:
            return logits, None
        return logits
| 2,007 | 40.833333 | 137 | py |
FEAT | FEAT-master/model/models/bilstm.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from model.models import FewShotModel
class BidirectionalLSTM(nn.Module):
    """Multi-layer bidirectional LSTM with a residual connection.

    The per-direction hidden size is ``layer_sizes[0]``; because the LSTM is
    bidirectional its output width is ``2 * layer_sizes[0]``, which must
    equal ``vector_dim`` for the residual addition in forward() to be valid.
    """
    def __init__(self, layer_sizes, vector_dim):
        """
        Initializes a multi layer bidirectional LSTM
        :param layer_sizes: a list whose length gives the number of layers
            and whose first entry gives the per-direction hidden size,
            e.g. [100, 100, 100] returns a 3 layer LSTM of width 100
        :param vector_dim: dimensionality of the input feature vectors
        """
        super(BidirectionalLSTM, self).__init__()
        self.hidden_size = layer_sizes[0]
        self.vector_dim = vector_dim
        self.num_layers = len(layer_sizes)
        self.lstm = nn.LSTM(input_size=self.vector_dim,
                            num_layers=self.num_layers,
                            hidden_size=self.hidden_size,
                            bidirectional=True)
    def forward(self, inputs, batch_size):
        """
        Runs the bidirectional LSTM with randomly drawn initial states and
        adds the input back as a residual.
        :param inputs: tensor of shape [sequence_length, batch_size, vector_dim]
        :param batch_size: batch dimension used to size the initial states
        :return: the LSTM outputs plus the residual input (same shape as inputs)
        """
        # torch.autograd.Variable is deprecated since PyTorch 0.4; plain
        # tensors (which do not require grad by default) are equivalent.
        c0 = torch.rand(self.lstm.num_layers * 2, batch_size, self.lstm.hidden_size)
        h0 = torch.rand(self.lstm.num_layers * 2, batch_size, self.lstm.hidden_size)
        if torch.cuda.is_available():
            c0 = c0.cuda()
            h0 = h0.cuda()
        output, (hn, cn) = self.lstm(inputs, (h0, c0))
        # residual addition (requires 2 * hidden_size == vector_dim)
        output = output + inputs
        return output  # , hn, cn
class BILSTM(FewShotModel):
    """Few-shot classifier whose prototypes are adapted by a bidirectional
    LSTM (treating the prototype set as a sequence) before nearest-prototype
    or cosine classification. In training mode, also returns FEAT-style
    regularization logits from the LSTM-transformed auxiliary task.
    """
    def __init__(self, args):
        super().__init__(args)
        # Embedding width is fixed by the chosen backbone.
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')
        # hdim // 2 per direction, so the bidirectional output equals hdim.
        self.bilstm = BidirectionalLSTM(layer_sizes=[hdim // 2],
                                        vector_dim = hdim)
    def _forward(self, instance_embs, support_idx, query_idx):
        """Score queries against LSTM-adapted prototypes; also return
        regularization logits in training mode."""
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))
        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # the LSTM expects (seq_len, batch, dim), hence the permutes
        proto = self.bilstm(proto.permute([1, 0, 2]), num_batch)
        proto = proto.permute([1, 0, 2])
        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
        logits = logits.view(-1, num_proto)
        # for regularization
        if self.training:
            # auxiliary task: per-class sequences of support+query instances
            aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
                                  query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
            num_query = np.prod(aux_task.shape[1:3])
            aux_task = aux_task.permute([0, 2, 1, 3])
            aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
            # apply the transformation over the Aug Task
            aux_emb = self.bilstm(aux_task.permute([1, 0, 2]), num_batch * self.args.way) # T x N x (K+Kq) x d
            aux_emb = aux_emb.permute([1, 0, 2])
            # compute class mean
            aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
            aux_center = torch.mean(aux_emb, 2) # T x N x d
            if self.args.use_euclidean:
                aux_task = aux_task.contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
                aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
                aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
                logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
            else:
                aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
                aux_task = aux_task.contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
                logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
            logits_reg = logits_reg.view(-1, num_proto)
            return logits, logits_reg
        else:
            return logits
| 5,746 | 45.723577 | 118 | py |
FEAT | FEAT-master/model/models/classifier.py | import torch
import torch.nn as nn
import numpy as np
from model.utils import euclidean_metric
import torch.nn.functional as F
class Classifier(nn.Module):
    """Plain supervised classifier used for backbone pre-training: a
    backbone encoder followed by a linear layer over ``args.num_class``.
    """
    def __init__(self, args):
        super().__init__()
        self.args = args
        # Import only the backbone actually requested; each sets the
        # embedding width hdim that the linear head consumes.
        if args.backbone_class == 'ConvNet':
            from model.networks.convnet import ConvNet
            hdim = 64
            self.encoder = ConvNet()
        elif args.backbone_class == 'Res12':
            hdim = 640
            from model.networks.res12 import ResNet
            self.encoder = ResNet()
        elif args.backbone_class == 'Res18':
            hdim = 512
            from model.networks.res18 import ResNet
            self.encoder = ResNet()
        elif args.backbone_class == 'WRN':
            hdim = 640
            from model.networks.WRN28 import Wide_ResNet
            self.encoder = Wide_ResNet(28, 10, 0.5)
        else:
            raise ValueError('')
        self.fc = nn.Linear(hdim, args.num_class)
    def forward(self, data, is_emb = False):
        """Return class logits, or the raw embedding when is_emb is True."""
        out = self.encoder(data)
        if not is_emb:
            out = self.fc(out)
        return out
    def forward_proto(self, data_shot, data_query, way = None):
        """Few-shot style evaluation with this pre-trained encoder.

        Builds per-class prototypes from data_shot (``args.shot`` per way)
        and returns two logits for data_query: Euclidean-distance based
        (via euclidean_metric) and cosine-style (dot product with the
        L2-normalized prototypes).
        """
        if way is None:
            way = self.args.num_class
        proto = self.encoder(data_shot)
        proto = proto.reshape(self.args.shot, way, -1).mean(dim=0)
        query = self.encoder(data_query)
        logits_dist = euclidean_metric(query, proto)
        logits_sim = torch.mm(query, F.normalize(proto, p=2, dim=-1).t())
        return logits_dist, logits_sim
FEAT | FEAT-master/model/models/featstar.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
# No-Reg for FEAT-STAR here
class ScaledDotProductAttention(nn.Module):
    """Temperature-scaled dot-product attention.

    forward(q, k, v) returns a 3-tuple:
      * the attended values  softmax(q k^T / temperature) @ v,
      * the attention weights (after dropout),
      * the log-softmax of the raw scores (before dropout).
    """
    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, q, k, v):
        # Raw pairwise scores: (B, Lq, Lk), scaled down by the temperature.
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        # Log-probabilities are taken from the pre-dropout scores.
        log_attn = F.log_softmax(scores, 2)
        weights = self.dropout(self.softmax(scores))
        return torch.bmm(weights, v), weights, log_attn
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module (post-norm Transformer style).

    Projects inputs into ``n_head`` query/key/value subspaces, applies
    scaled dot-product attention per head, concatenates the heads, and
    finishes with a linear projection, dropout, a residual connection to
    the query input, and layer normalization.
    '''
    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        # n_head: number of attention heads
        # d_model: input/output feature dimensionality
        # d_k / d_v: per-head query-key and value dimensionalities
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # One Linear per projection; heads are split out later via view().
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
        # Scores are scaled by sqrt(d_k) inside the attention module.
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)
    def forward(self, q, k, v):
        # q/k/v: (batch, len, d_model); returns (batch, len_q, d_model).
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()
        # Keep the raw query for the residual connection below.
        residual = q
        # Project and split into heads: (batch, len, n_head, d_*).
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        # Fold heads into the batch dimension for bmm-based attention.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
        output, attn, log_attn = self.attention(q, k, v)
        # Un-fold heads and concatenate them along the feature dimension.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
        # Final projection + dropout, then residual and post-layer-norm.
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)
        return output
class FEATSTAR(FewShotModel):
    """FEAT* variant: prototypes and each query are refined *jointly* by the
    transformer (the query is concatenated to the prototype set before
    attention), then scored by Euclidean or cosine distance.
    No regularization term is returned (No-Reg, per the file comment).
    """
    def __init__(self, args):
        super().__init__(args)
        # Embedding width is fixed by the chosen backbone.
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')
        self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)
    def _forward(self, instance_embs, support_idx, query_idx):
        """Jointly adapt (prototypes + one query) per attention batch and
        return the query logits; second return value is always None."""
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))
        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # replicate the prototype set once per query instance
        query = query.view(-1, emb_dim).unsqueeze(1)
        proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
        proto = proto.view(num_batch*num_query, num_proto, emb_dim)
        # refine by Transformer
        combined = torch.cat([proto, query], 1) # Nk x (N + 1) x d, batch_size = NK
        combined = self.slf_attn(combined, combined, combined)
        # compute distance for all batches
        proto, query = combined.split(num_proto, 1)
        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else: # cosine similarity: more memory efficient
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
        logits = logits.view(-1, num_proto)
        return logits, None
| 4,959 | 37.153846 | 114 | py |
FEAT | FEAT-master/model/models/matchnet.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
from model.utils import one_hot
# Note: This is the MatchingNet without FCE
# it predicts an instance based on nearest neighbor rule (not Nearest center mean)
class MatchNet(FewShotModel):
    """MatchingNet without FCE (see the file comment): each query is scored
    by cosine similarity against every individual support instance, and the
    similarities are aggregated per class via a one-hot label matrix
    (nearest-neighbor style rather than nearest-class-mean).
    """
    def __init__(self, args):
        super().__init__(args)
    def _forward(self, instance_embs, support_idx, query_idx):
        """Return per-class logits for the queries; in training mode returns
        (logits, None) to match the two-output convention."""
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.flatten()].view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.flatten()].view( *(query_idx.shape + (-1,)))
        # way/shot come from different args during training vs. evaluation
        if self.training:
            label_support = torch.arange(self.args.way).repeat(self.args.shot).type(torch.LongTensor)
            label_support_onehot = one_hot(label_support, self.args.way)
        else:
            label_support = torch.arange(self.args.eval_way).repeat(self.args.eval_shot).type(torch.LongTensor)
            label_support_onehot = one_hot(label_support, self.args.eval_way)
        if torch.cuda.is_available():
            label_support_onehot = label_support_onehot.cuda() # KN x N
        # get mean of the support
        num_batch = support.shape[0]
        num_way = support.shape[2]
        num_support = np.prod(support.shape[1:3])
        num_query = np.prod(query_idx.shape[-2:])
        support = support.view(num_batch, num_support, emb_dim) # Ntask x NK x d
        label_support_onehot = label_support_onehot.unsqueeze(0).repeat(num_batch, 1, 1)
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        support = F.normalize(support, dim=-1) # normalize for cosine distance
        query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
        # (num_batch, num_emb, num_proto) * (num_batch, num_query*num_proto, num_emb) -> (num_batch, num_query*num_proto, num_proto)
        # per-instance similarities, then summed per class by the one-hot matrix
        logits = torch.bmm(query, support.permute([0,2,1]))
        logits = torch.bmm(logits, label_support_onehot) / self.args.temperature # KqN x N
        logits = logits.view(-1, num_way)
        if self.training:
            return logits, None
        else:
            return logits
| 2,299 | 40.818182 | 133 | py |
FEAT | FEAT-master/model/models/feat.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
class ScaledDotProductAttention(nn.Module):
    """Temperature-scaled dot-product attention.

    forward(q, k, v) returns a 3-tuple:
      * the attended values  softmax(q k^T / temperature) @ v,
      * the attention weights (after dropout),
      * the log-softmax of the raw scores (before dropout).
    """
    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, q, k, v):
        # Raw pairwise scores: (B, Lq, Lk), scaled down by the temperature.
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        # Log-probabilities are taken from the pre-dropout scores.
        log_attn = F.log_softmax(scores, 2)
        weights = self.dropout(self.softmax(scores))
        return torch.bmm(weights, v), weights, log_attn
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module (post-norm Transformer style).

    Projects inputs into ``n_head`` query/key/value subspaces, applies
    scaled dot-product attention per head, concatenates the heads, and
    finishes with a linear projection, dropout, a residual connection to
    the query input, and layer normalization.
    '''
    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        # n_head: number of attention heads
        # d_model: input/output feature dimensionality
        # d_k / d_v: per-head query-key and value dimensionalities
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # One Linear per projection; heads are split out later via view().
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
        # Scores are scaled by sqrt(d_k) inside the attention module.
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)
    def forward(self, q, k, v):
        # q/k/v: (batch, len, d_model); returns (batch, len_q, d_model).
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()
        # Keep the raw query for the residual connection below.
        residual = q
        # Project and split into heads: (batch, len, n_head, d_*).
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        # Fold heads into the batch dimension for bmm-based attention.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
        output, attn, log_attn = self.attention(q, k, v)
        # Un-fold heads and concatenate them along the feature dimension.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
        # Final projection + dropout, then residual and post-layer-norm.
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)
        return output
class FEAT(FewShotModel):
    """FEAT: Few-shot Embedding Adaptation with Transformer.

    Class prototypes (support means) are adapted jointly by a single-head
    self-attention block before nearest-prototype / cosine classification.
    In training mode also returns regularization logits computed from the
    class centers of the attention-transformed support+query task.
    """
    def __init__(self, args):
        super().__init__(args)
        # Embedding width is fixed by the chosen backbone.
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')
        self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)
    def _forward(self, instance_embs, support_idx, query_idx):
        """Score queries against attention-adapted prototypes; also return
        regularization logits when training."""
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))
        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # adapt all prototypes of a task jointly via self-attention
        proto = self.slf_attn(proto, proto, proto)
        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
        logits = logits.view(-1, num_proto)
        # for regularization
        if self.training:
            # auxiliary task: transform each class's support+query instances
            aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
                                  query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
            num_query = np.prod(aux_task.shape[1:3])
            aux_task = aux_task.permute([0, 2, 1, 3])
            aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
            # apply the transformation over the Aug Task
            aux_emb = self.slf_attn(aux_task, aux_task, aux_task) # T x N x (K+Kq) x d
            # compute class mean
            aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
            aux_center = torch.mean(aux_emb, 2) # T x N x d
            if self.args.use_euclidean:
                # NOTE(review): unlike deepset.py/bilstm.py this permutes
                # aux_task before flattening — verify the instance ordering
                # matches the labels built by the training loop.
                aux_task = aux_task.permute([1,0,2]).contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
                aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
                aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
                logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
            else:
                aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
                aux_task = aux_task.permute([1,0,2]).contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
                logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
            logits_reg = logits_reg.view(-1, num_proto)
            return logits, logits_reg
        else:
            return logits
| 6,494 | 42.590604 | 119 | py |
FEAT | FEAT-master/model/models/semi_feat.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
class ScaledDotProductAttention(nn.Module):
    """Temperature-scaled dot-product attention.

    forward(q, k, v) returns a 3-tuple:
      * the attended values  softmax(q k^T / temperature) @ v,
      * the attention weights (after dropout),
      * the log-softmax of the raw scores (before dropout).
    """
    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, q, k, v):
        # Raw pairwise scores: (B, Lq, Lk), scaled down by the temperature.
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        # Log-probabilities are taken from the pre-dropout scores.
        log_attn = F.log_softmax(scores, 2)
        weights = self.dropout(self.softmax(scores))
        return torch.bmm(weights, v), weights, log_attn
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module (post-norm Transformer style).

    Projects inputs into ``n_head`` query/key/value subspaces, applies
    scaled dot-product attention per head, concatenates the heads, and
    finishes with a linear projection, dropout, a residual connection to
    the query input, and layer normalization.
    '''
    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        # n_head: number of attention heads
        # d_model: input/output feature dimensionality
        # d_k / d_v: per-head query-key and value dimensionalities
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # One Linear per projection; heads are split out later via view().
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
        # Scores are scaled by sqrt(d_k) inside the attention module.
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)
    def forward(self, q, k, v):
        # q/k/v: (batch, len, d_model); returns (batch, len_q, d_model).
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()
        # Keep the raw query for the residual connection below.
        residual = q
        # Project and split into heads: (batch, len, n_head, d_*).
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        # Fold heads into the batch dimension for bmm-based attention.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
        output, attn, log_attn = self.attention(q, k, v)
        # Un-fold heads and concatenate them along the feature dimension.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
        # Final projection + dropout, then residual and post-layer-norm.
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)
        return output
class SemiFEAT(FewShotModel):
    """Semi-supervised FEAT few-shot classifier.

    Prototypes are adapted with a single-head self-attention over the union
    of prototypes and (unlabeled) query embeddings, then compared to queries
    with either Euclidean or cosine similarity.  During training an
    auxiliary contrastive regularization term over the full task is
    computed as well.
    """
    def __init__(self, args):
        super().__init__(args)
        # Embedding width is fixed by the chosen backbone.
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')
        self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)

    def _forward(self, instance_embs, support_idx, query_idx):
        # instance_embs: flat embeddings; support_idx/query_idx select and
        # reshape them into (task, shot, way, emb) layouts — TODO confirm
        # exact index layout against FewShotModel.
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query   = instance_embs[query_idx.contiguous().view(-1)].contiguous().view(  *(query_idx.shape   + (-1,)))
        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # Semi-supervised twist: attend prototypes over prototypes + queries.
        whole_set = torch.cat([proto, query.view(num_batch, -1, emb_dim)], 1)
        proto = self.slf_attn(proto, whole_set, whole_set)
        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            # Negative squared distance acts as the logit.
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
            logits = logits.view(-1, num_proto)
        # for regularization
        if self.training:
            # Build the full task (support + query per class) and re-attend it.
            aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
                                  query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
            num_query = np.prod(aux_task.shape[1:3])
            aux_task = aux_task.permute([0, 2, 1, 3])
            aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
            # apply the transformation over the Aug Task
            aux_emb = self.slf_attn(aux_task, aux_task, aux_task) # T x N x (K+Kq) x d
            # compute class mean
            aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
            aux_center = torch.mean(aux_emb, 2) # T x N x d
            if self.args.use_euclidean:
                aux_task = aux_task.permute([1,0,2]).contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
                aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
                aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
                logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
            else:
                aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
                aux_task = aux_task.permute([1,0,2]).contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
                logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
                logits_reg = logits_reg.view(-1, num_proto)
            return logits, logits_reg
        else:
            return logits
| 6,584 | 42.9 | 119 | py |
FEAT | FEAT-master/model/dataloader/tiered_imagenet.py | from __future__ import print_function
import os
import os.path as osp
import numpy as np
import pickle
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image
# Set the appropriate paths of the datasets here.
# Dataset root directories, resolved relative to this file's location.
THIS_PATH = osp.dirname(__file__)
ROOT_PATH1 = osp.abspath(osp.join(THIS_PATH, '..', '..', '..'))
ROOT_PATH2 = osp.abspath(osp.join(THIS_PATH, '..', '..'))
IMAGE_PATH = osp.join(ROOT_PATH1, 'data/tieredimagenet/')
# NOTE(review): this is the tieredImageNet loader but SPLIT_PATH points at
# the miniimagenet split directory — looks unused below, but confirm.
SPLIT_PATH = osp.join(ROOT_PATH2, 'data/miniimagenet/split')
def buildLabelIndex(labels):
    """Group sample indices by label.

    Parameters
    ----------
    labels : iterable
        Label of each sample, in sample order.

    Returns
    -------
    dict
        Maps each distinct label to the list of sample indices (ascending)
        that carry it.
    """
    label2inds = {}
    for idx, label in enumerate(labels):
        # setdefault replaces the original explicit membership test + insert.
        label2inds.setdefault(label, []).append(idx)
    return label2inds
def load_data(file):
    """Load a pickle file, tolerating pickles written by Python 2.

    Parameters
    ----------
    file : str
        Path to the pickle file.

    Returns
    -------
    object
        The unpickled payload.
    """
    try:
        with open(file, 'rb') as fo:
            data = pickle.load(fo)
        return data
    except UnicodeDecodeError:
        # Python-2 pickle holding 8-bit strings: re-read with a latin1
        # decoder.  The original caught *every* exception here, which would
        # also mask genuine errors (missing file, corrupt data).
        with open(file, 'rb') as fo:
            data = pickle.load(fo, encoding='latin1')
        return data
# Per-split (images, labels) file pairs: images are .npz archives and the
# labels are pickles, as consumed by tieredImageNet.__init__ below.
file_path = {'train':[os.path.join(IMAGE_PATH, 'train_images.npz'), os.path.join(IMAGE_PATH, 'train_labels.pkl')],
             'val':[os.path.join(IMAGE_PATH, 'val_images.npz'), os.path.join(IMAGE_PATH,'val_labels.pkl')],
             'test':[os.path.join(IMAGE_PATH, 'test_images.npz'), os.path.join(IMAGE_PATH, 'test_labels.pkl')]}
class tieredImageNet(data.Dataset):
    """tieredImageNet split loaded from pre-packed .npz images and pickled
    labels (see `file_path` above).  Raw wnid labels are remapped to dense
    integer class ids in order of first appearance.
    """
    def __init__(self, setname, args, augment=False):
        assert(setname=='train' or setname=='val' or setname=='test')
        image_path = file_path[setname][0]
        label_path = file_path[setname][1]
        data_train = load_data(label_path)
        labels = data_train['labels']
        self.data = np.load(image_path)['images']
        # Remap raw wnid labels to 0..C-1 in order of first appearance.
        label = []
        lb = -1
        self.wnids = []
        for wnid in labels:
            if wnid not in self.wnids:
                self.wnids.append(wnid)
                lb += 1
            label.append(lb)
        self.label = label
        self.num_class = len(set(label))
        # Augmentation only applies to the training split.
        if augment and setname == 'train':
            transforms_list = [
                  transforms.RandomCrop(84, padding=8),
                  transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                  transforms.RandomHorizontalFlip(),
                  transforms.ToTensor(),
                ]
        else:
            transforms_list = [
                  transforms.ToTensor(),
                ]
        # Transformation
        # Normalization statistics depend on the backbone the embeddings
        # were trained with.
        if args.backbone_class == 'ConvNet':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([0.485, 0.456, 0.406]),
                                     np.array([0.229, 0.224, 0.225]))
            ])
        elif args.backbone_class == 'ResNet':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
                                     np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))
            ])
        elif args.backbone_class == 'Res12':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]]),
                                     np.array([x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]]))
            ])
        elif args.backbone_class == 'Res18':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        elif args.backbone_class == 'WRN':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        else:
            raise ValueError('Non-supported Network Types. Please Revise Data Pre-Processing Scripts.')

    def __getitem__(self, index):
        # Images are stored as HWC uint8 arrays — presumably RGB; verify
        # against the preprocessing script.
        img, label = self.data[index], self.label[index]
        img = self.transform(Image.fromarray(img))
        return img, label

    def __len__(self):
        return len(self.data)
| 4,423 | 35.561983 | 114 | py |
FEAT | FEAT-master/model/dataloader/cub.py | import os.path as osp
import PIL
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
# Dataset root directories, resolved relative to this file's location.
THIS_PATH = osp.dirname(__file__)
ROOT_PATH1 = osp.abspath(osp.join(THIS_PATH, '..', '..', '..'))
ROOT_PATH2 = osp.abspath(osp.join(THIS_PATH, '..', '..'))
IMAGE_PATH = osp.join(ROOT_PATH1, 'data/cub')
SPLIT_PATH = osp.join(ROOT_PATH2, 'data/cub/split')
CACHE_PATH = osp.join(ROOT_PATH2, '.cache/')
# This is for the CUB dataset
# It is notable, we assume the cub images are cropped based on the given bounding boxes
# The concept labels are based on the attribute value, which are for further use (and not used in this work)
class CUB(Dataset):
    """Few-shot CUB-200 dataset.

    Images are assumed pre-cropped with the provided bounding boxes (see
    module comment above).  When ``args.orig_imsize != -1`` preprocessed
    images are cached to disk with torch; otherwise paths are read lazily.

    Fixes vs. the original:
    - this module never imported ``torch`` nor defined ``identity``, so the
      image-cache branch raised NameError at runtime; both are now
      provided locally.
    - removed a dead, unused ``lines`` read of the split csv (parse_csv
      re-reads it anyway).
    """
    def __init__(self, setname, args, augment=False):
        im_size = args.orig_imsize
        txt_path = osp.join(SPLIT_PATH, setname + '.csv')
        cache_path = osp.join( CACHE_PATH, "{}.{}.{}.pt".format(self.__class__.__name__, setname, im_size) )
        self.use_im_cache = ( im_size != -1 ) # im_size == -1 disables the image cache
        if self.use_im_cache:
            # Local import/definition: missing in the original module scope.
            import torch
            identity = lambda x: x
            if not osp.exists(cache_path):
                print('* Cache miss... Preprocessing {}...'.format(setname))
                resize_ = identity if im_size < 0 else transforms.Resize(im_size)
                data, label = self.parse_csv(txt_path)
                self.data = [ resize_(Image.open(path).convert('RGB')) for path in data ]
                self.label = label
                print('* Dump cache from {}'.format(cache_path))
                torch.save({'data': self.data, 'label': self.label }, cache_path)
            else:
                print('* Load cache from {}'.format(cache_path))
                cache = torch.load(cache_path)
                self.data = cache['data']
                self.label = cache['label']
        else:
            self.data, self.label = self.parse_csv(txt_path)
        self.num_class = np.unique(np.array(self.label)).shape[0]
        image_size = 84
        # Augmentation only applies to the training split.
        if augment and setname == 'train':
            transforms_list = [
                  transforms.RandomResizedCrop(image_size),
                  transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                  transforms.RandomHorizontalFlip(),
                  transforms.ToTensor(),
                ]
        else:
            transforms_list = [
                  transforms.Resize(92),
                  transforms.CenterCrop(image_size),
                  transforms.ToTensor(),
                ]
        # Normalization statistics depend on the backbone in use.
        if args.backbone_class == 'ConvNet':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([0.485, 0.456, 0.406]),
                                     np.array([0.229, 0.224, 0.225]))
            ])
        elif args.backbone_class == 'Res12':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]]),
                                     np.array([x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]]))
            ])
        elif args.backbone_class == 'Res18':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        elif args.backbone_class == 'WRN':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        else:
            raise ValueError('Non-supported Network Types. Please Revise Data Pre-Processing Scripts.')

    def parse_csv(self, txt_path):
        """Read a split csv of (name, wnid) rows; return image paths and
        dense integer labels assigned in order of first wnid appearance."""
        data = []
        label = []
        lb = -1
        self.wnids = []
        lines = [x.strip() for x in open(txt_path, 'r').readlines()][1:]
        for l in lines:
            context = l.split(',')
            name = context[0]
            wnid = context[1]
            path = osp.join(IMAGE_PATH, name)
            if wnid not in self.wnids:
                self.wnids.append(wnid)
                lb += 1
            data.append(path)
            label.append(lb)
        return data, label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        data, label = self.data[i], self.label[i]
        if self.use_im_cache:
            # cached entries are already PIL images
            image = self.transform(data)
        else:
            image = self.transform(Image.open(data).convert('RGB'))
        return image, label
| 4,840 | 38.040323 | 112 | py |
FEAT | FEAT-master/model/dataloader/mini_imagenet.py | import torch
import os.path as osp
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm import tqdm
import numpy as np
# Dataset root directories, resolved relative to this file's location.
THIS_PATH = osp.dirname(__file__)
ROOT_PATH = osp.abspath(osp.join(THIS_PATH, '..', '..'))
ROOT_PATH2 = osp.abspath(osp.join(THIS_PATH, '..', '..', '..'))
IMAGE_PATH1 = osp.join(ROOT_PATH2, 'data/miniimagenet/images')
SPLIT_PATH = osp.join(ROOT_PATH, 'data/miniimagenet/split')
CACHE_PATH = osp.join(ROOT_PATH, '.cache/')
def identity(x):
    """Identity transform: hand *x* back unchanged (used when resizing is disabled)."""
    return x
class MiniImageNet(Dataset):
    """ Usage:
    miniImageNet split read from a csv of (image name, wnid) rows.  When
    ``args.orig_imsize != -1`` the decoded (optionally resized) images are
    cached to disk with torch to skip JPEG decoding on later runs.
    """
    def __init__(self, setname, args, augment=False):
        im_size = args.orig_imsize
        csv_path = osp.join(SPLIT_PATH, setname + '.csv')
        cache_path = osp.join( CACHE_PATH, "{}.{}.{}.pt".format(self.__class__.__name__, setname, im_size) )
        self.use_im_cache = ( im_size != -1 ) # not using cache
        if self.use_im_cache:
            if not osp.exists(cache_path):
                print('* Cache miss... Preprocessing {}...'.format(setname))
                # im_size < 0 keeps the original resolution
                resize_ = identity if im_size < 0 else transforms.Resize(im_size)
                data, label = self.parse_csv(csv_path, setname)
                self.data = [ resize_(Image.open(path).convert('RGB')) for path in data ]
                self.label = label
                print('* Dump cache from {}'.format(cache_path))
                torch.save({'data': self.data, 'label': self.label }, cache_path)
            else:
                print('* Load cache from {}'.format(cache_path))
                cache = torch.load(cache_path)
                self.data = cache['data']
                self.label = cache['label']
        else:
            self.data, self.label = self.parse_csv(csv_path, setname)
        self.num_class = len(set(self.label))
        image_size = 84
        # Augmentation only applies to the training split.
        if augment and setname == 'train':
            transforms_list = [
                  transforms.RandomResizedCrop(image_size),
                  transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                  transforms.RandomHorizontalFlip(),
                  transforms.ToTensor(),
                ]
        else:
            transforms_list = [
                  transforms.Resize(92),
                  transforms.CenterCrop(image_size),
                  transforms.ToTensor(),
                ]
        # Transformation
        # Normalization statistics depend on the backbone in use.
        if args.backbone_class == 'ConvNet':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([0.485, 0.456, 0.406]),
                                     np.array([0.229, 0.224, 0.225]))
            ])
        elif args.backbone_class == 'Res12':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]]),
                                     np.array([x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]]))
            ])
        elif args.backbone_class == 'Res18':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        elif args.backbone_class == 'WRN':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        else:
            raise ValueError('Non-supported Network Types. Please Revise Data Pre-Processing Scripts.')

    def parse_csv(self, csv_path, setname):
        """Read the split csv; return image paths and dense integer labels
        assigned in order of first wnid appearance."""
        lines = [x.strip() for x in open(csv_path, 'r').readlines()][1:]
        data = []
        label = []
        lb = -1
        self.wnids = []
        for l in tqdm(lines, ncols=64):
            name, wnid = l.split(',')
            path = osp.join(IMAGE_PATH1, name)
            if wnid not in self.wnids:
                self.wnids.append(wnid)
                lb += 1
            data.append( path )
            label.append(lb)
        return data, label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        data, label = self.data[i], self.label[i]
        if self.use_im_cache:
            # cached entries are already PIL images
            image = self.transform(data)
        else:
            image = self.transform(Image.open(data).convert('RGB'))
        return image, label
| 4,581 | 36.252033 | 112 | py |
FEAT | FEAT-master/model/dataloader/samplers.py | import torch
import numpy as np
class CategoriesSampler():
    """Episodic sampler: each batch picks `n_cls` random classes and
    `n_per` random examples of each.

    The yielded index tensor is shot-major: the first `n_cls` entries hold
    one example per chosen class, then the next `n_cls`, and so on.
    """

    def __init__(self, label, n_batch, n_cls, n_per):
        self.n_batch = n_batch
        self.n_cls = n_cls
        self.n_per = n_per
        labels = np.array(label)
        # One index tensor per label value in [0, max(label)].
        self.m_ind = [
            torch.from_numpy(np.argwhere(labels == cls).reshape(-1))
            for cls in range(max(labels) + 1)
        ]

    def __len__(self):
        return self.n_batch

    def __iter__(self):
        for _ in range(self.n_batch):
            picked = torch.randperm(len(self.m_ind))[:self.n_cls]
            chosen = []
            for cls in picked:
                members = self.m_ind[cls]
                keep = torch.randperm(len(members))[:self.n_per]
                chosen.append(members[keep])
            # stack -> (n_cls, n_per); transpose interleaves classes per shot
            yield torch.stack(chosen).t().reshape(-1)
class RandomSampler():
    """Yield `n_batch` batches of `n_per` sample indices, each drawn
    without replacement from the whole dataset (classes ignored)."""

    def __init__(self, label, n_batch, n_per):
        self.n_batch = n_batch
        self.n_per = n_per
        self.label = np.array(label)
        self.num_label = self.label.shape[0]

    def __len__(self):
        return self.n_batch

    def __iter__(self):
        for _ in range(self.n_batch):
            yield torch.randperm(self.num_label)[:self.n_per]
# sample for each class
class ClassSampler():
    """Iterate over classes in order, yielding a shuffled index tensor per
    class.  With `n_per` set to None the whole class is yielded; otherwise
    a random subset of size `n_per`."""

    def __init__(self, label, n_per=None):
        self.n_per = n_per
        arr = np.array(label)
        self.m_ind = [
            torch.from_numpy(np.argwhere(arr == cls).reshape(-1))
            for cls in range(max(arr) + 1)
        ]

    def __len__(self):
        return len(self.m_ind)

    def __iter__(self):
        for cls in torch.arange(len(self.m_ind)):
            members = self.m_ind[int(cls)]
            if self.n_per is None:
                order = torch.randperm(len(members))
            else:
                order = torch.randperm(len(members))[:self.n_per]
            yield members[order]
# for ResNet Fine-Tune, which output the same index of task examples several times
class InSetSampler():
    """Yield `n_batch` batches of `n_sbatch` entries sampled (without
    replacement per batch) from a fixed index pool tensor."""

    def __init__(self, n_batch, n_sbatch, pool): # pool is a tensor
        self.n_batch = n_batch
        self.n_sbatch = n_sbatch
        self.pool = pool
        self.pool_size = pool.shape[0]

    def __len__(self):
        return self.n_batch

    def __iter__(self):
        for _ in range(self.n_batch):
            picks = torch.randperm(self.pool_size)[:self.n_sbatch]
            yield self.pool[picks]
PyGame-Learning-Environment | PyGame-Learning-Environment-master/examples/keras_nonvis.py | # thanks to @edersantana and @fchollet for suggestions & help.
import numpy as np
from ple import PLE # our environment
from ple.games.catcher import Catcher
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import SGD
from example_support import ExampleAgent, ReplayMemory, loop_play_forever
class Agent(ExampleAgent):
    """
    Our agent takes 1D inputs which are flattened.
    We define a full connected model below.
    """
    def __init__(self, *args, **kwargs):
        ExampleAgent.__init__(self, *args, **kwargs)
        # A "state" is num_frames stacked game-state vectors, flattened to 1D.
        # NOTE(review): num_frames and batch_size are read from module-level
        # globals set in the __main__ block below, not from self — confirm.
        self.state_dim = self.env.getGameStateDims()
        self.state_shape = np.prod((num_frames,) + self.state_dim)
        self.input_shape = (batch_size, self.state_shape)

    def build_model(self):
        # Fully-connected Q-network: state -> 256 -> 512 -> one Q-value per action.
        model = Sequential()
        model.add(Dense(
            input_dim=self.state_shape, output_dim=256, activation="relu", init="he_uniform"
        ))
        model.add(Dense(
            512, activation="relu", init="he_uniform"
        ))
        model.add(Dense(
            self.num_actions, activation="linear", init="he_uniform"
        ))
        # q_loss (Huber-style) comes from ExampleAgent.
        model.compile(loss=self.q_loss, optimizer=SGD(lr=self.lr))

        self.model = model
def nv_state_preprocessor(state):
    """
    This preprocesses our state from PLE. We rescale the values to be between
    0,1 and -1,1.

    Fix: ``state.values()`` is materialized with ``list()`` so the function
    works under Python 3 as well (where values() returns a view that numpy
    would wrap as an object array) while behaving identically on Python 2.
    """
    # taken by inspection of source code. Better way is on its way!
    max_values = np.array([128.0, 20.0, 128.0, 128.0])
    state = np.array([list(state.values())]) / max_values

    return state.flatten()
if __name__ == "__main__":
# this takes about 15 epochs to converge to something that performs decently.
# feel free to play with the parameters below.
# training parameters
num_epochs = 15
num_steps_train = 15000 # steps per epoch of training
num_steps_test = 3000
update_frequency = 4 # step frequency of model training/updates
# agent settings
batch_size = 32
num_frames = 4 # number of frames in a 'state'
frame_skip = 2
# percentage of time we perform a random action, help exploration.
epsilon = 0.15
epsilon_steps = 30000 # decay steps
epsilon_min = 0.1
lr = 0.01
discount = 0.95 # discount factor
rng = np.random.RandomState(24)
# memory settings
max_memory_size = 100000
min_memory_size = 1000 # number needed before model training starts
epsilon_rate = (epsilon - epsilon_min) / epsilon_steps
# PLE takes our game and the state_preprocessor. It will process the state
# for our agent.
game = Catcher(width=128, height=128)
env = PLE(game, fps=60, state_preprocessor=nv_state_preprocessor)
agent = Agent(env, batch_size, num_frames, frame_skip, lr,
discount, rng, optimizer="sgd_nesterov")
agent.build_model()
memory = ReplayMemory(max_memory_size, min_memory_size)
env.init()
for epoch in range(1, num_epochs + 1):
steps, num_episodes = 0, 0
losses, rewards = [], []
env.display_screen = False
# training loop
while steps < num_steps_train:
episode_reward = 0.0
agent.start_episode()
while env.game_over() == False and steps < num_steps_train:
state = env.getGameState()
reward, action = agent.act(state, epsilon=epsilon)
memory.add([state, action, reward, env.game_over()])
if steps % update_frequency == 0:
loss = memory.train_agent_batch(agent)
if loss is not None:
losses.append(loss)
epsilon = np.max(epsilon_min, epsilon - epsilon_rate)
episode_reward += reward
steps += 1
if num_episodes % 5 == 0:
print "Episode {:01d}: Reward {:0.1f}".format(num_episodes, episode_reward)
rewards.append(episode_reward)
num_episodes += 1
agent.end_episode()
print "\nTrain Epoch {:02d}: Epsilon {:0.4f} | Avg. Loss {:0.3f} | Avg. Reward {:0.3f}".format(epoch, epsilon, np.mean(losses), np.sum(rewards) / num_episodes)
steps, num_episodes = 0, 0
losses, rewards = [], []
# display the screen
env.display_screen = True
# slow it down so we can watch it fail!
env.force_fps = False
# testing loop
while steps < num_steps_test:
episode_reward = 0.0
agent.start_episode()
while env.game_over() == False and steps < num_steps_test:
state = env.getGameState()
reward, action = agent.act(state, epsilon=0.05)
episode_reward += reward
steps += 1
# done watching after 500 steps.
if steps > 500:
env.force_fps = True
env.display_screen = False
if num_episodes % 5 == 0:
print "Episode {:01d}: Reward {:0.1f}".format(num_episodes, episode_reward)
rewards.append(episode_reward)
num_episodes += 1
agent.end_episode()
print "Test Epoch {:02d}: Best Reward {:0.3f} | Avg. Reward {:0.3f}".format(epoch, np.max(rewards), np.sum(rewards) / num_episodes)
print "\nTraining complete. Will loop forever playing!"
loop_play_forever(env, agent)
| 5,449 | 31.634731 | 167 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/examples/example_support.py | import numpy as np
from collections import deque
# keras and model related
from keras.models import Sequential
from keras.layers.core import Dense, Flatten
from keras.layers.convolutional import Convolution2D
from keras.optimizers import SGD, Adam, RMSprop
import theano.tensor as T
class ExampleAgent():
    """
    Implements a DQN-ish agent. It has replay memory and epsilon decay. It is missing model freezing. The models are sensitive to the parameters and if applied to other games must be tinkered with.
    """
    def __init__(self, env, batch_size, num_frames,
                 frame_skip, lr, discount, rng, optimizer="adam", frame_dim=None):
        self.env = env
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.frame_skip = frame_skip
        self.lr = lr
        self.discount = discount
        self.rng = rng

        # Resolve the optimizer by name.
        if optimizer == "adam":
            opt = Adam(lr=self.lr)
        elif optimizer == "sgd":
            opt = SGD(lr=self.lr)
        elif optimizer == "sgd_nesterov":
            opt = SGD(lr=self.lr, nesterov=True)
        elif optimizer == "rmsprop":
            opt = RMSprop(lr=self.lr, rho=0.9, epsilon=0.003)
        else:
            raise ValueError("Unrecognized optmizer")

        self.optimizer = opt

        # frame_dim defaults to the raw screen size; subclasses may override.
        self.frame_dim = self.env.getScreenDims() if frame_dim is None else frame_dim
        self.state_shape = (num_frames,) + self.frame_dim
        self.input_shape = (batch_size,) + self.state_shape

        # Rolling window of the last num_frames observations.
        self.state = deque(maxlen=num_frames)
        self.actions = self.env.getActionSet()
        self.num_actions = len(self.actions)
        self.model = None

    def q_loss(self, y_true, y_pred):
        # Huber-style loss: quadratic below a delta of 1.0, linear above.
        # assume clip_delta is 1.0
        # along with sum accumulator.
        diff = y_true - y_pred
        _quad = T.minimum(abs(diff), 1.0)
        _lin = abs(diff) - _quad
        loss = 0.5 * _quad ** 2 + _lin
        loss = T.sum(loss)
        return loss

    def build_model(self):
        # Default conv Q-network over stacked frames; subclasses may replace.
        model = Sequential()
        model.add(Convolution2D(
            16, 8, 8, input_shape=(self.num_frames,) + self.frame_dim,
            subsample=(4, 4), activation="relu", init="he_uniform"
        ))
        model.add(Convolution2D(
            16, 4, 4, subsample=(2, 2), activation="relu", init="he_uniform"
        ))
        model.add(Convolution2D(
            32, 3, 3, subsample=(1, 1), activation="relu", init="he_uniform"
        ))
        model.add(Flatten())
        model.add(Dense(
            512, activation="relu", init="he_uniform"
        ))
        model.add(Dense(
            self.num_actions, activation="linear", init="he_uniform"
        ))
        model.compile(loss=self.q_loss, optimizer=self.optimizer)

        self.model = model

    def predict_single(self, state):
        """
        model is expecting a batch_size worth of data. We only have one states worth of
        samples so we make an empty batch and set our state as the first row.
        """
        states = np.zeros(self.input_shape)
        states[0, ...] = state.reshape(self.state_shape)

        return self.model.predict(states)[0]  # only want the first value

    def _argmax_rand(self, arr):
        # picks a random index if there is a tie
        return self.rng.choice(np.where(arr == np.max(arr))[0])

    def _best_action(self, state):
        q_vals = self.predict_single(state)

        return self._argmax_rand(q_vals)  # the action with the best Q-value

    def act(self, state, epsilon=1.0):
        """Epsilon-greedy action selection; returns (clipped_reward, action)."""
        self.state.append(state)
        action = self.rng.randint(0, self.num_actions)
        if len(self.state) == self.num_frames:  # we havent seen enough frames
            _state = np.array(self.state)

            if self.rng.rand() > epsilon:
                action = self._best_action(_state)  # exploit

        reward = 0.0
        for i in range(self.frame_skip):  # we repeat each action a few times
            # act on the environment
            reward += self.env.act(self.actions[action])

        reward = np.clip(reward, -1.0, 1.0)
        return reward, action

    def start_episode(self, N=3):
        # Reset and take a few random NOOPs so episodes don't all start alike.
        self.env.reset_game()  # reset
        for i in range(self.rng.randint(N)):
            self.env.act(self.env.NOOP)  # perform a NOOP

    def end_episode(self):
        self.state.clear()
class ReplayMemory():
    """Fixed-size FIFO replay buffer of [state, action, reward, terminal]
    transitions; training only begins once `min_size` transitions exist."""

    def __init__(self, max_size, min_size):
        self.min_replay_size = min_size
        self.memory = deque(maxlen=max_size)

    def __len__(self):
        return len(self.memory)

    def add(self, transition):
        # transition: [state, action, reward, terminal]
        self.memory.append(transition)

    def train_agent_batch(self, agent):
        """Sample one random batch and run a single gradient step; returns
        the batch loss, or None while the buffer is still warming up."""
        if len(self.memory) > self.min_replay_size:
            states, targets = self._random_batch(agent)  # get a random batch
            return agent.model.train_on_batch(states, targets)  # ERR?
        else:
            return None

    def _random_batch(self, agent):
        """Build a Q-learning batch: each sample is num_frames consecutive
        transitions; the target updates the taken action toward
        r + discount * max_a' Q(s').

        NOTE(review): `targets[i, actions]` indexes with the whole
        num_frames-long action history, updating several action columns at
        once — looks intentional-ish but worth confirming against a DQN
        reference.
        """
        inputs = np.zeros(agent.input_shape)
        targets = np.zeros((agent.batch_size, agent.num_actions))

        seen = []
        idx = agent.rng.randint(
            0,
            high=len(
                self.memory) -
            agent.num_frames -
            1)

        for i in range(agent.batch_size):
            # Re-draw until we get an index not used yet in this batch.
            while idx in seen:
                idx = agent.rng.randint(0, high=len(
                    self.memory) - agent.num_frames - 1)

            # num_frames + 1 states: the first num_frames form s, the last
            # num_frames form s'.
            states = np.array([self.memory[idx + j][0]
                               for j in range(agent.num_frames + 1)])
            art = np.array([self.memory[idx + j][1:]
                            for j in range(agent.num_frames)])

            actions = art[:, 0].astype(int)
            rewards = art[:, 1]
            terminals = art[:, 2]

            state = states[:-1]
            state_next = states[1:]

            inputs[i, ...] = state.reshape(agent.state_shape)
            # we could make zeros but pointless.
            targets[i] = agent.predict_single(state)
            Q_prime = np.max(agent.predict_single(state_next))

            # Terminal transitions drop the bootstrapped future value.
            targets[i, actions] = rewards + \
                (1 - terminals) * (agent.discount * Q_prime)

            seen.append(idx)

        return inputs, targets
def loop_play_forever(env, agent):
    # our forever play loop
    # Runs the trained agent at watchable speed until Ctrl-C is pressed.
    try:
        # slow it down
        env.display_screen = True
        env.force_fps = False

        while True:
            agent.start_episode()
            episode_reward = 0.0
            while env.game_over() == False:
                state = env.getGameState()
                # small epsilon keeps a little exploration while showing off
                reward, action = agent.act(state, epsilon=0.05)
                episode_reward += reward

            print "Agent score {:0.1f} reward for episode.".format(episode_reward)
            agent.end_episode()

    except KeyboardInterrupt:
        print "Exiting out!"
PyGame-Learning-Environment | PyGame-Learning-Environment-master/docs/conf.py | import sys
import os
from mock import Mock
# Mock out pygame so `ple` can be imported on machines (e.g. the docs
# builder) that do not have pygame installed.
sys.modules['pygame'] = Mock()
sys.modules['pygame.constants'] = Mock()

#so we can import ple
sys.path.append(os.path.join(os.path.dirname(__name__), ".."))

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'numpydoc'
]

numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

source_suffix = '.rst'

master_doc = 'index'

# General information about the project.
project = u'PyGame Learning Environment'
copyright = u'2016, Norman Tasfi'
author = u'Norman Tasfi'

# NOTE(review): `ple` is imported but the version strings below are
# hard-coded rather than read from ple — presumably intentional; confirm.
import ple
version = u'0.1.dev1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.dev1'

language = None

exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

pygments_style = 'sphinx'

todo_include_todos = False

#from lasagne!
# Use the ReadTheDocs theme locally when available; RTD injects its own.
if os.environ.get('READTHEDOCS') != 'True':
    try:
        import sphinx_rtd_theme
    except ImportError:
        pass  # assume we have sphinx >= 1.3
    else:
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
        html_theme = 'sphinx_rtd_theme'

html_static_path = ['_static']

htmlhelp_basename = 'PyGameLearningEnvironmentdoc'

latex_elements = {
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'PyGameLearningEnvironment.tex', u'PyGame Learning Environment Documentation',
     u'Norman Tasfi', 'manual'),
]

man_pages = [
    (master_doc, 'pygamelearningenvironment', u'PyGame Learning Environment Documentation',
     [author], 1)
]

texinfo_documents = [
    (master_doc, 'PyGameLearningEnvironment', u'PyGame Learning Environment Documentation',
     author, 'PyGameLearningEnvironment', 'RL for all.',
     'Miscellaneous'),
]
Intraclass-clustering-measures | Intraclass-clustering-measures-main/measures.py | '''
Measures of intraclass clustering ability and generalization
'''
import sys
sys.path.insert(0, "../")
import warnings
import numpy as np
from scipy.spatial.distance import cosine
from sklearn.metrics import silhouette_score, silhouette_samples, calinski_harabasz_score
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from keras import losses
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Activation
from keras.constraints import Constraint
from keras.optimizers import SGD
from keras.callbacks import LearningRateScheduler, Callback
from keras.engine.training_arrays import predict_loop, test_loop
from keras.preprocessing.image import ImageDataGenerator
from utils_training import history_todict, lr_schedule
def model_extract_tensors(model,input_data,tensors,batch_size=128, training_phase = 0):
    # type: evaluate arbitrary backend `tensors` on `input_data` in batches.
    # `training_phase`: 0 = test mode, 1 = training mode (affects dropout/BN).
    input_tensors = [model.inputs[0], # input data
                     K.learning_phase()] # train or test mode
    f = K.function(inputs=input_tensors, outputs=tensors)
    # last element of inputs is not sliced in batches thanks to keras :)
    inputs = [input_data, training_phase]
    outputs = predict_loop(model,f, inputs, batch_size = batch_size, verbose = 0)
    return outputs
def collect_activations(model,x,batch_size = 128,training_phase = 0,preact=False):
    # Gather per-layer (pre-)activations of every ReLU-like layer of `model`
    # on inputs `x`.  Conv layers (4-D) are reduced with global max pooling
    # so every entry in the returned list is (num_samples, num_units).
    # NOTE(review): ControllableReLU is referenced here but not imported in
    # the visible header — presumably defined elsewhere in this package.
    # collect activation layers
    relu_outputs = []
    for layer in model.layers[:-1]:
        if ('relu' in layer.name) or isinstance(layer,Activation) or isinstance(layer,ControllableReLU):
            # preact=True inspects the layer input (before the nonlinearity)
            representation = layer.input if preact else layer.output
            if len(layer.input_shape)==2:
                relu_outputs.append(representation)
            elif len(layer.input_shape)==4:
                relu_outputs.append(K.max(representation,axis = [1,2])) # global max pooling
    # extract relu activations
    activations = model_extract_tensors(model,x,relu_outputs,batch_size=batch_size,training_phase = training_phase)
    if not isinstance(activations,list):
        activations = [activations]
    return activations
def evaluate_in_training_mode(model,x,y,sample_weights = None, batch_size = 128, verbose = 0):
    """Evaluate `model` on (x, y) with the backend learning phase forced to 1
    (training mode), so dropout/batch-norm behave as during training.

    Parameters
    ----------
    model : keras Model
    x, y : arrays of inputs and targets
    sample_weights : array, optional
        Per-sample loss weights; defaults to uniform weights.
    batch_size : int
    verbose : int

    Returns
    -------
    Loss/metric values as produced by keras' test loop.
    """
    if sample_weights is None:
        # `is None` rather than the original `== None`: equality against a
        # numpy array is elementwise and raises an ambiguous-truth error.
        sample_weights = np.ones((x.shape[0],),np.float32)
    ins = [x, y, sample_weights, 1]  # trailing 1 = learning phase "train"
    model._make_test_function()
    f = model.test_function
    return test_loop(model, f, ins,
                     batch_size=batch_size,
                     verbose=verbose)
def blackbox_subclass(model,x,y,suby, batch_size = 128,training_phase = 0,data_subset = 1.,agg = 'max'):
    '''
    measure c_0

    Black-box intraclass clustering measure.  For each subclass it compares
    how well the model's confidence survives linear interpolation between
    (a) pairs of samples within the subclass versus (b) pairs spanning the
    subclass and the rest of its class.  The per-subclass score is the ratio
    (b)/(a); the median over subclasses is returned.

    Parameters: `y` and `suby` are one-hot class / subclass matrices with
    matching rows; `agg` aggregates prediction deviation along the
    interpolation path ('max' or 'sum').
    '''
    if data_subset <1.: # use subset of the data to estimate metric
        subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
        x,y,suby = x[subset],y[subset],suby[subset]
    # class - subclass correspondence (nbclasses,nbsubclasses)
    # 1: all samples from a subclass are in a given class, 0: no samples from a subclass are in a given class
    correspondence = np.dot(y.T,suby)/suby.sum(axis = 0)
    metric_per_subclass = []
    for subclass_index in range(suby.shape[1]):
        # the class this subclass predominantly belongs to
        class_index = np.argmax(correspondence[:,subclass_index])
        samples_subclass = suby[:,subclass_index].astype(bool)
        # selects samples from the class to which the subclass belongs
        samples_class = y[:,class_index]
        # remove samples from the subclass
        samples_class = (samples_class-samples_class*samples_subclass).astype(bool)
        x_subclass = x[samples_subclass]
        # random intra-subclass pairing via a shuffled copy
        x_subclass_shuffled = x_subclass[np.random.permutation(len(x_subclass))]
        x_class = x[samples_class]
        x_class = x_class[np.random.permutation(len(x_class))[:len(x_subclass)]]
        scores = []
        for x1,x2 in [(x_subclass,x_subclass_shuffled),(x_subclass,x_class)]:
            interpolation_factors = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
            outs = []
            for factor in interpolation_factors:
                # interpolate between samples from x1 and x2
                preds = model.predict(x1*factor + x2*(1-factor), batch_size=batch_size)
                # For each interpolation point, record prediction on the correct class
                outs.append(preds[:,class_index])
            if agg == 'max':
                # for each pair of samples, compute maximum deviation from perfect prediction along the interpolation points
                # average over all pairs of samples
                scores.append(np.mean(np.max(1-np.array(outs),axis = 0)))
            elif agg == 'sum':
                scores.append(np.mean(np.sum(1-np.array(outs),axis = 0)))
            else:
                raise ValueError('agg argument wrongly specified. Should be either max or sum.')
        # compare results for pairs of samples inside the same subclass, versus
        # pairs of samples from different subclasses (but still in the same class)
        metric_per_subclass.append(scores[1]/scores[0])
    return np.median(np.array(metric_per_subclass))
def neural_subclass_selectivity(model,x,y,suby, batch_size = 128,training_phase = 0,
                                layerwise = False,subclass_agg='median', neuron_agg ='max',data_subset = 1.,preact=True):
    '''
    measure c_1: neuron-wise subclass selectivity.

    For every subclass, compares each neuron's mean activation on the subclass
    samples with its mean activation on the remaining samples of the parent
    class, normalized by the summed standard deviations; the per-neuron scores
    are then aggregated over neurons (neuron_agg) and over subclasses
    (subclass_agg).

    model: model passed to collect_activations
    x, y, suby: inputs, one-hot class labels, one-hot subclass labels
    batch_size, training_phase, preact: forwarded to collect_activations
    layerwise: if True return one value per layer, else one value over all layers
    subclass_agg: 'mean' | 'median' | 'max' aggregation over subclasses
    neuron_agg: 'max' | 'topk' aggregation over neurons
    data_subset: fraction of the data used to estimate the metric

    Raises ValueError when neuron_agg or subclass_agg is not a supported value
    (the original silently produced empty results / a NameError).
    '''
    if data_subset <1.: # use subset of the data to estimate metric
        subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
        x,y,suby = x[subset],y[subset],suby[subset]
    # collect activations
    activations = collect_activations(model,x,batch_size,training_phase,preact=preact)
    # class - subclass correspondence (nbclasses,nbsubclasses)
    # 1: all samples from a subclass are in a given class, 0: no samples from a subclass are in a given class
    correspondence = np.dot(y.T,suby)/suby.sum(axis = 0)
    subclass_selectivity = []
    for subclass in range(suby.shape[1]):
        samples_subclass = suby[:,subclass].astype(bool)
        # selects samples from the class to which the subclass belongs
        samples_class = y[:,np.argmax(correspondence[:,subclass])]
        # remove samples from the subclass
        samples_class = (samples_class-samples_class*suby[:,subclass]).astype(bool)
        subclass_selectivity_neurons = []
        for layer,acts in enumerate(activations):
            mean_subclass = np.mean(acts[samples_subclass],axis = 0)
            std_subclass = np.std(acts[samples_subclass],axis = 0)
            mean_class = np.mean(acts[samples_class],axis = 0)
            std_class = np.std(acts[samples_class],axis = 0)
            # epsilon guards against division by zero for constant neurons
            selectivity = (mean_subclass-mean_class) / (std_subclass + std_class+1e-7)
            # ignore dead neurons (never positive on any sample)
            selectivity = selectivity*(1-np.all(acts<0.,axis = 0))
            subclass_selectivity_neurons.append(selectivity)
        if not layerwise:
            # concatenate neurons of all layers
            subclass_selectivity_neurons = [np.concatenate(subclass_selectivity_neurons)]
        if neuron_agg == 'max':
            # max over neurons
            subclass_selectivity.append([np.max(l) for l in subclass_selectivity_neurons])
        elif neuron_agg == 'topk':
            # mean of topk neurons. k is such that (nb_neurons/nb_subclasses) neurons are selected
            subclass_selectivity.append([])
            for l in subclass_selectivity_neurons:
                k = max(round(len(l)/suby.shape[1]),1) # k should be at least 1
                subclass_selectivity[-1].append( np.mean(np.partition(l,-k)[-k:]) ) # mean of top k
        else:
            raise ValueError('neuron_agg argument wrongly specified. Should be either max or topk.')
    # dimensions should be (nb_subclasses, nb_layers) if layerwise or (nb_subclasses,1) if not layerwise
    subclass_selectivity = np.array(subclass_selectivity)
    if subclass_agg == 'mean':
        selectivity = np.mean(subclass_selectivity,axis=0)
    elif subclass_agg == 'median':
        selectivity = np.median(subclass_selectivity,axis=0)
    elif subclass_agg == 'max':
        selectivity = np.max(subclass_selectivity,axis=0)
    else:
        raise ValueError('subclass_agg argument wrongly specified. Should be mean, median or max.')
    return selectivity
def layer_subclass_clustering(model,x,y,suby, batch_size = 128,training_phase = 0, data_subset = 1.,layerwise = False,subclass_agg='median',preact=False):
    '''
    measure c_2: how well subclasses form clusters in each layer's
    representation, via per-sample silhouette scores on cosine distance.

    model: model passed to collect_activations
    x, y, suby: inputs, one-hot class labels, one-hot subclass labels
    batch_size, training_phase, preact: forwarded to collect_activations
    data_subset: fraction of the data used to estimate the metric
    layerwise: if True return one value per layer, else max over layers
    subclass_agg: 'mean' | 'median' | 'max' aggregation over subclasses;
        any other value returns the per-subclass scores unaggregated
    '''
    if data_subset <1.: # use subset of the data to estimate metric
        subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
        x,y,suby = x[subset],y[subset],suby[subset]
    # collect activations
    activations = collect_activations(model,x,batch_size,training_phase,preact=preact)
    if preact:
        # standardize then sparsify pre-activations so that cosine distances
        # behave similarly to post-ReLU activations
        for i,act in enumerate(activations):
            act = (act-np.mean(act,axis=0)) / np.std(act,axis = 0)
            # percentile is computed such that at least 10 neurons are activated by each sample in each layer on average
            percentile = min(round(100-100*10/act.shape[1]) , 75)
            thres = np.percentile(act,percentile,axis = 0,keepdims = True)
            activations[i] = np.maximum(act-thres,0)
    # class - subclass correspondence (nbclasses,nbsubclasses)
    # 1: all samples from a subclass are in a given class, 0: no samples from a subclass are in a given class
    correspondence = np.dot(y.T,suby)/suby.sum(axis = 0)
    subclass_clustering_per_layer = []
    for layer,acts in enumerate(activations):
        subclass_clustering_per_layer.append([])
        for c in range(y.shape[1]):
            samples_class = y[:,c].astype(bool)
            # provides a silhouette score per sample; labels are the subclass
            # indices (restricted to subclasses present in class c)
            score = silhouette_samples(acts[samples_class],
                                       np.where(suby[samples_class][:,(correspondence[c]>0.).astype(bool)])[1],
                                       metric='cosine')
            for subclass in np.where(correspondence[c]>0.)[0]:
                # compute mean silhouette score for each subclass
                subclass_clustering_per_layer[-1].append(np.mean(score[suby[samples_class][:,subclass].astype(bool)]))
    # dimensions should be (nb_layers, nb_subclasses)
    subclass_clustering_per_layer = np.array(subclass_clustering_per_layer)
    if not layerwise:
        # max over layers
        subclass_clustering = np.max(subclass_clustering_per_layer,axis = 0)
    else:
        subclass_clustering = subclass_clustering_per_layer
    if subclass_agg == 'mean':
        subclass_clustering = np.mean(subclass_clustering,axis = -1)
    elif subclass_agg == 'median':
        subclass_clustering = np.median(subclass_clustering,axis = -1)
    elif subclass_agg == 'max':
        subclass_clustering = np.max(subclass_clustering,axis = -1)
    return subclass_clustering
def neural_intraclass_selectivity(model,x,y,batch_size = 128,training_phase = 0, data_subset = 1.,layerwise = False,subclass_agg='mean',preact=True, k_neuron=None,not_all = False):
    '''
    measure c_3: intra-class neural selectivity.

    For each class, compares each neuron's activation std on the class samples
    with its std on a reference population (all samples, or all non-class
    samples when not_all=True). The top-k most selective neurons are averaged
    per neuron group, then scores are aggregated over classes.

    k_neuron: number of top neurons to average; if None it defaults to
        nb_neurons/nb_classes (at least 1) for each neuron group. Fixed bug:
        the default is now computed per group instead of being frozen at the
        first group's size (which was wrong for layerwise=True with layers of
        different widths).
    subclass_agg: 'mean' | 'median' | 'max' aggregation over classes;
        any other value raises ValueError (previously a NameError).
    '''
    if data_subset <1.: # use subset of the data to estimate metric
        subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
        x,y = x[subset],y[subset]
    activations = collect_activations(model,x,batch_size=batch_size,training_phase=training_phase,preact=preact)
    # pre-compute neuron-wise std on the data
    stds_all = []
    for layer,acts in enumerate(activations):
        stds_all.append(np.std(acts,axis = 0))
    # compute neural selectivity for each layer
    subclass_selectivity = []
    for c in range(y.shape[1]):
        # selects samples from the class
        samples_class = y[:,c].astype(bool)
        subclass_selectivity_layer = []
        for layer,acts in enumerate(activations):
            std_class = np.std(acts[samples_class],axis = 0)
            if not_all:
                # reference std computed on the complement of the class
                std_all = np.std(acts[(1-samples_class).astype(bool)],axis = 0)
            else:
                std_all = stds_all[layer]
            selectivity = std_class / (std_all+1e-7)
            # ignore dead neurons
            selectivity = selectivity*(1-np.all(acts<0.,axis = 0))
            subclass_selectivity_layer.append(selectivity)
        if not layerwise:
            # concatenate neurons of all layers
            subclass_selectivity_layer = [np.concatenate(subclass_selectivity_layer)]
        # mean of topk neurons
        subclass_selectivity.append([])
        for l in subclass_selectivity_layer:
            if k_neuron is None:
                # k is such that (nb_neurons/nb_classes) neurons are selected;
                # computed locally so each neuron group gets a matching k
                k = max(round(len(l)/y.shape[1]),1) # k should be at least 1
            else:
                k = k_neuron
            subclass_selectivity[-1].append( np.mean(np.partition(l,-k)[-k:]) ) # mean of top k
    # dimensions should be (nb_classes, nb_layers) if layerwise or (nb_classes,1) if not layerwise
    subclass_selectivity = np.array(subclass_selectivity)
    if subclass_agg == 'mean':
        selectivity = np.mean(subclass_selectivity,axis=0)
    elif subclass_agg == 'median':
        selectivity = np.median(subclass_selectivity,axis=0)
    elif subclass_agg == 'max':
        selectivity = np.max(subclass_selectivity,axis=0)
    else:
        raise ValueError('subclass_agg argument wrongly specified. Should be mean, median or max.')
    return selectivity
def layer_intraclass_clustering(model,x,y,batch_size = 128,training_phase = 0, data_subset = 1.,layerwise = False,subclass_agg='mean',preact=True,k_layer = 1):
    '''
    measure c_4: per layer, the ratio of the spread (std) of pairwise cosine
    distances within each class to the spread over a random subset of all
    samples.

    model: model passed to collect_activations
    x, y: inputs and one-hot class labels
    data_subset: fraction of the data used for the *global* distance
        statistics only; class-wise statistics use all samples
    layerwise: if True return one value per layer, else mean over the
        k_layer layers with the highest scores
    subclass_agg: 'mean' | 'median' | 'max' aggregation over classes;
        any other value returns the per-class scores unaggregated
    '''
    # subset only affects the global statistics (see docstring)
    subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
    # collect activations
    activations = collect_activations(model,x,batch_size=batch_size,training_phase=training_phase,preact=preact)
    # Standardize and sparsify each layer; layers containing only dead neurons
    # are dropped. A new list is built instead of mutating `activations` while
    # iterating: the original `del activations[i]` inside enumerate() skipped
    # the following layer and made later `activations[i] = ...` assignments
    # target the wrong index.
    processed = []
    for act in activations:
        # ignore dead neurons
        act = act[:,~np.all(act<0.,axis = 0)]
        if act.shape[1] == 0:
            continue # layer has only dead neurons
        act = (act-np.mean(act,axis=0)) / (np.std(act,axis = 0)+1e-7)
        # percentile chosen so that at least ~10 neurons remain active per sample
        percentile = max(min(round(100-100*10/act.shape[1]) , 75),0)
        thres = np.percentile(act,percentile,axis = 0,keepdims = True)
        processed.append(np.maximum(act-thres,0))
    activations = processed
    subclass_clustering_per_layer = []
    for layer,acts in enumerate(activations):
        dists = cosine_distances(acts[subset])
        std_all = np.std(dists)
        subclass_clustering_per_layer.append([])
        for c in range(y.shape[1]):
            samples_class = y[:,c].astype(bool)
            dists = cosine_distances(acts[samples_class])
            std_class = np.std(dists)
            selectivity = std_class / (std_all+1e-7)
            subclass_clustering_per_layer[-1].append(selectivity)
    # dimensions should be (nb_layers, nb_classes)
    subclass_clustering_per_layer = np.array(subclass_clustering_per_layer)
    if not layerwise:
        # mean over topk layers
        subclass_clustering = np.mean(np.sort(subclass_clustering_per_layer,axis = 0)[-k_layer:,:],axis=0)
    else:
        subclass_clustering = subclass_clustering_per_layer
    if subclass_agg == 'mean':
        subclass_clustering = np.mean(subclass_clustering,axis = -1)
    elif subclass_agg == 'median':
        subclass_clustering = np.median(subclass_clustering,axis = -1)
    elif subclass_agg == 'max':
        subclass_clustering = np.max(subclass_clustering,axis = -1)
    return subclass_clustering
def sharpness_random(model,x,y, data_subset = .1, epsilon_weight_scale = 1e-3, nb_samplings = 10,
                     kernel_only = False, training_phase = 0, batch_size = 300):
    '''
    Random-perturbation sharpness: bisect for the largest relative weight
    perturbation scale m such that randomly perturbed copies of the model
    still reach ~90% accuracy on a data subset. Larger m = flatter minimum.

    Returns (m, accuracy - target_accuracy); the second value shows how close
    the bisection got to the 0.9 target.

    training_phase=1 is useful to use batchstatistics with batchnorm. But be careful to remove dropout layers!
    code adapted from NeurIPS "predicting generalization in deep learning" competition starting kit
    https://competitions.codalab.org/competitions/25301
    '''
    # compiled with lr=0 SGD: no training happens, only evaluation
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(0.),
                  metrics=['accuracy'])
    # collect trainable weights and their original values
    weights = model.trainable_weights
    weights_orig = [K.get_value(w) for w in weights]
    # m represents the bounds for the weights perturbation
    # m will be optimized such that optimizing within these bounds reaches the target deviate
    # for this optimization, h and l represent high and low tentative values of m and a bisectional method is used
    h, l = 2.0, 0.000000
    target_accuracy = 0.9
    for i in range(20): # loop to find perturbation scale
        m = (h + l) / 2. # m fixes the bounds for the weight perturbation
        accuracy = 0.
        for k in range(nb_samplings): # loop to estimate accuracy of perturbed model given a perturbation scale
            for w,w_orig in zip(weights,weights_orig):
                if not kernel_only or len(w_orig.shape)>1.: # kernels are assumed to be the only weights with more than one dimension
                    # perturbation is relative to each weight's magnitude
                    # (plus epsilon_weight_scale so zero weights still move)
                    noisy = w_orig + np.random.normal(0.,scale = m, size=list(w_orig.shape)) * (np.abs(w_orig)+epsilon_weight_scale)
                    K.set_value(w,noisy)
            # use subset of the data to estimate accuracy (a different subset is used for every estimation)
            subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
            if training_phase==1:
                estimate_accuracy = evaluate_in_training_mode(model,x[subset],y[subset],verbose = 0, batch_size = batch_size)[1]
            elif training_phase==0:
                estimate_accuracy = model.evaluate(x[subset],y[subset],verbose = 0, batch_size = batch_size)[1]
            accuracy += estimate_accuracy
        accuracy /=nb_samplings
        # stop when the bracket is tight or the accuracy target is matched
        if h - l < 1e-5 or abs(accuracy - target_accuracy) < 5e-3:
            break
        if accuracy < target_accuracy:
            h = m
        else:
            l = m
    # reset original weight values
    for w,w_orig in zip(weights,weights_orig):
        K.set_value(w,w_orig)
    return m, accuracy - target_accuracy
class Clip(Constraint):
    """Element-wise clipping of weight tensors. Upper and lower bounds are tensors of same shape as the weights."""
    def __init__(self, lower_bound, upper_bound):
        # Bounds are kept by reference (Keras variables), so they can be
        # updated externally via K.set_value after the constraint is attached.
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
    def __call__(self, w):
        # clamp w into [lower_bound, upper_bound] element-wise
        return K.minimum(K.maximum(w,self.lower_bound),self.upper_bound)
class StoppingCriteria(Callback):
    '''
    Callback that stops training before the announced number of epochs when
    the epoch's training accuracy drops to or below a given threshold.
    '''
    def __init__(self, accuracy):
        '''
        accuracy: training stops as soon as the 'accuracy' metric at the end
        of an epoch is <= this value.
        '''
        super().__init__()
        self.acc = accuracy
    def on_epoch_end(self, epoch, logs=None):
        # Guard against a missing logs dict or metric: the original
        # `logs.get('accuracy') <= self.acc` raised a TypeError when logs
        # was None or did not contain the 'accuracy' key.
        current = (logs or {}).get('accuracy')
        if current is not None and current <= self.acc:
            self.model.stop_training = True
def sharpness_worstcase(model,x,y, data_subset = .1, epsilon_weight_scale = 1e-3,
                        kernel_only = False, training_phase = 0,
                        batch_size = 128, epochs = 5,lr = 1.,noise = False):
    '''
    Worst-case (adversarial) sharpness: bisect for the largest relative bound
    m such that gradient *ascent* on the loss, constrained to stay within
    +/- m relative of the original weights, cannot push accuracy below ~90%.

    Returns (m, accuracy - target_accuracy, history_dict).

    dropout layers should be removed!
    code adapted from NeurIPS "predicting generalization in deep learning" competition starting kit
    https://competitions.codalab.org/competitions/25301
    '''
    orig_weights = model.get_weights()
    # collect weights and their initial values
    # prepare upper and lower bounds
    weights = model.trainable_weights
    weights_orig = [K.get_value(w) for w in weights]
    weight_upper_bounds = [K.variable(K.get_value(w)) for w in weights]
    weight_lower_bounds = [K.variable(K.get_value(w)) for w in weights]
    for w,w_upper,w_lower in zip(weights, weight_upper_bounds, weight_lower_bounds):
        if w.constraint is not None: # keras allows only one constraint per weight
            warnings.warn("a weight constraint has been overwritten by the sharpness_worstcase() call")
        w._constraint = Clip(w_lower, w_upper)
    # increase the original loss (negated cross-entropy => gradient ascent)
    model.compile(loss = lambda y_true,y_pred: -losses.categorical_crossentropy(y_true,y_pred),
                  optimizer=SGD(lr),
                  metrics=['accuracy'])
    lr_sched = LearningRateScheduler(lr_schedule(lr,0.1,[3]))
    # m represents the bounds for the weights perturbation
    # m will be optimized such that optimizing within these bounds reaches the target deviate
    # for this optimization, h and l represent high and low tentative values of m and a bisectional method is used
    h, l = .25, 0.000000
    # h, l = .1, 0.000000
    target_accuracy = 0.9
    stop = StoppingCriteria(0.7) # training will stop if train accuracy is below 70%
    for i in range(20): # loop to find perturbation scale
        m = (h + l) / 2. # m fixes the bounds for the weight perturbation
        nb_samplings = 3 if noise else 1
        min_accuracy = 1.
        for k in range(nb_samplings):
            for w,w_orig,w_upper,w_lower in zip(weights, weights_orig, weight_upper_bounds, weight_lower_bounds):
                if not kernel_only or len(w_orig.shape)>1.: # kernels are assumed to be the only weights with more than one dimension
                    if noise:
                        # add uniform noise to the kernels to accelerate training
                        noisy = w_orig+np.random.uniform(low=-m/2, high=m/2,
                                                         size=list(w_orig.shape)) * (np.abs(w_orig)+epsilon_weight_scale)
                        K.set_value(w,noisy)
                    else:
                        K.set_value(w,w_orig)
                    # set optimization constraints (relative box around the original weights)
                    K.set_value(w_lower,w_orig- m*(np.abs(w_orig)+epsilon_weight_scale))
                    K.set_value(w_upper,w_orig+ m*(np.abs(w_orig)+epsilon_weight_scale))
            # use subset of the data to train and estimate accuracy (a different subset is used for every estimation)
            datagen = ImageDataGenerator()
            history = model.fit_generator(datagen.flow(x, y,batch_size=batch_size),
                                          steps_per_epoch=50,#int(data_subset*x_train.shape[0]/batch_size),
                                          epochs=epochs,
                                          verbose = 0,
                                          callbacks = [lr_sched,stop])
            subset = np.random.permutation(x.shape[0])[:int(x.shape[0]*data_subset)]
            if training_phase == 1:
                # evaluation is in training mode (which is good, 'cause no need to update batchnorm running statistics)
                # but careful for dropout: should be disabled
                accuracy = history.history['accuracy'][-1]
                # accuracy = evaluate_in_training_mode(model,x[subset],y[subset],verbose = 0, batch_size = batch_size)[1]
            elif training_phase == 0:
                accuracy = model.evaluate(x[subset],y[subset],verbose = 0, batch_size = batch_size)[1]
            min_accuracy = min(min_accuracy,accuracy) # only useful when noise = True
        accuracy = min_accuracy
        if h - l < 1e-5 or abs(accuracy - target_accuracy) < 5e-3:
            break
        if accuracy < target_accuracy:
            h = m
        else:
            l = m
    # restore the original weights and detach the temporary constraints
    model.set_weights(orig_weights)
    for w in weights:
        w._constraint = None
    return m, accuracy - target_accuracy,history_todict(history)
Diverse-ViT | Diverse-ViT-main/main.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import warnings
warnings.filterwarnings('ignore')
from pathlib import Path
from timm.data import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from loss_scaler import NativeScaler as NativeScaler_new
from datasets import build_dataset
from engine import evaluate, train_one_epoch_diverse
from losses import DistillationLoss
from samplers import RASampler
import models
import utils
import torch.nn as nn
from mix import Mixup_diversity
def get_args_parser():
    """Build the DeiT training/evaluation argument parser.

    Returns an argparse.ArgumentParser with add_help=False so it can be used
    as a parent parser (see the __main__ guard). Defaults match the standard
    DeiT recipe plus the diversity-regularization coefficients.
    """
    parser = argparse.ArgumentParser('DeiT training and evaluation script', add_help=False)
    #### Diversity Regularization ####
    parser.add_argument('--mixing_coef', type=float, default=0, help='')
    parser.add_argument('--emb_cos_within_coef', type=float, default=0, help='')
    parser.add_argument('--emb_contrast_cross_coef', type=float, default=0, help='')
    parser.add_argument('--attn_cos_within_coef', type=float, default=0, help='')
    parser.add_argument('--weight_mha_cond_orth_coef', type=float, default=0, help='')
    parser.add_argument('--batch-size', default=128, type=int)
    parser.add_argument('--epochs', default=300, type=int)
    parser.add_argument('--scale_lr_flag', action='store_false')
    # Model parameters
    parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input-size', default=224, type=int, help='images input size')
    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                        help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    parser.add_argument('--model-ema', action='store_true')
    parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
    parser.set_defaults(model_ema=True)
    parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
    parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
    # Optimizer parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw"')
    parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    # Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine"')
    parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                        help='learning rate (default: 5e-4)')
    parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                        help='learning rate noise on/off epoch percentages')
    parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                        help='learning rate noise limit percent (default: 0.67)')
    parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                        help='learning rate noise std-dev (default: 1.0)')
    parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
                        help='epoch interval to decay LR')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                        help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10')
    parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                        help='LR decay rate (default: 0.1)')
    # Augmentation parameters
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    # NOTE: the original help string contained a stray '" + \ "' concatenation
    # artifact that was rendered literally in --help output; fixed here.
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". '
                             '(default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--repeated-aug', action='store_true')
    parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
    parser.set_defaults(repeated_aug=True)
    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                        help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')
    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0.8,
                        help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=1.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    # Distillation parameters
    parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
                        help='Name of teacher model to train (default: "regnety_160"')
    parser.add_argument('--teacher-path', type=str, default='')
    parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
    parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
    parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
    # * Finetuning params
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    # Dataset parameters
    parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'],
                        type=str, help='Image Net dataset path')
    parser.add_argument('--inat-category', default='name',
                        choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
                        type=str, help='semantic granularity')
    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--teacher_eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin-mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
                        help='')
    parser.set_defaults(pin_mem=True)
    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
def main(args):
    """Train and/or evaluate a DeiT model with diversity regularization.

    Sets up distributed training, data loaders, mixup, the model (optionally
    finetuned / resumed / EMA-tracked), optimizer, LR schedule and loss, then
    runs the train/eval loop, checkpointing the latest and best models.
    """
    utils.init_distributed_mode(args)
    print(args)
    if args.distillation_type != 'none' and args.finetune and not args.eval:
        raise NotImplementedError("Finetuning with distillation not yet supported")
    device = torch.device(args.device)
    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    # random.seed(seed)
    cudnn.benchmark = True
    dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
    dataset_val, _ = build_dataset(is_train=False, args=args)
    if True:  # args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        if args.repeated_aug:
            sampler_train = RASampler(
                dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
            )
        else:
            sampler_train = torch.utils.data.DistributedSampler(
                dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
            )
        if args.dist_eval:
            if len(dataset_val) % num_tasks != 0:
                print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                      'This will slightly alter validation results as extra duplicate entries are added to achieve '
                      'equal num of samples per-process.')
            sampler_val = torch.utils.data.DistributedSampler(
                dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
        else:
            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=int(1.5 * args.batch_size),
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=False
    )
    # mixup/cutmix variant that also returns info needed for the diversity losses
    mixup_fn = Mixup_diversity(
        mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob,
        label_smoothing=args.smoothing, num_classes=args.nb_classes)
    print(f"Creating model: {args.model}")
    model = create_model(
        args.model,
        pretrained=False,
        num_classes=args.nb_classes,
        drop_rate=args.drop,
        drop_path_rate=args.drop_path,
        drop_block_rate=None
    )
    if args.finetune:
        if args.finetune.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.finetune, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.finetune, map_location='cpu')
        checkpoint_model = checkpoint['model']
        state_dict = model.state_dict()
        # drop classifier heads whose shape does not match the new num_classes
        for k in ['head.weight', 'head.bias', 'head_dist.weight', 'head_dist.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]
        # interpolate position embedding
        pos_embed_checkpoint = checkpoint_model['pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) for the new position embedding
        new_size = int(num_patches ** 0.5)
        # class_token and dist_token are kept unchanged
        extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
        # only the position tokens are interpolated
        pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
        pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
        pos_tokens = torch.nn.functional.interpolate(
            pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
        pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
        new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
        checkpoint_model['pos_embed'] = new_pos_embed
        model.load_state_dict(checkpoint_model, strict=False)
    model.to(device)
    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
        model_ema = ModelEma(
            model,
            decay=args.model_ema_decay,
            device='cpu' if args.model_ema_force_cpu else '',
            resume='')
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)
    if args.scale_lr_flag:
        # linear LR scaling rule: scale with the global batch size relative to 512
        linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
        args.lr = linear_scaled_lr
    optimizer = create_optimizer(args, model_without_ddp)
    loss_scaler = NativeScaler()
    lr_scheduler, _ = create_scheduler(args, optimizer)
    criterion = LabelSmoothingCrossEntropy()
    if args.mixup > 0.:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy()
    elif args.smoothing:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()
    teacher_model = None
    if args.distillation_type != 'none':
        assert args.teacher_path, 'need to specify teacher-path when using distillation'
        print(f"Creating teacher model: {args.teacher_model}")
        # teacher_model = create_model(
        #     args.teacher_model,
        #     pretrained=False,
        #     num_classes=args.nb_classes,
        #     global_pool='avg',
        # )
        teacher_model = create_model(
            args.teacher_model,
            pretrained=False,
            num_classes=args.nb_classes,
            drop_rate=args.drop,
            drop_path_rate=args.drop_path,
            drop_block_rate=None,
        )
        if args.teacher_path.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.teacher_path, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.teacher_path, map_location='cpu')
        teacher_model.load_state_dict(checkpoint['model'])
        teacher_model.to(device)
        teacher_model.eval()
    # wrap the criterion in our custom DistillationLoss, which
    # just dispatches to the original criterion if args.distillation_type is 'none'
    criterion = DistillationLoss(
        criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
    )
    max_accuracy = 0.0
    output_dir = Path(args.output_dir)
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        if 'max_acc' in checkpoint:
            max_accuracy = checkpoint['max_acc']
        # restore full training state only when resuming training (not eval)
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1
            if args.model_ema:
                utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
            if 'scaler' in checkpoint:
                loss_scaler.load_state_dict(checkpoint['scaler'])
    if args.eval:
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        return
    if args.teacher_eval:
        test_stats = evaluate(data_loader_val, teacher_model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        return
    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            data_loader_train.sampler.set_epoch(epoch)
        print('Training with diversity regularization')
        train_stats = train_one_epoch_diverse(
            model, criterion, data_loader_train,
            optimizer, device, epoch, loss_scaler,
            args.clip_grad, model_ema, mixup_fn,
            set_training_mode=args.finetune == '', args=args # keep in eval mode during finetuning
        )
        lr_scheduler.step(epoch)
        if args.output_dir:
            # always save the latest checkpoint
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'model_ema': get_state_dict(model_ema),
                    'scaler': loss_scaler.state_dict(),
                    'args': args,
                    'max_acc': max_accuracy
                }, checkpoint_path)
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        if test_stats["acc1"] > max_accuracy:
            # save a separate best-accuracy checkpoint
            checkpoint_paths = [output_dir / 'checkpoint_best.pth']
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'model_ema': get_state_dict(model_ema),
                    'scaler': loss_scaler.state_dict(),
                    'args': args,
                    'max_acc': max_accuracy
                }, checkpoint_path)
        max_accuracy = max(max_accuracy, test_stats["acc1"])
        print(f'Max accuracy: {max_accuracy:.2f}%')
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}
        if args.output_dir and utils.is_main_process():
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # Compose the CLI from the shared parent parser and dispatch to main().
    cli = argparse.ArgumentParser('DeiT training and evaluation script', parents=[get_args_parser()])
    cli_args = cli.parse_args()
    if cli_args.output_dir:
        # make sure the checkpoint/log directory exists before training starts
        Path(cli_args.output_dir).mkdir(parents=True, exist_ok=True)
    main(cli_args)
| 22,308 | 47.079741 | 119 | py |
Diverse-ViT | Diverse-ViT-main/losses.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Implements the knowledge distillation loss
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
class DistillationLoss(torch.nn.Module):
    """Criterion wrapper adding a knowledge-distillation term.

    Wraps a standard ``base_criterion`` (applied to the class-token output)
    and blends in a soft (KL) or hard (cross-entropy) distillation loss
    against a frozen teacher model, weighted by ``alpha``.
    """
    def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,
                 distillation_type: str, alpha: float, tau: float):
        super().__init__()
        self.base_criterion = base_criterion
        self.teacher_model = teacher_model
        assert distillation_type in ['none', 'soft', 'hard']
        self.distillation_type = distillation_type
        self.alpha = alpha          # blend weight of the distillation term
        self.tau = tau              # softmax temperature for 'soft' mode

    def forward(self, inputs, outputs, labels):
        """Compute the blended loss.

        Args:
            inputs: original images, forwarded through the teacher model.
            outputs: student output; either a Tensor, or a tuple
                (class_token_output, dist_token_output).
            labels: targets for the base criterion.
        """
        if isinstance(outputs, torch.Tensor):
            outputs_kd = None
        else:
            # model returned a tuple of [outputs, outputs_kd]
            outputs, outputs_kd = outputs
        base_loss = self.base_criterion(outputs, labels)
        if self.distillation_type == 'none':
            return base_loss
        if outputs_kd is None:
            raise ValueError("When knowledge distillation is enabled, the model is "
                             "expected to return a Tuple[Tensor, Tensor] with the output of the "
                             "class_token and the dist_token")
        # the teacher is frozen: no gradients flow through it
        with torch.no_grad():
            teacher_outputs = self.teacher_model(inputs)
        if self.distillation_type == 'soft':
            tau = self.tau
            # KL divergence between temperature-softened student and teacher
            # distributions, rescaled by tau^2 and averaged over all elements
            # (taken from peterliht/knowledge-distillation-pytorch, modified)
            kd = F.kl_div(
                F.log_softmax(outputs_kd / tau, dim=1),
                F.log_softmax(teacher_outputs / tau, dim=1),
                reduction='sum',
                log_target=True,
            )
            distillation_loss = kd * (tau * tau) / outputs_kd.numel()
        elif self.distillation_type == 'hard':
            # train the dist token against the teacher's argmax prediction
            distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))
        return base_loss * (1 - self.alpha) + distillation_loss * self.alpha
| 2,792 | 41.969231 | 114 | py |
Diverse-ViT | Diverse-ViT-main/engine.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Train and eval functions used in main.py
"""
import sys
import math
import utils
import torch
import torch.nn as nn
from timm.data import Mixup
from losses import DistillationLoss
from typing import Iterable, Optional
from timm.utils import accuracy, ModelEma
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from reg import *
def train_one_epoch_diverse(model: torch.nn.Module, criterion: DistillationLoss,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                    model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
                    set_training_mode=True, args=None):
    """Train `model` for one epoch under AMP, adding the diversity
    regularizers from reg.py, each gated by its coefficient in `args`.

    Returns a dict mapping meter name -> global average for the epoch.
    """
    model.train(set_training_mode)
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 50
    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if mixup_fn is not None:
            # NOTE(review): this mixup_fn returns an extra per-patch target
            # tensor — a project-modified Mixup, not the stock timm one.
            samples, patch_targets, targets = mixup_fn(samples, targets)
            patch_targets = patch_targets.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            # In train mode the diverse ViT returns:
            # (per-token logits, first-block input, last-block output, first-block attention)
            outputs, h_first, h_last, attn_first = model(samples)
            # base criterion on the cls-token logits only
            loss = criterion(samples, outputs[:,0], targets)
            if not args.mixing_coef == 0:
                # per-patch token mixing loss, normalized by the patch count
                loss_mix, patch_num = Loss_mixing(outputs, patch_targets)
                loss = (loss + loss_mix) / patch_num
            if not args.emb_cos_within_coef == 0:
                # cosine-similarity diversity penalty on last-block embeddings
                loss_diverse = Loss_cosine(h_last)
                loss += args.emb_cos_within_coef * loss_diverse
            if not args.emb_contrast_cross_coef == 0:
                # contrastive penalty between first- and last-block embeddings
                loss_diverse = Loss_contrastive(h_first, h_last)
                loss += args.emb_contrast_cross_coef * loss_diverse
            if not args.attn_cos_within_coef == 0:
                # cosine-similarity diversity penalty on first-block attention maps
                loss_diverse = Loss_cosine_attn(attn_first)
                loss += args.attn_cos_within_coef * loss_diverse
            if not args.weight_mha_cond_orth_coef == 0:
                # conditional-orthogonality penalty on each block's fused QKV
                # projection, applied per Q/K/V slice (qkv.weight is (3*dim, dim))
                loss_diverse = 0
                for pname, pweight in model.named_parameters():
                    if 'attn.qkv.weight' in pname:
                        dim = pweight.shape[-1]
                        new_weight = pweight.reshape(3, dim, dim)
                        qw, kw, vw = new_weight[0,:,:], new_weight[1,:,:], new_weight[2,:,:]
                        qloss = Loss_condition_orth_weight(qw)
                        loss_diverse += qloss
                        kloss = Loss_condition_orth_weight(kw)
                        loss_diverse += kloss
                        vloss = Loss_condition_orth_weight(vw)
                        loss_diverse += vloss
                loss += args.weight_mha_cond_orth_coef*loss_diverse
        loss_value = loss.item()
        if not math.isfinite(loss_value):
            # unlike stock DeiT (which aborts), zero out non-finite losses
            # and keep training
            print("* Loss is {}, skip current iteration".format(loss_value))
            loss = torch.nan_to_num(loss)
            loss_value = loss.item()
            # sys.exit(1)
        optimizer.zero_grad()
        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        # loss_scaler performs backward, optional grad clipping, and the
        # optimizer step (presumably timm's NativeScaler — confirm at caller)
        loss_scaler(loss, optimizer, clip_grad=max_norm,
                    parameters=model.parameters(), create_graph=is_second_order)
        torch.cuda.synchronize()
        if model_ema is not None:
            model_ema.update(model)
        metric_logger.update(loss=loss_value)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluate `model` on `data_loader`; returns averaged acc1/acc5/loss.

    In eval mode the diverse ViT returns only the cls-token logits, so
    `output` is a plain (batch, num_classes) tensor.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'
    # switch to evaluation mode
    model.eval()
    for images, target in metric_logger.log_every(data_loader, 10, header):
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        # compute output
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        # weight accuracy meters by batch size so partial batches average correctly
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| 5,354 | 39.263158 | 98 | py |
Diverse-ViT | Diverse-ViT-main/hubconf.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from models import *
dependencies = ["torch", "torchvision", "timm"]
| 138 | 22.166667 | 47 | py |
Diverse-ViT | Diverse-ViT-main/gradinit_optimizers.py | import torch
import math
import pdb
class RescaleAdam(torch.optim.Optimizer):
r"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 min_scale=0, grad_clip=0, amsgrad=False):
        # Validate hyper-parameters up front, mirroring torch.optim.Adam.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        # min_scale: lower bound for the learned per-tensor scale ('alpha');
        # grad_clip: clamp for the scalar norm-gradient (0 disables clipping)
        defaults = dict(lr=lr, betas=betas, eps=eps, amsgrad=amsgrad, min_scale=min_scale, grad_clip=grad_clip)
        super(RescaleAdam, self).__init__(params, defaults)
    def __setstate__(self, state):
        # Keep optimizers pickled before the 'amsgrad' option existed loadable
        # by backfilling a default into each param group.
        super(RescaleAdam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
    @torch.no_grad()
    def step(self, closure=None, is_constraint=False):
        """Performs a single optimization step.

        Rather than updating parameter values directly, this Adam variant
        learns a scalar 'alpha' per parameter tensor and rescales the tensor
        so its norm equals alpha * its initial norm.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
            is_constraint (bool, optional): when True, update the separate
                constraint-pass first moment and step counter instead of the
                main ones; the second moment is shared between both passes.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        grad_list = []
        alphas = []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # State initialization
                amsgrad = group['amsgrad']
                state = self.state[p]
                if len(state) == 0:
                    state['alpha'] = 1.
                    state['init_norm'] = p.norm().item()
                    state['step'] = 0
                    state['cons_step'] = 0
                    # Exponential moving average of gradient values for the weight norms
                    state['exp_avg'] = 0
                    # Exponential moving average of squared gradient values for the weight norms
                    state['exp_avg_sq'] = 0
                    state['cons_exp_avg'] = 0
                    # state['cons_exp_avg_sq'] = 0
                # if amsgrad:
                #     # Maintains max of all exp. moving avg. of sq. grad. values
                #     state['max_exp_avg_sq'] = 0
                # alphas.append(state['alpha'])
                curr_norm = p.data.norm().item()
                if state['init_norm'] == 0 or curr_norm == 0:
                    # pdb.set_trace()
                    continue # typical for biases
                # Scalar gradient of the loss w.r.t. a uniform rescaling of p,
                # normalized back to the tensor's initial norm.
                grad = torch.sum(p.grad * p.data).item() * state['init_norm'] / curr_norm
                # grad_list.append(grad)
                if group['grad_clip'] > 0:
                    grad = max(min(grad, group['grad_clip']), -group['grad_clip'])
                # Perform stepweight decay
                # if group['weight_decay'] > 0:
                #     p.mul_(1 - group['lr'] * group['weight_decay'])
                beta1, beta2 = group['betas']
                if is_constraint:
                    # constraint pass: separate first moment and step counter
                    state['cons_step'] += 1
                    state['cons_exp_avg'] = state['cons_exp_avg'] * beta1 + grad * (1 - beta1)
                    # state['cons_exp_avg_sq'] = state['cons_exp_avg_sq'] * beta2 + (grad * grad) * (1 - beta2)
                    steps = state['cons_step']
                    exp_avg = state['cons_exp_avg']
                    # exp_avg_sq = state['cons_exp_avg_sq']
                else:
                    # pdb.set_trace()
                    state['step'] += 1
                    state['exp_avg'] = state['exp_avg'] * beta1 + grad * (1 - beta1)
                    steps = state['step']
                    exp_avg = state['exp_avg']
                # Second moment is shared by both the main and constraint passes.
                state['exp_avg_sq'] = state['exp_avg_sq'] * beta2 + (grad * grad) * (1 - beta2)
                exp_avg_sq = state['exp_avg_sq']
                bias_correction1 = 1 - beta1 ** steps
                # NOTE(review): bias_correction2 uses the combined step count of
                # both passes (matching the shared second moment) — intentional
                # asymmetry with bias_correction1, presumably; verify upstream.
                bias_correction2 = 1 - beta2 ** (state['cons_step'] + state['step'])
                # Decay the first and second moment running average coefficient
                # if amsgrad:
                #     # Maintains the maximum of all 2nd moment running avg. till now
                #     state['max_exp_avg_sq'] = max(state['max_exp_avg_sq'], state['exp_avg_sq'])
                #     # Use the max. for normalizing running avg. of gradient
                #     denom = math.sqrt(state['max_exp_avg_sq'] / bias_correction2) + group['eps']
                # else:
                denom = math.sqrt(exp_avg_sq / bias_correction2) + group['eps']
                step_size = group['lr'] / bias_correction1
                # update the parameter: Adam step on alpha, floored at min_scale,
                # then rescale the tensor so its norm is alpha * init_norm
                state['alpha'] = max(state['alpha'] - step_size * exp_avg / denom, group['min_scale'])
                p.data.mul_(state['alpha'] * state['init_norm'] / curr_norm)
        # print(alphas)
        # print(grad_list)
        # print(max(grad_list), min(grad_list), max(alphas), min(alphas))
        # pdb.set_trace()
        return loss
def reset_momentums(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
amsgrad = group['amsgrad']
if len(state) == 0:
state['alpha'] = 1.
state['init_norm'] = p.norm().item()
state['step'] = 0
# Exponential moving average of gradient values for the weight norms
state['exp_avg'] = 0
# Exponential moving average of squared gradient values for the weight norms
state['exp_avg_sq'] = 0
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = 0
else:
state['step'] = 0
# Exponential moving average of gradient values for the weight norms
state['exp_avg'] = 0
# Exponential moving average of squared gradient values for the weight norms
state['exp_avg_sq'] = 0
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = 0 | 7,541 | 45.269939 | 111 | py |
Diverse-ViT | Diverse-ViT-main/utils.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
class SmoothedValue(object):
    """Tracks a stream of scalar values, exposing both a smoothed view over
    the most recent window and statistics over the whole series.
    """

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt

    def update(self, value, n=1):
        # the window keeps individual values; the running totals weight by n
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        count, total = t.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
class MetricLogger(object):
    """Collection of named SmoothedValue meters plus a timed logging loop."""

    def __init__(self, delimiter="\t"):
        # unseen names lazily create a default-configured SmoothedValue
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        # Update one or more meters by keyword; tensors are unwrapped to scalars.
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Expose meters as attributes (e.g. logger.acc1); falls back to
        # instance dict before raising the standard AttributeError.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        # Aggregate each meter's totals across distributed processes.
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        # Register a meter with a custom window/format (see SmoothedValue).
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Generator wrapping `iterable`; yields its items while printing
        progress (ETA, meters, per-iter time, data-loading time, and CUDA
        peak memory when available) every `print_freq` iterations.
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # pad the iteration index to the width of len(iterable)
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # time spent waiting for data vs. total iteration time
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
def _load_checkpoint_for_ema(model_ema, checkpoint):
    """
    Workaround for ModelEma._load_checkpoint to accept an already-loaded object
    """
    # Round-trip the object through an in-memory buffer so the EMA helper,
    # which expects a file-like argument, can consume it.
    buffer = io.BytesIO()
    torch.save(checkpoint, buffer)
    buffer.seek(0)
    model_ema._load_checkpoint(buffer)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print
    # replacement print: swallows output on non-master ranks unless the
    # caller explicitly passes force=True
    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    # NOTE: globally monkeypatches the builtin print for the whole process
    __builtin__.print = print
def is_dist_avail_and_initialized():
    """True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of processes in the default group; 1 when not distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process in the default group; 0 when not distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    # Rank 0 is the designated main process (also True in non-distributed runs).
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    # torch.save wrapper that is a no-op on non-main ranks, so only one
    # process writes each checkpoint to disk.
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from torchrun or SLURM environment
    variables, mutating `args` in place (rank/world_size/gpu/distributed).
    Falls back to single-process mode when neither is present.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # torchrun / torch.distributed.launch style environment
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # NOTE(review): the SLURM branch does not set args.world_size here —
        # it must already be set on args by the caller; confirm upstream.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # silence print on all ranks except rank 0
    setup_for_distributed(args.rank == 0)
| 7,067 | 28.573222 | 94 | py |
Diverse-ViT | Diverse-ViT-main/vision_transformer_diverse.py | """ Vision Transformer (ViT) in PyTorch
A PyTorch implement of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929
The official jax code is released and available at https://github.com/google-research/vision_transformer
DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
Hacked together by / Copyright 2020 Ross Wightman
"""
'''
gumbel softmax on the top layer
'''
import logging
import math
from collections import OrderedDict
from copy import deepcopy
from functools import partial

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg, overlay_external_default_cfg
from timm.models.layers import DropPath, to_2tuple, trunc_normal_, lecun_normal_
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
    """Build a default pretrained-model config dict; kwargs override defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.proj', 'classifier': 'head',
    }
    cfg.update(kwargs)
    return cfg
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> act -> drop -> Linear -> drop.

    Hidden and output widths default to the input width when not given.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention returning both the projected output and the
    (pre-dropout) softmax attention map, shape (B, num_heads, N, N)."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        # fall back to 1/sqrt(head_dim) when no explicit scale is given
        self.scale = qk_scale or (dim // num_heads) ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        # fused QKV projection, split into per-head q/k/v of shape (B, H, N, C/H)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
        q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)
        # scaled dot-product attention weights, kept before dropout for callers
        attn_origin = torch.softmax((q @ k.transpose(-2, -1)) * self.scale, dim=-1)  # (B, Heads, N, N)
        context = self.attn_drop(attn_origin) @ v
        context = context.transpose(1, 2).reshape(B, N, C)
        out = self.proj_drop(self.proj(context))
        return out, attn_origin
class Block(nn.Module):
    """Pre-norm transformer encoder block (MHSA + MLP, each with a residual
    and stochastic-depth drop path); forward also surfaces the attention
    map produced by its Attention submodule."""
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
    # def forward(self, x):
    #     x = x + self.drop_path(self.attn(self.norm1(x)))
    #     x = x + self.drop_path(self.mlp(self.norm2(x)))
    #     return x
    def forward(self, x):
        # Returns (block output, attention map) — the extra attention return
        # is what the diversity regularizers in engine.py consume.
        shortcut = x
        x, affinity_attn = self.attn(self.norm1(x))
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x, affinity_attn
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    Splits an image into non-overlapping patches with a strided conv and
    flattens them into a (B, num_patches, embed_dim) token sequence.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        # patches per column * patches per row
        self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        # a conv with kernel == stride == patch size is exactly patchify + linear proj
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        return self.proj(x).flatten(2).transpose(1, 2)
class HybridEmbed(nn.Module):
    """ CNN Feature Map Embedding
    Extract feature map from CNN, flatten, project to embedding dim.
    """
    def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
        super().__init__()
        assert isinstance(backbone, nn.Module)
        img_size = to_2tuple(img_size)
        self.img_size = img_size
        self.backbone = backbone
        if feature_size is None:
            # probe the backbone with a dummy forward pass to discover the
            # output feature map size/channels
            with torch.no_grad():
                # FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
                # map for all networks, the feature metadata has reliable channel and stride info, but using
                # stride to calc feature dim requires info about padding of each stage that isn't captured.
                training = backbone.training
                if training:
                    backbone.eval()
                # assumes the backbone returns a sequence of feature maps and
                # the last one is the deepest — TODO confirm for each backbone
                o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
                feature_size = o.shape[-2:]
                feature_dim = o.shape[1]
                # restore the backbone's original train/eval mode
                backbone.train(training)
        else:
            feature_size = to_2tuple(feature_size)
            feature_dim = self.backbone.feature_info.channels()[-1]
        self.num_patches = feature_size[0] * feature_size[1]
        self.proj = nn.Linear(feature_dim, embed_dim)
    def forward(self, x):
        # last backbone feature map -> (B, H*W, embed_dim) token sequence
        x = self.backbone(x)[-1]
        x = x.flatten(2).transpose(1, 2)
        x = self.proj(x)
        return x
class VisionTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929
    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
        - https://arxiv.org/abs/2012.12877

    Diverse-ViT variant: in training mode, forward also returns the
    first-block input, the last-block output, and the first block's
    attention map for the diversity regularizers; distilled mode is
    disabled (asserts).
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, distilled=False,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None, weight_init=''):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
            weight_init: (str): weight init scheme
        """
        super().__init__()
        # distillation is not supported in this fork
        if distilled:
            assert False
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU
        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # NOTE(review): `features` and `token_grad` are set but never used in
        # this file's visible code — presumably hooks for external analysis.
        self.features = []
        self.token_grad = None
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.Sequential(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # Representation layer
        if representation_size and not distilled:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim, representation_size)),
                ('act', nn.Tanh())
            ]))
        else:
            self.pre_logits = nn.Identity()
        # Classifier head(s)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.head_dist = None
        if distilled:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
        # Weight init
        assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
        trunc_normal_(self.pos_embed, std=.02)
        if self.dist_token is not None:
            trunc_normal_(self.dist_token, std=.02)
        if weight_init.startswith('jax'):
            # leave cls token as zeros to match jax impl
            for n, m in self.named_modules():
                _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)
        else:
            trunc_normal_(self.cls_token, std=.02)
            self.apply(_init_vit_weights)
    def _init_weights(self, m):
        # this fn left here for compat with downstream users
        _init_vit_weights(m)
    @torch.jit.ignore
    def no_weight_decay(self):
        # parameter names excluded from weight decay by the optimizer factory
        return {'pos_embed', 'cls_token', 'dist_token'}
    def get_classifier(self):
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist
    def reset_classifier(self, num_classes, global_pool=''):
        # swap the classification head(s) for a new class count
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.num_tokens == 2:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
    def forward(self, x):
        # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
        # with slight modifications to add the dist_token
        B = x.shape[0]
        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        # x1: token embeddings right after positional embedding / dropout
        x1 = self.pos_drop(x + self.pos_embed)
        xtemp = x1
        for ii, blk in enumerate(self.blocks):
            xtemp, affinity_attn = blk(xtemp)
            if ii == 0:
                # keep the first block's attention map for the regularizers
                attn1 = affinity_attn
        xl = xtemp
        xo = self.norm(xl)
        # head is applied to every token; logits shape (B, N+1, num_classes)
        xo = self.head(xo)
        if self.training:
            # training: per-token logits + intermediate features for reg.py
            return xo, x1, xl, attn1
        else:
            # eval: cls-token logits only
            return xo[:,0]
def _init_vit_weights(m, n: str = '', head_bias: float = 0., jax_impl: bool = False):
    """ ViT weight initialization
    * When called without n, head_bias, jax_impl args it will behave exactly the same
      as the original init for compatibility with prev hparam / downstream use cases (ie DeiT).
    * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match the JAX impl
    """
    if isinstance(m, nn.Linear):
        if n.startswith('head'):
            # classifier starts at zero logits (bias shifted for nlhb schemes)
            nn.init.zeros_(m.weight)
            nn.init.constant_(m.bias, head_bias)
        elif n.startswith('pre_logits'):
            lecun_normal_(m.weight)
            nn.init.zeros_(m.bias)
        elif jax_impl:
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                if 'mlp' in n:
                    nn.init.normal_(m.bias, std=1e-6)
                else:
                    nn.init.zeros_(m.bias)
        else:
            # default (DeiT-style) init for all other linear layers
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
    elif jax_impl and isinstance(m, nn.Conv2d):
        # NOTE conv was left to pytorch default in the original (non-jax) init
        lecun_normal_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, nn.LayerNorm):
        nn.init.zeros_(m.bias)
        nn.init.ones_(m.weight)
def resize_pos_embed(posemb, posemb_new, num_tokens=1):
    # Rescale the grid of position embeddings when loading from state_dict. Adapted from
    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    # Special tokens (cls/dist) are carried over unchanged; the spatial grid
    # is bilinearly interpolated. Assumes a square patch grid (int(sqrt(...))).
    _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
    ntok_new = posemb_new.shape[1]
    if num_tokens:
        # split off the first num_tokens special-token embeddings
        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
        ntok_new -= num_tokens
    else:
        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
    gs_old = int(math.sqrt(len(posemb_grid)))
    gs_new = int(math.sqrt(ntok_new))
    _logger.info('Position embedding grid-size from %s to %s', gs_old, gs_new)
    # (N, D) -> (1, D, gs_old, gs_old) so F.interpolate can resize spatially
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode='bilinear')
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
    return posemb
def checkpoint_filter_fn(state_dict, model):
    """ convert patch embedding weight from manual patchify + linear proj to conv"""
    # DeiT checkpoints nest the actual weights under a 'model' key.
    state_dict = state_dict.get('model', state_dict)
    out_dict = {}
    for key, tensor in state_dict.items():
        if 'patch_embed.proj.weight' in key and len(tensor.shape) < 4:
            # old linear patchify weight -> 4-D conv kernel layout
            O, I, H, W = model.patch_embed.proj.weight.shape
            tensor = tensor.reshape(O, -1, H, W)
        elif key == 'pos_embed' and tensor.shape != model.pos_embed.shape:
            # interpolate the position grid to this model's token count
            tensor = resize_pos_embed(tensor, model.pos_embed, getattr(model, 'num_tokens', 1))
        out_dict[key] = tensor
    return out_dict
def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
    """Instantiate a VisionTransformer variant, optionally with pretrained weights."""
    if default_cfg is None:
        default_cfg = deepcopy(default_cfgs[variant])
    overlay_external_default_cfg(default_cfg, kwargs)
    default_num_classes = default_cfg['num_classes']
    num_classes = kwargs.pop('num_classes', default_num_classes)
    img_size = kwargs.pop('img_size', default_cfg['input_size'][-2:])
    repr_size = kwargs.pop('representation_size', None)
    if repr_size is not None and num_classes != default_num_classes:
        # Drop the pre-logits layer when fine-tuning to a new class count.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')
    return build_model_with_cfg(
        VisionTransformer, variant, pretrained,
        default_cfg=default_cfg,
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        pretrained_filter_fn=checkpoint_filter_fn,
        **kwargs)
| 17,793 | 40.574766 | 132 | py |
Diverse-ViT | Diverse-ViT-main/layers.py | import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _pair
class Linear(nn.Linear):
    """nn.Linear with a persistent binary mask on the weight (for pruning)."""
    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__(in_features, out_features, bias)
        # All-ones mask by default, i.e. behaves as a dense layer until pruned.
        self.register_buffer('weight_mask', torch.ones(self.weight.shape))

    def forward(self, input):
        masked_weight = self.weight_mask * self.weight
        return F.linear(input, masked_weight, self.bias)
class Conv2d(nn.Conv2d):
    """nn.Conv2d with a persistent binary mask on the weight (for pruning)."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros'):
        super(Conv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding,
            dilation, groups, bias, padding_mode)
        # All-ones mask by default, i.e. behaves as a dense conv until pruned.
        self.register_buffer('weight_mask', torch.ones(self.weight.shape))

    def _conv_forward(self, input, weight, bias):
        # Non-'zeros' padding modes require an explicit F.pad before the conv.
        if self.padding_mode != 'zeros':
            padded = F.pad(input, self._padding_repeated_twice, mode=self.padding_mode)
            return F.conv2d(padded, weight, bias, self.stride,
                            _pair(0), self.dilation, self.groups)
        return F.conv2d(input, weight, bias, self.stride,
                        self.padding, self.dilation, self.groups)

    def forward(self, input):
        return self._conv_forward(input, self.weight_mask * self.weight, self.bias)
| 1,575 | 36.52381 | 95 | py |
Diverse-ViT | Diverse-ViT-main/datasets.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import os
import json
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
class INatDataset(ImageFolder):
    """iNaturalist (2018/2019 json layout) dataset reader.

    Builds (path, label) samples from the split json.  Labels are assigned by
    first appearance of the chosen taxonomic ``category`` value in the train
    split's annotations, so train and val share the same label ids.
    """
    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
                 category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        # assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
        split = 'train' if train else 'val'
        with open(os.path.join(root, f'{split}{year}.json')) as fh:
            split_meta = json.load(fh)
        with open(os.path.join(root, 'categories.json')) as fh:
            categories = json.load(fh)
        with open(os.path.join(root, f'train{year}.json')) as fh:
            train_meta = json.load(fh)
        # Contiguous label id per category value, in order of first appearance
        # in the training annotations.
        targeter = {}
        for ann in train_meta['annotations']:
            value = categories[int(ann['category_id'])][category]
            if value not in targeter:
                targeter[value] = len(targeter)
        self.nb_classes = len(targeter)
        self.samples = []
        for image in split_meta['images']:
            parts = image['file_name'].split('/')
            raw_target = int(parts[2])
            path = os.path.join(root, parts[0], parts[2], parts[3])
            label = targeter[categories[raw_target][category]]
            self.samples.append((path, label))

    # __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args):
    """Build the train/val dataset selected by args.data_set.

    Returns (dataset, nb_classes).
    """
    transform = build_transform(is_train, args)
    if args.data_set == 'CIFAR':
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
        nb_classes = 100
    elif args.data_set == 'IMNET':
        # Prefer the relabelled 'train_new' split when present on disk.
        root = os.path.join(args.data_path, 'train_new' if is_train else 'val')
        if not os.path.exists(root):
            root = os.path.join(args.data_path, 'train' if is_train else 'val')
        dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = 1000
    elif args.data_set == 'INAT':
        dataset = INatDataset(args.data_path, train=is_train, year=2018,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'INAT19':
        dataset = INatDataset(args.data_path, train=is_train, year=2019,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    return dataset, nb_classes
def build_transform(is_train, args):
    """Build the torchvision transform pipeline for training or evaluation."""
    resize_im = args.input_size > 32
    if is_train:
        # timm's standard ImageNet training augmentation pipeline.
        transform = create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation=args.train_interpolation,
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
        )
        if not resize_im:
            # Small inputs (e.g. CIFAR): padded random crop instead of
            # RandomResizedCropAndInterpolation.
            transform.transforms[0] = transforms.RandomCrop(
                args.input_size, padding=4)
        return transform
    eval_ops = []
    if resize_im:
        # Keep the standard 256/224 resize-to-crop ratio.
        scaled = int((256 / 224) * args.input_size)
        eval_ops.append(transforms.Resize(scaled, interpolation=3))
    eval_ops.append(transforms.CenterCrop(args.input_size))
    eval_ops.append(transforms.ToTensor())
    eval_ops.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(eval_ops)
| 4,235 | 36.821429 | 105 | py |
Diverse-ViT | Diverse-ViT-main/reg.py | import torch
import numpy as np
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
__all__ = ['Loss_mixing', 'Loss_cosine', 'Loss_contrastive',
'Loss_cosine_attn', 'Loss_condition_orth_weight']
# Embedding Level Size: (Batch-size, Tokens, Dims * Heads)
# Attention Level Size: (Batch-size, Heads, Tokens, Tokens) -> (Batch-size, Heads, Tokens * Tokens)
# Similarity Regularization, input: (Batch-size, Diverse-Target, Dimension)
################# Main Regularization ###############
def Loss_mixing(output, patch_target):
    """Soft-target cross-entropy summed over patch tokens (token 0 is skipped).

    output: (B, tokens, classes); patch_target provides one soft target per
    patch token (index i-1 matches output token i).
    Returns (summed loss, token count).
    """
    criterion = SoftTargetCrossEntropy()
    patch_num = output.shape[1]
    loss = sum(criterion(output[:, idx], patch_target[:, idx - 1])
               for idx in range(1, patch_num))
    return loss, patch_num
def Loss_cosine(h_emb, eps=1e-8):
    """Mean pairwise cosine similarity over patch tokens (cls token excluded).

    h_emb: (B, tokens, feature_dim).
    """
    patches = h_emb[:, 1:]
    b, t = patches.shape[0], patches.shape[1]
    flat = patches.reshape(b, t, -1)
    # Safe L2 normalization (eps floor avoids division by zero).
    norms = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(norms, eps * torch.ones_like(norms))
    sims = torch.einsum('abc,acd->abd', unit, unit.transpose(1, 2))
    return sims.mean()
def Loss_contrastive(h1_emb, hl_emb, eps=1e-8):
    """InfoNCE-style loss aligning each patch token of h1_emb with the same
    token in hl_emb (cls token excluded)."""
    first = h1_emb[:, 1:]
    last = hl_emb[:, 1:]
    shape = first.shape

    def _unit(x):
        flat = x.reshape(shape[0], shape[1], -1)
        n = flat.norm(dim=2).unsqueeze(2)
        return flat / torch.max(n, eps * torch.ones_like(n))

    u1 = _unit(first)
    ul = _unit(last)
    sims = torch.einsum('abc,adc->abd', u1, ul)
    diag = torch.diagonal(sims, dim1=1, dim2=2)
    tokens = diag.shape[1]
    pos = torch.exp(diag)
    # Mean over the off-diagonal (negative) similarities, exponentiated.
    neg = torch.exp((torch.sum(sims, dim=2) - diag) / (tokens - 1))
    nce = -torch.log(pos / (pos + neg))
    return nce.mean()
def Loss_cosine_attn(h_emb, eps=1e-8):
    """Mean pairwise cosine similarity over all tokens/heads (no cls slicing)."""
    b, t = h_emb.shape[0], h_emb.shape[1]
    flat = h_emb.reshape(b, t, -1)
    norms = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(norms, eps * torch.ones_like(norms))
    # Includes the diagonal (self-similarity) terms, as in the tokens version.
    sims = torch.einsum('abc,acd->abd', unit, unit.transpose(1, 2))
    return sims.mean()
def dominant_eigenvalue(A, dev):
    """Rayleigh-quotient estimate of A's dominant eigenvalue from one random vector."""
    n = A.size(0)
    x = torch.rand(n, 1, device=dev)
    Ax = A @ x
    AAx = A @ Ax
    # (AAx . Ax) / (Ax . Ax) -- exact for scaled-identity A, approximate otherwise.
    return AAx.permute(1, 0) @ Ax / (Ax.permute(1, 0) @ Ax)
def get_singular_values(A, dev):
    """Estimate (smallest, largest) squared singular values of A via A^T A."""
    gram = A.permute(1, 0) @ A
    n = gram.size(0)
    largest = dominant_eigenvalue(gram, dev)
    # Deflation: dominant eigenvalue of (G - largest*I) is (smallest - largest).
    shifted = gram - torch.eye(n, device=dev) * largest
    smallest = dominant_eigenvalue(shifted, dev) + largest
    return smallest, largest
def Loss_condition_orth_weight(W):
    """Condition-number-style orthogonality penalty on W: (sigma_max - sigma_min)^2."""
    W = W.permute(1, 0)  # (in, out)
    smallest, largest = get_singular_values(W, W.device)
    return torch.mean((largest - smallest) ** 2)
################# Additional Regularization ###############
def loss_cosine_reg(h_emb, eps=1e-8):
    """Mean absolute pairwise cosine similarity over patch tokens (cls excluded)."""
    patches = h_emb[:, 1:]
    b, t = patches.shape[0], patches.shape[1]
    flat = patches.reshape(b, t, -1)
    norms = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(norms, eps * torch.ones_like(norms))
    sims = torch.einsum('abc,acd->abd', unit, unit.transpose(1, 2))
    # abs() keeps anti-correlated pairs penalized too; diagonal included.
    return sims.abs().mean()
def loss_cosine_attn_reg(h_emb, eps=1e-8):
    """Mean absolute pairwise cosine similarity over all tokens (no cls slicing)."""
    b, t = h_emb.shape[0], h_emb.shape[1]
    flat = h_emb.reshape(b, t, -1)
    norms = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(norms, eps * torch.ones_like(norms))
    sims = torch.einsum('abc,acd->abd', unit, unit.transpose(1, 2))
    return sims.abs().mean()
def loss_cosine_across_reg(h_emb, h_emb2, eps=1e-8):
    """Mean absolute cross-cosine similarity between two embeddings' patch tokens."""
    a = h_emb[:, 1:]
    b = h_emb2[:, 1:]
    shape = a.shape  # NB: the second input is reshaped with the first's shape.
    a = a.reshape(shape[0], shape[1], -1)
    an = a.norm(dim=2).unsqueeze(2)
    a_unit = a / torch.max(an, eps * torch.ones_like(an))
    b = b.reshape(shape[0], shape[1], -1)
    bn = b.norm(dim=2).unsqueeze(2)
    b_unit = b / torch.max(bn, eps * torch.ones_like(bn))
    sims = torch.einsum('abc,acd->abd', a_unit, b_unit.transpose(1, 2))
    return sims.abs().mean()
def loss_cosine_across_attn_reg(h_emb, h_emb2, eps=1e-8):
    """Mean absolute cross-cosine similarity between two tensors, all tokens kept."""
    shape = h_emb.shape  # NB: the second input is reshaped with the first's shape.
    a = h_emb.reshape(shape[0], shape[1], -1)
    an = a.norm(dim=2).unsqueeze(2)
    a_unit = a / torch.max(an, eps * torch.ones_like(an))
    b = h_emb2.reshape(shape[0], shape[1], -1)
    bn = b.norm(dim=2).unsqueeze(2)
    b_unit = b / torch.max(bn, eps * torch.ones_like(bn))
    sims = torch.einsum('abc,acd->abd', a_unit, b_unit.transpose(1, 2))
    return sims.abs().mean()
def loss_cosine_across_reg_noabs(h_emb, h_emb2, eps=1e-8):
    """Signed (no abs) mean cross-cosine similarity over patch tokens."""
    a = h_emb[:, 1:]
    b = h_emb2[:, 1:]
    shape = a.shape  # NB: the second input is reshaped with the first's shape.
    a = a.reshape(shape[0], shape[1], -1)
    an = a.norm(dim=2).unsqueeze(2)
    a_unit = a / torch.max(an, eps * torch.ones_like(an))
    b = b.reshape(shape[0], shape[1], -1)
    bn = b.norm(dim=2).unsqueeze(2)
    b_unit = b / torch.max(bn, eps * torch.ones_like(bn))
    sims = torch.einsum('abc,acd->abd', a_unit, b_unit.transpose(1, 2))
    return sims.mean()
def loss_cosine_across_attn_reg_noabs(h_emb, h_emb2, eps=1e-8):
    """Signed (no abs) mean cross-cosine similarity, all tokens kept."""
    shape = h_emb.shape  # NB: the second input is reshaped with the first's shape.
    a = h_emb.reshape(shape[0], shape[1], -1)
    an = a.norm(dim=2).unsqueeze(2)
    a_unit = a / torch.max(an, eps * torch.ones_like(an))
    b = h_emb2.reshape(shape[0], shape[1], -1)
    bn = b.norm(dim=2).unsqueeze(2)
    b_unit = b / torch.max(bn, eps * torch.ones_like(bn))
    sims = torch.einsum('abc,acd->abd', a_unit, b_unit.transpose(1, 2))
    return sims.mean()
def loss_contrastive_attn_reg(h1_emb_target, hl_emb_target, eps=1e-8):
    """InfoNCE-style loss between two token tensors, no cls-token slicing."""
    shape = h1_emb_target.shape

    def _unit(x):
        flat = x.reshape(shape[0], shape[1], -1)
        n = flat.norm(dim=2).unsqueeze(2)
        return flat / torch.max(n, eps * torch.ones_like(n))

    u1 = _unit(h1_emb_target)
    ul = _unit(hl_emb_target)
    sims = torch.einsum('abc,adc->abd', u1, ul)
    diag = torch.diagonal(sims, dim1=1, dim2=2)
    tokens = diag.shape[1]
    pos = torch.exp(diag)
    neg = torch.exp((torch.sum(sims, dim=2) - diag) / (tokens - 1))
    nce = -torch.log(pos / (pos + neg))
    return nce.mean()
# Uniformity Regularization, weight: (Diverse-Target, Dimention) Embedding: (Batch-size, Diverse-Target, Dimension)
def norm(filt):
    """Column-normalize filt (dim, out_dim) so every filter has unit L2 norm."""
    col_norm = ((filt * filt).sum(dim=0) + 1e-8).sqrt().reshape(1, filt.shape[1])
    return filt / col_norm
def cal(filt):
    """Pairwise cosine-similarity matrix between the columns of filt."""
    col_norm = ((filt * filt).sum(dim=0) + 1e-8).sqrt().reshape(1, filt.shape[1])
    norm_mat = torch.matmul(col_norm.transpose(1, 0), col_norm)
    gram = torch.matmul(filt.transpose(1, 0), filt)
    return gram / norm_mat
def loss_mhs_weight_reg(filt):
    """Minimum-hyperspherical-separation penalty on the filters of a weight matrix.

    filt: (output_dim, input_dim).  Filters are unit-normalized; the squared
    chordal distance 2 - 2*cos is computed pairwise, and the negated minimum
    off-diagonal distance is returned (maximizing the closest pair's gap).

    Fix: guard the degenerate case where every pairwise distance is zero
    (a single filter, or all-identical filters) -- torch.min over an empty
    selection raises; the sibling loss_mhs_feature_reg already guards this.
    """
    filt = filt.transpose(1, 0)  # (in, out): one column per filter
    filt = norm(filt)
    inner_pro = cal(filt)
    final = (2.0 - 2.0 * inner_pro)
    final -= torch.triu(final)  # keep only the strictly-lower triangle
    nonzeros = torch.where(final != 0)
    if nonzeros[0].shape[0] == 0:
        # No distinct pair to separate: contribute a zero loss instead of crashing.
        return torch.zeros((), device=filt.device, dtype=filt.dtype)
    target = torch.min(final[nonzeros])
    mask = final.eq(target)
    # mask is detached so only the distance value itself receives gradient.
    loss = -(final * mask.detach()).sum()
    return loss
def norm_feature(filt):
    """Row-normalize each (output_dim, input_dim) slice of a batched tensor."""
    b, d = filt.shape[0], filt.shape[1]
    row_norm = ((filt * filt).sum(dim=2) + 1e-8).sqrt().reshape(b, d, 1)
    return filt / row_norm
def cal_feature(filt):
    """Batched pairwise cosine-similarity matrix between the rows of each slice."""
    b, d = filt.shape[0], filt.shape[1]
    row_norm = ((filt * filt).sum(dim=2) + 1e-8).sqrt().reshape(b, d, 1)
    norm_mat = torch.einsum('bac,bdc->bad', row_norm, row_norm)
    gram = torch.einsum('bac,bdc->bad', filt, filt)
    return gram / norm_mat
def loss_mhs_feature_reg(filt):
    """Batched minimum-hyperspherical-separation penalty on per-sample feature rows.

    filt: (batch, output_dim, ...); trailing dims are flattened per row.
    """
    batch_size, target_dim = filt.shape[0], filt.shape[1]
    rows = norm_feature(filt.reshape(batch_size, target_dim, -1))
    dist = 2.0 - 2.0 * cal_feature(rows)
    dist -= torch.triu(dist)  # keep only the strictly-lower triangle
    loss = 0
    for b in range(batch_size):
        nonzeros = torch.where(dist[b, :, :] != 0)
        if nonzeros[0].shape[0] > 0:
            closest = torch.min(dist[b, :, :][nonzeros])
            mask = dist[b, :, :].eq(closest)
            loss += (dist[b, :, :] * mask.detach()).sum()
    return -loss / batch_size
def loss_mgd_weight_reg(filt):
    """Determinantal (MGD) diversity penalty on a weight matrix's filters."""
    n_filt = filt.shape[0]
    cols = norm(filt.transpose(1, 0))  # (in, out), unit columns
    cross_terms = 2.0 - 2.0 * cal(cols)
    # Gaussian kernel of pairwise distances, jittered for numerical stability.
    kernel = torch.exp(-1 * cross_terms) + torch.diag(1e-6 * torch.ones(n_filt).to(cols.device))
    return -torch.logdet(kernel)
def loss_mgd_feature_reg(filt):
    """Batched determinantal (MGD) diversity penalty on per-sample feature rows."""
    batch_size, out_dim = filt.shape[0], filt.shape[1]
    rows = norm_feature(filt.reshape(batch_size, out_dim, -1))
    cross_terms = 2.0 - 2.0 * cal_feature(rows)
    # Per-sample Gaussian kernels with a small diagonal jitter.
    jitter = torch.diag(1e-6 * torch.ones(out_dim).to(rows.device)).repeat(batch_size, 1, 1)
    kernel = torch.exp(-1 * cross_terms) + jitter
    return -torch.logdet(kernel).mean()
def loss_condition_orth_weight_reg_inverse(W):
    """(sigma_max - sigma_min)^2 penalty on W, without the transpose used elsewhere."""
    smallest, largest = get_singular_values(W, W.device)
    return torch.mean((largest - smallest) ** 2)
def loss_s_orth_weight_reg(A):
    """Soft-orthogonality penalty ||A A^T - I||_F^2 on a weight matrix."""
    gram = A @ A.permute(1, 0)
    n = gram.size(0)
    eye = torch.eye(n, device=A.device)
    return torch.norm(gram - eye, p='fro') ** 2
def features_dominant_eigenvalue(A):
    """Batched one-step power-iteration estimate of each matrix's dominant eigenvalue."""
    b, n, _ = A.size()
    x = torch.randn(b, n, 1).to(A.device)
    for _ in range(1):  # single power-iteration step
        x = torch.bmm(A, x)
    numerator = torch.bmm(torch.bmm(A, x).view(b, 1, n), x).squeeze()
    denominator = (torch.norm(x.view(b, n), p=2, dim=1) ** 2).squeeze()
    return numerator / (denominator + 1e-6)
def features_get_singular_values(A):
    """Batched estimate of (smallest, largest) squared singular values via A A^T."""
    gram = torch.bmm(A, A.permute(0, 2, 1))
    b, n, _ = gram.size()
    largest = features_dominant_eigenvalue(gram)
    # Deflate by the largest eigenvalue to expose the smallest.
    shift = torch.eye(n).expand(b, n, n).to(A.device) * largest.view(b, 1, 1).repeat(1, n, n)
    smallest = features_dominant_eigenvalue(gram - shift) + largest
    return smallest, largest
def loss_condition_orth_embedding_reg(fea, eps=1e-8):
    """Condition-number penalty on L2-normalized token embeddings (B, tokens, dim)."""
    b, t = fea.size(0), fea.size(1)
    flat = fea.view(b, t, -1)
    n = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(n, eps * torch.ones_like(n))
    smallest, largest = features_get_singular_values(unit)
    return torch.mean((largest - smallest) ** 2)
def loss_condition_orth_attn_reg(fea):
    """Condition-number penalty on flattened per-head attention maps."""
    b, h = fea.size(0), fea.size(1)
    smallest, largest = features_get_singular_values(fea.view(b, h, -1))
    return torch.mean((largest - smallest) ** 2)
def loss_s_orth_attn_reg(A):
    """Soft-orthogonality penalty across heads of flattened attention maps.

    A: (batch, heads, tokens, tokens); flattened per head before the Gram product.
    """
    b, h = A.shape[0], A.shape[1]
    flat = A.view(b, h, -1)
    gram = flat @ flat.permute(0, 2, 1)
    eye = torch.eye(h, device=A.device).repeat(b, 1, 1)
    per_sample = ((gram - eye) ** 2).sum(dim=2).sum(dim=1)
    return per_sample.mean()
def loss_s_orth_embedding_reg(A, eps=1e-8):
    """Soft-orthogonality penalty across L2-normalized token embeddings."""
    b, h = A.shape[0], A.shape[1]
    flat = A.view(b, h, -1)
    n = flat.norm(dim=2).unsqueeze(2)
    unit = flat / torch.max(n, eps * torch.ones_like(n))
    gram = unit @ unit.permute(0, 2, 1)
    eye = torch.eye(h, device=A.device).repeat(b, 1, 1)
    per_sample = ((gram - eye) ** 2).sum(dim=2).sum(dim=1)
    return per_sample.mean()
# Gradient Regularization: Only last Embedding: (Batch-size, Diverse-Target, Dimension)
def loss_grad_diversity_reg(grad_tensor, eps=1e-8):
    """Gradient-diversity penalty: -mean over batch of sum_i ||g_i||^2 / ||sum_i g_i||^2.

    grad_tensor: (batch, tokens, dim); NaN entries are replaced with eps first.
    """
    safe = torch.where(torch.isnan(grad_tensor), eps * torch.ones_like(grad_tensor), grad_tensor)
    summed = safe.sum(dim=1)
    sum_norm = (summed ** 2).sum(dim=1)
    norm_sum = (safe ** 2).sum(dim=2).sum(dim=1)
    return -(norm_sum / sum_norm).mean()
| 15,331 | 34.084668 | 115 | py |
Diverse-ViT | Diverse-ViT-main/models.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_
from vision_transformer_diverse import VisionTransformer as VisionTransformerdiverse
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_patch16_224_diverse', 'deit_small_patch16_224_diverse', 'deit_base_patch16_224_diverse', 'deit_small_layer24_patch16_224_diverse',
]
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
    """DeiT-tiny (patch 16, 224x224): embed 192, depth 12, 3 heads."""
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
    """DeiT-small (patch 16, 224x224): embed 384, depth 12, 6 heads."""
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
    """DeiT-base (patch 16, 224x224): embed 768, depth 12, 12 heads."""
    model = VisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_tiny_patch16_224_diverse(pretrained=False, **kwargs):
    """DeiT-tiny with the diversity-regularized VisionTransformer backbone."""
    model = VisionTransformerdiverse(
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        # Loads the plain deit-tiny weights into the diverse variant.
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_small_patch16_224_diverse(pretrained=False, **kwargs):
    """DeiT-small with the diversity-regularized VisionTransformer backbone."""
    model = VisionTransformerdiverse(
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        # Loads the plain deit-small weights into the diverse variant.
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_small_layer24_patch16_224_diverse(pretrained=False, **kwargs):
    """24-layer DeiT-small diverse variant (depth 24 instead of 12)."""
    model = VisionTransformerdiverse(
        patch_size=16, embed_dim=384, depth=24, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        # NOTE: reuses the 12-layer deit-small checkpoint URL.
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_base_patch16_224_diverse(pretrained=False, **kwargs):
    """DeiT-base with the diversity-regularized VisionTransformer backbone."""
    model = VisionTransformerdiverse(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        # Loads the plain deit-base weights into the diverse variant.
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
| 4,745 | 37.585366 | 146 | py |
Diverse-ViT | Diverse-ViT-main/samplers.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.distributed as dist
import math
class RASampler(torch.utils.data.Sampler):
    """Distributed sampler with 3x repeated augmentation.

    Every sample index is replicated three times so each augmented copy of a
    sample lands on a different process (GPU).  Heavily based on
    torch.utils.data.DistributedSampler.
    """
    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # 3x replication, padded so the list divides evenly across replicas.
        self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # Only a 256-aligned prefix of the (unique) dataset length is yielded.
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle

    def __iter__(self):
        # Deterministic shuffle keyed on the epoch counter.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        if self.shuffle:
            order = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            order = list(range(len(self.dataset)))
        # Triplicate every index, then pad to total_size.
        order = [idx for idx in order for _ in range(3)]
        order += order[:(self.total_size - len(order))]
        assert len(order) == self.total_size
        # Interleaved subsampling: consecutive copies go to different ranks.
        order = order[self.rank:self.total_size:self.num_replicas]
        assert len(order) == self.num_samples
        return iter(order[:self.num_selected_samples])

    def __len__(self):
        return self.num_selected_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
| 2,292 | 37.216667 | 103 | py |
Diverse-ViT | Diverse-ViT-main/loss_scaler.py | """ CUDA / AMP utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
try:
from apex import amp
has_apex = True
except ImportError:
amp = None
has_apex = False
from timm.utils import *
__all__ = ['NativeScaler']
class NativeScaler:
    """Thin wrapper around torch.cuda.amp.GradScaler for AMP training.

    __call__ forwards (f, data_iter, data_loader) through GradScaler.step to
    the optimizer's step(), matching this project's custom optimizer
    interface, and returns the (possibly advanced) data iterator.
    """
    state_dict_key = "amp_scaler"

    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()

    def __call__(self, loss, optimizer, f, data_iter, data_loader, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False):
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if clip_grad is not None:
            assert parameters is not None
            # Unscale in place so clipping sees the true gradient magnitudes.
            self._scaler.unscale_(optimizer)
            dispatch_clip_grad(parameters, clip_grad, mode=clip_mode)
        data_iter = self._scaler.step(optimizer, f, data_iter, data_loader)
        self._scaler.update()
        return data_iter

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
Diverse-ViT | Diverse-ViT-main/gradient_utils.py | import torch
from torch import nn
from gradinit_optimizers import RescaleAdam
import numpy as np
import os
class Scale(torch.nn.Module):
    """Learnable scalar multiplier, initialized to 1."""
    def __init__(self):
        super(Scale, self).__init__()
        self.weight = torch.nn.Parameter(torch.ones(1))

    def forward(self, x):
        return x * self.weight
class Bias(torch.nn.Module):
    """Learnable scalar offset, initialized to 0."""
    def __init__(self):
        super(Bias, self).__init__()
        self.bias = torch.nn.Parameter(torch.zeros(1))

    def forward(self, x):
        return x + self.bias
def get_ordered_params(net):
    """Collect the weights/biases of Conv2d/Linear/LayerNorm/Scale/Bias modules
    in module-traversal order (must match take_opt_step's iteration order)."""
    ordered = []
    for mod in net.modules():
        if isinstance(mod, (nn.Conv2d, nn.Linear, nn.LayerNorm)):
            ordered.append(mod.weight)
            if mod.bias is not None:
                ordered.append(mod.bias)
        elif isinstance(mod, Scale):
            ordered.append(mod.weight)
        elif isinstance(mod, Bias):
            ordered.append(mod.bias)
    return ordered
def set_param(module, name, alg, eta, grad):
    """Replace module.<name> with a one-optimizer-step-updated plain tensor.

    The original Parameter is kept as <name>_prev so recover_params can
    restore it later; the updated value becomes a plain attribute that is
    recomputed before every forward().
    """
    weight = getattr(module, name)
    del module._parameters[name]  # detach from the module's parameter list
    if alg.lower() == 'sgd':
        step = eta * grad
    elif alg.lower() == 'adam':
        step = eta * grad.sign()  # Adam's very first update is a signed step
    else:
        raise RuntimeError("Optimization algorithm {} not defined!".format(alg))
    module.register_parameter(name + '_prev', weight)
    setattr(module, name, weight - step.data)
def take_opt_step(net, grad_list, alg='adam', eta=0.1):
    """Take the initial step of the chosen optimizer.

    Walks the modules in the same order as get_ordered_params and applies
    set_param with the matching gradient from grad_list.
    """
    assert alg.lower() in ['adam', 'sgd']
    cursor = 0
    for _, mod in net.named_modules():
        if isinstance(mod, (nn.Conv2d, nn.Linear, nn.LayerNorm)):
            set_param(mod, 'weight', alg, eta, grad_list[cursor])
            cursor += 1
            if mod.bias is not None:
                set_param(mod, 'bias', alg, eta, grad_list[cursor])
                cursor += 1
        elif isinstance(mod, Scale):
            set_param(mod, 'weight', alg, eta, grad_list[cursor])
            cursor += 1
        elif isinstance(mod, Bias):
            set_param(mod, 'bias', alg, eta, grad_list[cursor])
            cursor += 1
def recover_params(net):
    """Undo set_param: restore every saved <name>_prev Parameter on the net."""
    def _restore(module, name):
        delattr(module, name)  # drop the plain-tensor attribute
        # Re-assigning the saved Parameter re-registers it under `name`.
        setattr(module, name, getattr(module, name + '_prev'))
        del module._parameters[name + '_prev']

    for _, mod in net.named_modules():
        if isinstance(mod, (nn.Conv2d, nn.Linear, nn.LayerNorm)):
            _restore(mod, 'weight')
            if mod.bias is not None:
                _restore(mod, 'bias')
        elif isinstance(mod, Scale):
            _restore(mod, 'weight')
        elif isinstance(mod, Bias):
            _restore(mod, 'bias')
def set_bn_modes(net):
    """Put every BatchNorm2d in training mode without updating running stats."""
    for _, mod in net.named_modules():
        if isinstance(mod, nn.BatchNorm2d):
            mod.training = True
            mod.track_running_stats = False
def recover_bn_modes(net):
    """Re-enable running-statistics tracking on every BatchNorm2d."""
    for _, mod in net.named_modules():
        if isinstance(mod, nn.BatchNorm2d):
            mod.track_running_stats = True
def get_scale_stats(model, optimizer):
    """Summarize the per-parameter 'alpha' scales tracked by the rescaling optimizer.

    Returns a dict with max/min/mean over all parameters and over the
    non-bias ("weight") parameters of `model`.
    """
    stat_dict = {}
    scales = [optimizer.state[p]['alpha']
              for group in optimizer.param_groups
              for p in group['params']
              if 'alpha' in optimizer.state[p]]
    stat_dict['s_max'] = max(scales)
    stat_dict['s_min'] = min(scales)
    stat_dict['s_mean'] = np.mean(scales)
    weight_scales = [optimizer.state[p]['alpha']
                     for n, p in model.named_parameters()
                     if 'bias' not in n and 'alpha' in optimizer.state[p]]
    stat_dict['s_weight_max'] = max(weight_scales)
    stat_dict['s_weight_min'] = min(weight_scales)
    stat_dict['s_weight_mean'] = np.mean(weight_scales)
    return stat_dict
def get_batch(data_iter, data_loader):
    """Fetch the next (inputs, targets) batch, restarting the iterator at epoch end.

    Returns (data_iter, inputs, targets) with the tensors moved to CUDA.

    Fix: the original bare `except:` also swallowed KeyboardInterrupt,
    SystemExit and genuine dataloader errors; only iterator exhaustion
    should trigger a restart.
    """
    try:
        inputs, targets = next(data_iter)
    except StopIteration:
        # Epoch exhausted: start over with a fresh iterator.
        data_iter = iter(data_loader)
        inputs, targets = next(data_iter)
    inputs, targets = inputs.cuda(), targets.cuda()
    return data_iter, inputs, targets
def gradinit(net, dataloader,
             gradinit_lr=1e-6, gradinit_min_scale=0.01, gradinit_grad_clip=1,
             gradinit_gamma=1, gradinit_eta=0.1, batch_no_overlap=False, gradinit_iters=2000):
    """Run the GradInit procedure: learn per-parameter scale factors so that a
    single gradient step of size ``gradinit_eta`` maximally reduces the loss,
    subject to a gradient-norm constraint of ``gradinit_gamma``.

    The network parameters are only scaled, never permanently updated: each
    provisional step taken via ``take_opt_step`` is rolled back by
    ``recover_params`` after the post-step loss has been evaluated.

    Args:
        net: network whose initialization is being tuned in place.
        dataloader: training loader, consumed repeatedly through ``get_batch``.
        gradinit_lr: learning rate of the RescaleAdam meta-optimizer.
        gradinit_min_scale: lower bound on the learned weight scales
            (bias scales may go to zero).
        gradinit_grad_clip: gradient clipping for the meta-optimizer.
        gradinit_gamma: gradient-norm budget; norms above it trigger a
            constraint-projection step instead of an objective step.
        gradinit_eta: step size of the provisional inner update.
        batch_no_overlap: if True, evaluate the post-step loss on a first half
            disjoint from the batch used to compute the step.
        gradinit_iters: total number of meta-iterations to run.
    """
    # if isinstance(net, torch.nn.DataParallel):
    #     net_top = net.module
    # else:
    #     net_top = net
    # Weight scales are bounded below by min_scale; bias scales may reach 0.
    bias_params = [p for n, p in net.named_parameters() if 'bias' in n]
    weight_params = [p for n, p in net.named_parameters() if 'weight' in n]
    optimizer = RescaleAdam([{'params': weight_params, 'min_scale': gradinit_min_scale, 'lr': gradinit_lr},
                             {'params': bias_params, 'min_scale': 0, 'lr': gradinit_lr}],
                            grad_clip=gradinit_grad_clip)
    criterion = nn.CrossEntropyLoss()
    net.eval()  # This further shuts down dropout, if any.
    total_loss, total_l0, total_l1, total_residual, total_gnorm = 0, 0, 0, 0, 0
    total_sums, total_sums_gnorm = 0, 0
    cs_count = 0
    total_iters = 0
    obj_loss, updated_loss, residual = -1, -1, -1
    data_iter = iter(dataloader)
    # get all the parameters by order
    params_list = get_ordered_params(net)
    while True:
        eta = gradinit_eta
        # continue
        # get the first half of the minibatch
        data_iter, init_inputs_0, init_targets_0 = get_batch(data_iter, dataloader)
        # Get the second half of the data.
        data_iter, init_inputs_1, init_targets_1 = get_batch(data_iter, dataloader)
        init_inputs = torch.cat([init_inputs_0, init_inputs_1])
        init_targets = torch.cat([init_targets_0, init_targets_1])
        # compute the gradient and take one step
        outputs = net(init_inputs)
        init_loss = criterion(outputs, init_targets)
        # create_graph=True: the post-step objective must backpropagate
        # through these gradients to reach the scale factors.
        all_grads = torch.autograd.grad(init_loss, params_list, create_graph=True)
        # # Compute the loss w.r.t. the optimizer
        # if args.gradinit_alg.lower() == 'adam':
        #     # grad-update inner product
        # L1 norm of the gradient (Adam-style step magnitude).
        gnorm = sum([g.abs().sum() for g in all_grads])
        loss_grads = all_grads
        # else:
        #     gnorm_sq = sum([g.square().sum() for g in all_grads])
        #     gnorm = gnorm_sq.sqrt()
        #     if args.gradinit_normalize_grad:
        #         loss_grads = [g / gnorm for g in all_grads]
        #     else:
        #         loss_grads = all_grads
        total_gnorm += gnorm.item()
        total_sums_gnorm += 1
        if gnorm.item() > gradinit_gamma:
            # project back into the gradient norm constraint
            optimizer.zero_grad()
            gnorm.backward()
            optimizer.step(is_constraint=True)
            cs_count += 1
        else:
            # take one optimization step
            take_opt_step(net, loss_grads, eta=eta)
            total_l0 += init_loss.item()
            data_iter, inputs_2, targets_2 = get_batch(data_iter, dataloader)
            if batch_no_overlap:
                # sample a new batch for the half
                data_iter, init_inputs_0, init_targets_0 = get_batch(data_iter, dataloader)
            updated_inputs = torch.cat([init_inputs_0, inputs_2])
            updated_targets = torch.cat([init_targets_0, targets_2])
            # compute loss using the updated network
            # net_top.opt_mode(True)
            updated_outputs = net(updated_inputs)
            # net_top.opt_mode(False)
            updated_loss = criterion(updated_outputs, updated_targets)
            # If eta is larger, we should expect obj_loss to be even smaller.
            obj_loss = updated_loss / eta
            # Roll back the provisional step before updating the scales.
            recover_params(net)
            optimizer.zero_grad()
            obj_loss.backward()
            optimizer.step(is_constraint=False)
            total_l1 += updated_loss.item()
            total_loss += obj_loss.item()
            total_sums += 1
        total_iters += 1
        # NOTE(review): the trailing `or total_iters == gradinit_iters` is
        # duplicated; harmless, logic unaffected.
        if (total_sums_gnorm > 0 and total_sums_gnorm % 10 == 0) or total_iters == gradinit_iters or total_iters == gradinit_iters:
            stat_dict = get_scale_stats(net, optimizer)
            print_str = "Iter {}, obj iters {}, eta {:.3e}, constraint count {} loss: {:.3e} ({:.3e}), init loss: {:.3e} ({:.3e}), update loss {:.3e} ({:.3e}), " \
                        "total gnorm: {:.3e} ({:.3e})\t".format(
                total_sums_gnorm, total_sums, eta, cs_count,
                float(obj_loss), total_loss / total_sums if total_sums > 0 else -1,
                float(init_loss), total_l0 / total_sums if total_sums > 0 else -1,
                float(updated_loss), total_l1 / total_sums if total_sums > 0 else -1,
                float(gnorm), total_gnorm / total_sums_gnorm)
            for key, val in stat_dict.items():
                print_str += "{}: {:.2e}\t".format(key, val)
            print(print_str)
        if total_iters == gradinit_iters:
            break
| 9,598 | 34.420664 | 163 | py |
Diverse-ViT | Diverse-ViT-main/mix.py | """ Mixup and Cutmix
Papers:
mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)
Code Reference:
CutMix: https://github.com/clovaai/CutMix-PyTorch
Hacked together by / Copyright 2020 Ross Wightman
"""
import numpy as np
import torch
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda', num_patches=196):
    """Smoothed one-hot encoding replicated along a patch axis.

    Args:
        x: tensor of class indices (flattened internally).
        num_classes: size of the one-hot dimension.
        on_value: value placed at the target index (enables label smoothing).
        off_value: value everywhere else.
        device: device of the returned tensor.
        num_patches: length of the replicated patch axis. Defaults to 196,
            i.e. a 14x14 grid of 16-px patches over a 224x224 image
            (generalized from the previously hard-coded constant).

    Returns:
        Tensor of shape (batch, num_classes, num_patches).
    """
    idx = x.long().view(-1, 1)
    onehot_y = torch.full((idx.size(0), num_classes), off_value, device=device)
    onehot_y.scatter_(1, idx, on_value)
    return onehot_y.unsqueeze(2).repeat(1, 1, num_patches)
def mixup_target(target, num_classes, lam, patch_lam, smoothing=0.0, device='cuda'):
    """Build mixed patch-wise and global soft targets for mixup/cutmix.

    Each sample is paired with its mirror in the batch (``target.flip(0)``),
    matching how the inputs were mixed.

    Returns:
        (patch_target, global_target): a (batch, num_patches, num_classes)
        target mixed with the per-patch weights ``patch_lam``, and a
        (batch, num_classes) target mixed with the scalar ``lam``.
    """
    off_value = smoothing / num_classes
    on_value = 1. - smoothing + off_value
    y_orig = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)
    y_flip = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device)
    patch_target = y_orig * patch_lam + y_flip * (1. - patch_lam)
    global_target = y_orig[:, :, 0] * lam + y_flip[:, :, 0] * (1. - lam)
    return patch_target.permute(0, 2, 1), global_target
def rand_bbox(img_shape, lam, margin=0., count=None):
    """Sample a CutMix bounding box whose area fraction is roughly ``1 - lam``.

    Args:
        img_shape (tuple): image shape; only the trailing (H, W) are used.
        lam (float): CutMix lambda; the cut side length scales with sqrt(1-lam).
        margin (float): fraction of the cut size kept as a border so the box
            center stays away from the image edge.
        count (int): number of boxes to sample (None gives scalar outputs).

    Returns:
        (yl, yh, xl, xh, lam) — clipped box edges plus the lambda recomputed
        from the actual (clipped) box area.
    """
    cut_ratio = np.sqrt(1 - lam)
    height, width = img_shape[-2:]
    cut_h = int(height * cut_ratio)
    cut_w = int(width * cut_ratio)
    pad_y = int(margin * cut_h)
    pad_x = int(margin * cut_w)
    # Draw the box center first (y then x, preserving the RNG call order).
    center_y = np.random.randint(0 + pad_y, height - pad_y, size=count)
    center_x = np.random.randint(0 + pad_x, width - pad_x, size=count)
    yl = np.clip(center_y - cut_h // 2, 0, height)
    yh = np.clip(center_y + cut_h // 2, 0, height)
    xl = np.clip(center_x - cut_w // 2, 0, width)
    xh = np.clip(center_x + cut_w // 2, 0, width)
    box_area = (yh - yl) * (xh - xl)
    lam = 1. - box_area / float(img_shape[-2] * img_shape[-1])
    return yl, yh, xl, xh, lam
def patch_wise_lam(img_shape, yl, yh, xl, xh, device='cuda'):
    """Per-patch keep ratio for a 14x14 grid of 16-px patches.

    Builds a mask that is 0 inside the cut box ``[yl:yh, xl:xh]`` and 1
    elsewhere, averages it over each 16x16 patch, and returns the 196 ratios
    flattened in row-major order.
    """
    mask = torch.ones(img_shape[-2:], device=device)
    mask[yl:yh, xl:xh] = 0
    lam_grid = torch.ones(14, 14, device=device)
    for row in range(14):
        for col in range(14):
            y0, x0 = row * 16, col * 16
            lam_grid[row, col] = torch.mean(mask[y0:y0 + 16, x0:x0 + 16])
    return lam_grid.flatten()
class Mixup_diversity:
    """ Mixup/Cutmix applying one set of parameters to the whole batch.

    Each sample is mixed with its mirror in the batch (``x.flip(0)``); a
    per-patch lambda vector is produced alongside the global lambda so that
    patch tokens can carry patch-level soft targets.

    Args:
        mixup_alpha (float): mixup beta-distribution alpha, active if > 0.
        cutmix_alpha (float): cutmix beta-distribution alpha, active if > 0.
        prob (float): probability of applying mixup or cutmix per batch.
        switch_prob (float): probability of choosing cutmix over mixup.
        label_smoothing (float): label smoothing applied to the mixed target.
        num_classes (int): number of target classes.
    """
    def __init__(self, mixup_alpha=0.8, cutmix_alpha=1.0, prob=1.0, switch_prob=0.5,
                 label_smoothing=0.1, num_classes=1000):
        self.mixup_alpha = mixup_alpha
        self.cutmix_alpha = cutmix_alpha
        self.mix_prob = prob
        self.switch_prob = switch_prob
        self.label_smoothing = label_smoothing
        self.num_classes = num_classes

    def _params_per_batch(self):
        """Sample the mixing coefficient and whether to use cutmix."""
        use_cutmix = np.random.rand() < self.switch_prob
        lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
            np.random.beta(self.mixup_alpha, self.mixup_alpha)
        return float(lam_mix), use_cutmix

    def _mix_batch(self, x):
        """Mix ``x`` in place with its flipped batch; return the per-patch
        lambdas and the global lambda."""
        lam, use_cutmix = self._params_per_batch()
        if use_cutmix:
            yl, yh, xl, xh, lam = rand_bbox(x.shape, lam)
            x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]  # x=0, x_flip=1
            # FIX: use the input's device instead of the hard-coded 'cuda'
            # default so the transform also works on CPU tensors.
            patch_lam = patch_wise_lam(x.shape, yl, yh, xl, xh, device=x.device)
        else:
            x_flipped = x.flip(0).mul_(1. - lam)
            x.mul_(lam).add_(x_flipped)
            # FIX: was torch.ones(196, device='cuda') — crashed without a GPU.
            patch_lam = torch.ones(196, device=x.device) * lam
        return patch_lam, lam

    def __call__(self, x, target):
        assert len(x) % 2 == 0, 'Batch size should be even when using this'
        patch_lam, lam = self._mix_batch(x)
        patch_target, target = mixup_target(target, self.num_classes, lam, patch_lam,
                                            self.label_smoothing, device=target.device)
        return x, patch_target, target
| 5,266 | 42.528926 | 120 | py |
GATNE | GATNE-master/src/main_pytorch.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy import random
from torch.nn.parameter import Parameter
from utils import *
def get_batches(pairs, neighbors, batch_size):
    """Yield training batches as (center, context, type, neighbor) tensors.

    ``pairs`` holds (center, context, edge_type) triples; ``neighbors`` maps a
    node index to its sampled neighbor list. The final batch may be smaller
    than ``batch_size``.
    """
    n_batches = (len(pairs) + (batch_size - 1)) // batch_size
    for batch_idx in range(n_batches):
        start = batch_idx * batch_size
        chunk = pairs[start:start + batch_size]
        x = [p[0] for p in chunk]
        y = [p[1] for p in chunk]
        t = [p[2] for p in chunk]
        neigh = [neighbors[p[0]] for p in chunk]
        yield torch.tensor(x), torch.tensor(y), torch.tensor(t), torch.tensor(neigh)
class GATNEModel(nn.Module):
    """GATNE node-embedding model.

    Produces one embedding per (node, edge type): a shared base embedding plus
    an edge-type-specific correction aggregated from sampled neighbors through
    a small attention over edge types.

    With ``features`` the embeddings are linear transforms of node attributes
    (attributed variant); without, they are free lookup tables (transductive
    variant).
    """

    def __init__(
        self, num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a, features
    ):
        super(GATNEModel, self).__init__()
        self.num_nodes = num_nodes
        self.embedding_size = embedding_size      # dim of the final embedding
        self.embedding_u_size = embedding_u_size  # dim of per-edge-type embeddings
        self.edge_type_count = edge_type_count
        self.dim_a = dim_a                        # attention hidden size
        self.features = None
        if features is not None:
            # Attributed variant: derive embeddings from node features.
            self.features = features
            feature_dim = self.features.shape[-1]
            self.embed_trans = Parameter(torch.FloatTensor(feature_dim, embedding_size))
            self.u_embed_trans = Parameter(torch.FloatTensor(edge_type_count, feature_dim, embedding_u_size))
        else:
            # Transductive variant: free base and per-edge-type tables.
            self.node_embeddings = Parameter(torch.FloatTensor(num_nodes, embedding_size))
            self.node_type_embeddings = Parameter(
                torch.FloatTensor(num_nodes, edge_type_count, embedding_u_size)
            )
        # Per-edge-type projection of the aggregated neighbor embedding back
        # into the base embedding space, plus the attention parameters.
        self.trans_weights = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size)
        )
        self.trans_weights_s1 = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, dim_a)
        )
        self.trans_weights_s2 = Parameter(torch.FloatTensor(edge_type_count, dim_a, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize embeddings (uniform) and projections (scaled normal)."""
        if self.features is not None:
            self.embed_trans.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
            self.u_embed_trans.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        else:
            self.node_embeddings.data.uniform_(-1.0, 1.0)
            self.node_type_embeddings.data.uniform_(-1.0, 1.0)
        self.trans_weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s1.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s2.data.normal_(std=1.0 / math.sqrt(self.embedding_size))

    def forward(self, train_inputs, train_types, node_neigh):
        """Embed the nodes ``train_inputs`` under edge types ``train_types``.

        Args:
            train_inputs: (batch,) node indices.
            train_types: (batch,) edge-type indices.
            node_neigh: sampled neighbor indices per node — presumably shaped
                (batch, edge_type_count, neighbor_samples); confirm against
                ``generate_neighbors`` in utils.
        Returns:
            (batch, embedding_size) L2-normalized embeddings.
        """
        if self.features is None:
            node_embed = self.node_embeddings[train_inputs]
            node_embed_neighbors = self.node_type_embeddings[node_neigh]
        else:
            node_embed = torch.mm(self.features[train_inputs], self.embed_trans)
            # Project every neighbor's features with every edge type's matrix;
            # the diagonal below keeps only the matching edge-type projection.
            node_embed_neighbors = torch.einsum('bijk,akm->bijam', self.features[node_neigh], self.u_embed_trans)
        node_embed_tmp = torch.diagonal(node_embed_neighbors, dim1=1, dim2=3).permute(0, 3, 1, 2)
        # Aggregate (sum) the sampled neighbors per edge type.
        node_type_embed = torch.sum(node_embed_tmp, dim=2)
        trans_w = self.trans_weights[train_types]
        trans_w_s1 = self.trans_weights_s1[train_types]
        trans_w_s2 = self.trans_weights_s2[train_types]
        # Attention over edge types: softmax(tanh(U @ W1) @ W2).
        attention = F.softmax(
            torch.matmul(
                torch.tanh(torch.matmul(node_type_embed, trans_w_s1)), trans_w_s2
            ).squeeze(2),
            dim=1,
        ).unsqueeze(1)
        node_type_embed = torch.matmul(attention, node_type_embed)
        # Base embedding plus the projected, attention-weighted correction.
        node_embed = node_embed + torch.matmul(node_type_embed, trans_w).squeeze(1)
        last_node_embed = F.normalize(node_embed, dim=1)
        return last_node_embed
class NSLoss(nn.Module):
    """Negative-sampling loss with a log-uniform (Zipfian) negative sampler.

    Keeps an output-embedding table of shape (num_nodes, embedding_size);
    negatives are drawn with probability proportional to
    log((k + 2) / (k + 1)), so low-index (frequent) nodes are sampled more.
    """

    def __init__(self, num_nodes, num_sampled, embedding_size):
        super(NSLoss, self).__init__()
        self.num_nodes = num_nodes
        self.num_sampled = num_sampled
        self.embedding_size = embedding_size
        self.weights = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        # Zipfian proposal over node ranks, L2-normalized; used as the weight
        # vector for torch.multinomial.
        self.sample_weights = F.normalize(
            torch.Tensor(
                [
                    (math.log(k + 2) - math.log(k + 1)) / math.log(num_nodes + 1)
                    for k in range(num_nodes)
                ]
            ),
            dim=0,
        )
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the output embeddings with a scaled normal."""
        self.weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))

    def forward(self, input, embs, label):
        batch = input.shape[0]
        # Positive term: log sigma(<emb, w_label>).
        pos_score = torch.sum(torch.mul(embs, self.weights[label]), 1)
        log_target = torch.log(torch.sigmoid(pos_score))
        # Draw num_sampled negatives per example from the Zipfian proposal.
        negs = torch.multinomial(
            self.sample_weights, self.num_sampled * batch, replacement=True
        ).view(batch, self.num_sampled)
        noise = torch.neg(self.weights[negs])
        # Negative term: sum_k log sigma(-<emb, w_neg_k>).
        sum_log_sampled = torch.sum(
            torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1
        ).squeeze()
        loss = log_target + sum_log_sampled
        return -loss.sum() / batch
def train_model(network_data, feature_dic):
    """Train GATNE on ``network_data`` and return the best test metrics.

    Training pairs are generated from random walks; after every epoch the
    embeddings are exported and evaluated on the validation split, and the
    test metrics of the best-validation epoch are returned (with early
    stopping on the mean validation AUC).

    NOTE(review): besides ``args`` and ``file_name``, this function reads the
    valid/test edge dicts as module-level globals set in ``__main__``.

    Args:
        network_data: dict mapping edge type -> list of training edges.
        feature_dic: optional dict mapping node id -> attribute vector;
            enables the feature-based model variant when provided.
    Returns:
        (auc, f1, pr) on the test split at the best validation epoch.
    """
    vocab, index2word, train_pairs = generate(network_data, args.num_walks, args.walk_length, args.schema, file_name, args.window_size, args.num_workers, args.walk_file)
    edge_types = list(network_data.keys())
    num_nodes = len(index2word)
    edge_type_count = len(edge_types)
    epochs = args.epoch
    batch_size = args.batch_size
    embedding_size = args.dimensions
    embedding_u_size = args.edge_dim
    u_num = edge_type_count
    num_sampled = args.negative_samples
    dim_a = args.att_dim
    att_head = 1
    neighbor_samples = args.neighbor_samples
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    neighbors = generate_neighbors(network_data, vocab, num_nodes, edge_types, neighbor_samples)
    # Optional node attribute matrix, indexed by vocab position.
    features = None
    if feature_dic is not None:
        feature_dim = len(list(feature_dic.values())[0])
        print('feature dimension: ' + str(feature_dim))
        features = np.zeros((num_nodes, feature_dim), dtype=np.float32)
        for key, value in feature_dic.items():
            if key in vocab:
                features[vocab[key].index, :] = np.array(value)
        features = torch.FloatTensor(features).to(device)
    model = GATNEModel(
        num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a, features
    )
    nsloss = NSLoss(num_nodes, num_sampled, embedding_size)
    model.to(device)
    nsloss.to(device)
    # Both the model and the loss's output embeddings are trained jointly.
    optimizer = torch.optim.Adam(
        [{"params": model.parameters()}, {"params": nsloss.parameters()}], lr=1e-4
    )
    best_score = 0
    test_score = (0.0, 0.0, 0.0)
    patience = 0
    for epoch in range(epochs):
        random.shuffle(train_pairs)
        batches = get_batches(train_pairs, neighbors, batch_size)
        data_iter = tqdm(
            batches,
            desc="epoch %d" % (epoch),
            total=(len(train_pairs) + (batch_size - 1)) // batch_size,
            bar_format="{l_bar}{r_bar}",
        )
        avg_loss = 0.0
        for i, data in enumerate(data_iter):
            optimizer.zero_grad()
            # data = (center, context, edge_type, neighbors) tensors.
            embs = model(data[0].to(device), data[2].to(device), data[3].to(device),)
            loss = nsloss(data[0].to(device), embs, data[1].to(device))
            loss.backward()
            optimizer.step()
            avg_loss += loss.item()
            if i % 5000 == 0:
                post_fix = {
                    "epoch": epoch,
                    "iter": i,
                    "avg_loss": avg_loss / (i + 1),
                    "loss": loss.item(),
                }
                data_iter.write(str(post_fix))
        # Export an embedding for every node under every edge type.
        final_model = dict(zip(edge_types, [dict() for _ in range(edge_type_count)]))
        for i in range(num_nodes):
            train_inputs = torch.tensor([i for _ in range(edge_type_count)]).to(device)
            train_types = torch.tensor(list(range(edge_type_count))).to(device)
            node_neigh = torch.tensor(
                [neighbors[i] for _ in range(edge_type_count)]
            ).to(device)
            node_emb = model(train_inputs, train_types, node_neigh)
            for j in range(edge_type_count):
                final_model[edge_types[j]][index2word[i]] = (
                    node_emb[j].cpu().detach().numpy()
                )
        # Link-prediction evaluation per edge type.
        valid_aucs, valid_f1s, valid_prs = [], [], []
        test_aucs, test_f1s, test_prs = [], [], []
        for i in range(edge_type_count):
            if args.eval_type == "all" or edge_types[i] in args.eval_type.split(","):
                tmp_auc, tmp_f1, tmp_pr = evaluate(
                    final_model[edge_types[i]],
                    valid_true_data_by_edge[edge_types[i]],
                    valid_false_data_by_edge[edge_types[i]],
                )
                valid_aucs.append(tmp_auc)
                valid_f1s.append(tmp_f1)
                valid_prs.append(tmp_pr)
                tmp_auc, tmp_f1, tmp_pr = evaluate(
                    final_model[edge_types[i]],
                    testing_true_data_by_edge[edge_types[i]],
                    testing_false_data_by_edge[edge_types[i]],
                )
                test_aucs.append(tmp_auc)
                test_f1s.append(tmp_f1)
                test_prs.append(tmp_pr)
        print("valid auc:", np.mean(valid_aucs))
        print("valid pr:", np.mean(valid_prs))
        print("valid f1:", np.mean(valid_f1s))
        average_auc = np.mean(test_aucs)
        average_f1 = np.mean(test_f1s)
        average_pr = np.mean(test_prs)
        # Early stopping on the mean validation AUC.
        cur_score = np.mean(valid_aucs)
        if cur_score > best_score:
            best_score = cur_score
            test_score = (average_auc, average_f1, average_pr)
            patience = 0
        else:
            patience += 1
            if patience > args.patience:
                print("Early Stopping")
                break
    return test_score
if __name__ == "__main__":
args = parse_args()
file_name = args.input
print(args)
if args.features is not None:
feature_dic = load_feature_data(args.features)
else:
feature_dic = None
training_data_by_type = load_training_data(file_name + "/train.txt")
valid_true_data_by_edge, valid_false_data_by_edge = load_testing_data(
file_name + "/valid.txt"
)
testing_true_data_by_edge, testing_false_data_by_edge = load_testing_data(
file_name + "/test.txt"
)
average_auc, average_f1, average_pr = train_model(training_data_by_type, feature_dic)
print("Overall ROC-AUC:", average_auc)
print("Overall PR-AUC", average_pr)
print("Overall F1:", average_f1)
| 10,920 | 36.400685 | 169 | py |
Viola-Unet | Viola-Unet-main/main.py | import argparse, os
import time
import numpy as np
import torch
from load_model import load_model, infer_seg, nibout, infer_seg_3
from load_data import load_data, post_process, read_raw_image
from monai.transforms import SaveImaged
from monai.data import decollate_batch
if __name__ == '__main__':
    # CLI: a directory of CT volumes in, a directory of predicted
    # segmentations out.
    parser = argparse.ArgumentParser(description='ICH segmentation of a ct volume')
    parser.add_argument('--input_dir', default='', type=str, metavar='PATH',
                        help='this directory contains all test samples(ct volumes)')
    parser.add_argument('--predict_dir', default='', type=str, metavar='PATH',
                        help='segmentation file of each test sample should be stored in the directory')
    args = parser.parse_args()
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    # Ten-member ensemble: five nnUNet folds + five ViolaUNet_l folds.
    models = []  # ensemble models, stack together
    models.append(load_model(network="nnUNet", kf="kf0", device=device).eval())
    models.append(load_model(network="nnUNet", kf="kf1", device=device).eval())
    models.append(load_model(network="nnUNet", kf="kf2", device=device).eval())
    models.append(load_model(network="nnUNet", kf="kf3", device=device).eval())
    models.append(load_model(network="nnUNet", kf="kf4", device=device).eval())
    models.append(load_model(network="ViolaUNet_l", kf="kf0", device=device).eval())
    models.append(load_model(network="ViolaUNet_l", kf="kf1", device=device).eval())
    models.append(load_model(network="ViolaUNet_l", kf="kf2", device=device).eval())
    models.append(load_model(network="ViolaUNet_l", kf="kf3", device=device).eval())
    models.append(load_model(network="ViolaUNet_l", kf="kf4", device=device).eval())
    test_file_list, dataloader = load_data(args.input_dir)
    with torch.no_grad():
        num_scans = len(dataloader)
        for i, d in enumerate(dataloader):
            path, filename = os.path.split(test_file_list[i]['image'])
            # The raw (un-preprocessed) image supplies the voxel spacing used
            # to convert the predicted voxel count into milliliters.
            raw_data = read_raw_image(test_file_list[i])
            raw_img = raw_data["image"]
            pixdims = raw_data["image_meta_dict"]["pixdim"][1:4]
            pix_volume = pixdims[0] * pixdims[1] * pixdims[2]  # mm^3
            images = d["image"].to(device)
            print('\n------------------start predicting input volume: {0} - {1}/{2} -------------------'.format(filename, i + 1, num_scans))
            # print("image size after preprocessed: ", images.size())
            _, _, h, w, z = images.size()
            print("h, w, z: ", images.size())
            # Sliding-window overlap shrinks as the volume grows beyond the
            # 160-voxel patch edge, clamped at 0.
            max_size = h if h>w else w
            max_size = max_size if max_size > z else z
            overlap = 1-(max_size - 160)/(2*160)
            overlap = 0 if overlap<0 else round(overlap, 2)
            print("overlap: ", overlap)
            ### make sure the last slize is z-axial, z must be smallest number
            reshape=None
            mid_reshape=None  # switch w and z
            if h<z and h<w:
                print("We discovered some errors in the head information, tried to fix here but the predict will still not work well in this case...")
                reshape=(0, 1, 4, 3, 2)
                images=images.permute(reshape)
                # print("reshaped input", images.size())
            elif w<z and w<h:
                mid_reshape=(0, 1, 2, 4, 3)
                # images=images.permute(reshape)
                # we can fix the error head infor, but the model were trained with this error case, so at this time, we don't fix it
                # we leave this error for future work
                print("We discovered some errors in the head information without trying to fix it, so the predict will not work well in this case...")
            start_time = time.time()
            pred_outputs = list()
            for m in models:
                pred = infer_seg(images, m, overlap=overlap)
                pred_outputs.append(pred)
                if mid_reshape is not None:
                    print("try using corrected oritentation to infer ...")
                    pred2 = infer_seg(images.permute(mid_reshape), m, overlap=overlap).permute(mid_reshape)
                    # Keep the corrected-orientation prediction only when it
                    # finds more foreground than the original one.
                    if torch.sum(torch.argmax(torch.softmax(pred, 1), 1))<torch.sum(torch.argmax(torch.softmax(pred2, 1), 1)):
                        pred_outputs.append(pred2)
                    else:
                        print("did not use the corrected oritentation ...")
                # # do augmentation (TTA) if bleeds are too small or not found
                if torch.sum(torch.argmax(torch.softmax(pred, 1), 1, keepdim=True))<10.:
                    print('small object, trying to TTA boost ...')
                    pred = infer_seg_3(images, m, flip_axis=[2], overlap=overlap)
                    # print(torch.argmax(torch.softmax(pred, 1), 1, keepdim=True).size())
                    if torch.sum(torch.argmax(torch.softmax(pred, 1), 1, keepdim=True))>10.:
                        # pred_outputs.pop()
                        pred_outputs.append(pred)
                        print('augmented by flip2')
                    pred = infer_seg_3(images, m, overlap=overlap, flip_axis=[1, 2], rot=1)
                    if torch.sum(torch.argmax(torch.softmax(pred, 1), 1, keepdim=True))>10.:
                        # pred_outputs.pop()
                        pred_outputs.append(pred)
                        print('augmented by flip 1-2 and rot 1')
            # Average the ensemble logits, then undo any axis permutation.
            d["pred"] = torch.mean(torch.stack(pred_outputs, dim=0), dim=0, keepdim=True).squeeze(0)
            # print(d["pred"].size())
            if reshape is not None:
                d["pred"] = d["pred"].permute(reshape)
                # print('reshaped size: ',d["pred"].size())
            d = [post_process(img) for img in decollate_batch(d)]
            d[0]["pred"] = torch.argmax(d[0]["pred"], 0, keepdim=True)
            lesion_volume = torch.sum(d[0]["pred"]) * pix_volume / 1000.
            print('Predicted lesion volume : {:.3f} ml'.format(lesion_volume))
            d[0]["pred"] = d[0]["pred"].squeeze(0)
            nibout(
                d[0]["pred"].cpu().detach().numpy().astype(np.uint8),
                args.predict_dir,
                test_file_list[i]['image']
            )
            print('--------Cost time: {:.3f} sec --------'.format(time.time() - start_time))
| 6,412 | 49.496063 | 150 | py |
Viola-Unet | Viola-Unet-main/load_model.py | import os
import torch
import nibabel as nib
from monai.inferers import sliding_window_inference
from monai.transforms.utils import map_spatial_axes
from monai.data import decollate_batch
from viola_unet import ViolaUNet
from monai.networks.nets import DynUNet
# Presumably CT intensity windows (lower, upper) feeding the three input
# channels — confirm against the preprocessing in load_data.
wind_levels = [[0,100], [-15, 200],[-100, 1300]]
# Presumably the target voxel spacing (mm) for resampling — confirm in load_data.
spacing = [0.45100001*2, 0.45100001*2, 4.99709511]
# Sliding-window inference settings shared by infer_seg / infer_seg_3.
patch_size = (160, 160, 32)
patch_overlap = 0.3  # 0.5 for last validation submission
sw_bt_size=1
# debug new setting
# patch_size = (192, 192, 24)
# patch_overlap = 0.75 # 0.5 for last validation submission
# # # last validation submission weights
# net_weights = {
#     "ViolaUNet_l":{
#         "kf0": "./best_ckpt1/viola/model_epoch_8640_dice_0.80106_lr_0.0000869017.pt",
#         "kf1": "./best_ckpt1/viola/model_epoch_4656_dice_0.76015_lr_0.0019869438.pt", # new ft 0.76887
#         "kf2": "./best_ckpt1/viola/model_epoch_5940_dice_0.81959_lr_0.0000937899.pt", # new ft 0.82135
#         "kf3": "./best_ckpt1/viola/model_epoch_37728_dice_0.78699_lr_0.0033099166.pt", # new ft 0.78784
#         "kf4": "./best_ckpt1/viola/model_epoch_12054_dice_0.78984_lr_0.0000753008.pt",
#     },
#     "nnUNet":{
#         "kf0": "./best_ckpt1/nnu/model_epoch_19296_dice_0.80530_lr_0.0042245472.pt",
#         "kf1": "./best_ckpt1/nnu/model_epoch_38412_dice_0.76024_lr_0.0022887478.pt", # ft new 0.76780
#         "kf2": "./best_ckpt1/nnu/model_epoch_24651_dice_0.81655_lr_0.0037515006.pt", # ft new 0.81775
#         "kf3": "./best_ckpt1/nnu/model_epoch_1152_dice_0.79311_lr_0.0049999435.pt",
#         "kf4": "./best_ckpt1/nnu/model_epoch_5292_dice_0.78911_lr_0.0049550524.pt",
#     },
# }
# # re-fine-tuned version after the last validation submission weights
# Checkpoint paths per architecture and cross-validation fold, consumed by
# load_model(network, kf).
net_weights = {
    "ViolaUNet_l":{
        "kf0": "./best_ckpt2/viola/model_epoch_8640_dice_0.80106_lr_0.0000869017.pt",
        "kf1": "./best_ckpt2/viola/model_epoch_5820_dice_0.76887_lr_0.0029100000.pt", # new ft 0.76887
        "kf2": "./best_ckpt2/viola/model_epoch_297_dice_0.82135_lr_0.0001485000.pt", # new ft 0.82135
        "kf3": "./best_ckpt2/viola/model_epoch_288_dice_0.78784_lr_0.0001440000.pt", # new ft 0.78784
        "kf4": "./best_ckpt2/viola/model_epoch_12054_dice_0.78984_lr_0.0000753008.pt",
    },
    "nnUNet":{
        "kf0": "./best_ckpt2/nnu/model_epoch_19296_dice_0.80530_lr_0.0042245472.pt",
        "kf1": "./best_ckpt2/nnu/model_epoch_6693_dice_0.76780_lr_0.0033465000.pt", # ft new 0.76780
        "kf2": "./best_ckpt2/nnu/model_epoch_18414_dice_0.81775_lr_0.0047762088.pt", # ft new 0.81775
        "kf3": "./best_ckpt2/nnu/model_epoch_1152_dice_0.79311_lr_0.0049999435.pt",
        "kf4": "./best_ckpt2/nnu/model_epoch_5292_dice_0.78911_lr_0.0049550524.pt",
    },
}
def load_model(network="ViolaUNet_l", kf="kf0", device='cpu', ckpt=True):
if network == "ViolaUNet_l":
model = ViolaUNet(
spatial_dims=3,
in_channels=3,
out_channels=2,
kernel_size=[[3, 3, 1], [3, 3, 1], [3, 3, 1], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]],
strides=[[1, 1, 1], [2, 2, 1], [2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 1], [1, 1, 1]],
upsample_kernel_size=[[2, 2, 1], [2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 1], [1, 1, 1]],
filters=(32, 64, 96, 128, 192, 256, 320),
dec_filters=(32, 64, 96, 128, 192, 256),
norm_name=("BATCH", {"affine": True}),
act_name=("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
dropout=0.2,
deep_supervision=True,
deep_supr_num=4,
res_block=True,
trans_bias=True,
viola_att = True,
gated_att = False,
sum_deep_supr = False,
)
elif network == 'nnUNet':
model = DynUNet(
spatial_dims=3,
in_channels=3,
out_channels=2,
kernel_size=[[3, 3, 1], [3, 3, 1], [3, 3, 1], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]],
strides=[[1, 1, 1], [2, 2, 1], [2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 1], [1, 1, 1]],
upsample_kernel_size=[[2, 2, 1], [2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 1], [1, 1, 1]],
filters=(32, 64, 96, 128, 192, 256, 320),
dropout=0.2,
norm_name=("INSTANCE", {"affine": True}),
act_name=("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
deep_supervision=True,
deep_supr_num=4,
res_block=True,
trans_bias=True,
)
# elif network == 'ViolaUNet_s': # for paper figure 1
# model = ViolaUNet(
# spatial_dims=3,
# in_channels=3,
# out_channels=2,
# kernel_size=[[3, 3, 1], [3, 3, 1], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 1]],
# strides=[[1, 1, 1], [2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 1, 1]],
# upsample_kernel_size=[[2, 2, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 1, 1]],
# filters=(32, 64, 96, 128, 192, 256, 320),
# dec_filters=(32, 64, 96, 128, 128, 128),
# norm_name=("BATCH", {"affine": True}),
# act_name=("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
# dropout=0.2,
# deep_supervision=True,
# deep_supr_num=2,
# res_block=False,
# trans_bias=True,
# viola_att = True,
# gated_att = False,
# sum_deep_supr = False,
# )
else:
print("Not support the network currently - ", network)
return None
if ckpt and network != 'ViolaUNet_s': #
pretrain = torch.load(net_weights[network][kf], map_location=device)
model.load_state_dict(pretrain['state_dict'])
print("model {}-{} loaded successfully!".format(network, kf))
return model.to(device)
def infer_seg(images, model,
              roi_size=patch_size, sw_batch_size=sw_bt_size, overlap=patch_overlap,
              flip_axis=-1, rot=0):
    """Sliding-window inference with optional test-time rotation or flip.

    If ``rot`` is 1-3, the batch is rotated by rot*90 degrees in the first two
    spatial axes before inference and rotated back afterwards; otherwise, if
    ``flip_axis`` is 0-2, the batch is flipped along that spatial axis and the
    prediction flipped back. Rotation takes precedence over flipping.
    """
    if 0 < rot < 4:
        rotated = torch.stack(
            [torch.rot90(img, rot, map_spatial_axes(img.ndim, (0, 1)))
             for img in decollate_batch(images)]
        )
        preds = sliding_window_inference(rotated, roi_size, sw_batch_size, model, overlap=overlap)
        return torch.stack(
            [torch.rot90(p, 4 - rot, map_spatial_axes(p.ndim, (0, 1)))
             for p in decollate_batch(preds)]
        )
    if 0 <= flip_axis < 3:
        flipped = torch.stack(
            [torch.flip(img, map_spatial_axes(img.ndim, flip_axis))
             for img in decollate_batch(images)]
        )
        preds = sliding_window_inference(flipped, roi_size, sw_batch_size, model, overlap=overlap)
        return torch.stack(
            [torch.flip(p, map_spatial_axes(p.ndim, flip_axis))
             for p in decollate_batch(preds)]
        )
    return sliding_window_inference(images, roi_size, sw_batch_size, model, overlap=overlap)
def infer_seg_3(images, model, roi_size=patch_size, sw_batch_size=1, overlap=patch_overlap,
                flip_axis=[0, 1, 2], rot=-1):
    """Sliding-window inference under a composite flip (and rotation) TTA.

    The batch is flipped along every axis in ``flip_axis`` (in order),
    optionally rotated by rot*90 degrees in the first two spatial axes,
    inferred, and then the rotation and flips are inverted on the prediction.
    """
    def _flip(batch, axis):
        return torch.stack([torch.flip(img, map_spatial_axes(img.ndim, axis))
                            for img in decollate_batch(batch)])

    for axis in flip_axis:
        images = _flip(images, axis)
    if 0 < rot < 4:
        images = torch.stack([torch.rot90(img, rot, map_spatial_axes(img.ndim, (0, 1)))
                              for img in decollate_batch(images)])
    preds = sliding_window_inference(images, roi_size, sw_batch_size, model, overlap=overlap)
    if 0 < rot < 4:
        preds = torch.stack([torch.rot90(p, 4 - rot, map_spatial_axes(p.ndim, (0, 1)))
                             for p in decollate_batch(preds)])
    # Flips are involutions, so re-applying them in order undoes the TTA.
    for axis in flip_axis:
        preds = _flip(preds, axis)
    return preds
def nibout(segmentation, outputpath, imagepath):
    """Save a prediction as NIfTI, named after and aligned with its source CT.

    The affine, qform and sform are copied from the source image so the
    segmentation lives in the same spatial frame.

    :param segmentation: prediction volume, as a numpy array.
    :param outputpath: directory the segmentation file is written into.
    :param imagepath: path of the image the prediction corresponds to.
    """
    _, filename = os.path.split(imagepath)
    print(filename)
    source = nib.load(imagepath)
    seg_img = nib.Nifti1Image(segmentation, source.affine)
    seg_img.set_qform(source.get_qform())
    seg_img.set_sform(source.get_sform())
    nib.save(seg_img, os.path.join(outputpath, filename))
# import time
if __name__ == '__main__':
    # _, channel, _, _, _ = input.shape
    # Smoke test: build one model and report its trainable parameter count.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    model = load_model(network="ViolaUNet_l", device=device).eval()
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    print(f'Trainable params: {sum([p.numel() for p in trainable_params])/1000**2.} M')
    # for kf in ["kf0", 'kf1', 'kf2', 'kf3', 'kf4']:
    #     model = load_model(network="ViolaUNet_s", kf=kf, device=device).eval()
    # output = model(input)
    # print(output.size())
    # --- test inference speed -----------------------------
    # input = torch.randn(1, 3, 512, 512, 32).cuda()
    # input = torch.autograd.Variable(torch.sigmoid(torch.randn(1, 3, 512, 512, 32)), requires_grad=False).cuda()
    # output = model(input)
    # start_time = time.time()
    # output = infer_seg(input, model)
    # print('--------Cost time: {:.3f} sec --------'.format(time.time() - start_time))
| 9,563 | 43.691589 | 130 | py |
Viola-Unet | Viola-Unet-main/viola_unet.py | # ViolaUNet is based on DynUNet
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import interpolate
from monai.networks.blocks.convolutions import Convolution
from monai.networks.layers.factories import Norm
from monai.networks.blocks.dynunet_block import UnetBasicBlock, UnetOutBlock, UnetResBlock, get_conv_layer
def initialize_weights(*models):
    """Initialise the submodules of the given models in place.

    Conv/transposed-conv/linear weights get Kaiming-normal init (leaky_relu,
    a=0.01, fan_in); batch-norm weights get N(1.0, 0.02). All biases are zeroed.

    Args:
        *models: any number of ``nn.Module``s whose submodules are initialised.
    """
    conv_like = (nn.Conv3d, nn.Conv2d, nn.ConvTranspose3d, nn.ConvTranspose2d, nn.Linear)
    for model in models:
        for module in model.modules():
            if isinstance(module, conv_like):
                nn.init.kaiming_normal_(module.weight, a=0.01, mode='fan_in', nonlinearity='leaky_relu')
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, (nn.BatchNorm3d, nn.BatchNorm2d)):
                nn.init.normal_(module.weight.data, 1.0, 0.02)
                if module.bias is not None:
                    module.bias.data.zero_()
            # NOTE(review): a former `elif isinstance(module, nn.Linear)` branch
            # was unreachable (nn.Linear is matched by the first branch above)
            # and has been removed; behaviour is unchanged.
class group_norm(nn.GroupNorm):
    """GroupNorm for 4D inputs carrying a singleton dim: (b, 1, C, L), or
    (b, 1, L, C) when ``permute=True``. The singleton dim is squeezed, the
    tensor is (optionally) transposed so channels come first, normalised,
    and the original layout is restored."""

    def __init__(self, num_groups, num_channels, permute=False):
        super(group_norm, self).__init__(num_groups, num_channels)
        # True when the input is channels-last (b, 1, L, C) and must be
        # transposed to (b, C, L) before normalisation.
        self.perm = permute

    def forward(self, x):
        inner = x.squeeze(1)
        if self.perm:
            normed = super(group_norm, self).forward(inner.permute(0, 2, 1)).permute(0, 2, 1)
        else:
            normed = super(group_norm, self).forward(inner)
        return normed.unsqueeze(1)
class LayerNorm(nn.Module):
    """Channel-wise layer norm for 5D maps (b, C, H, W, D): each spatial
    position is normalised across the channel dim using the biased variance,
    then scaled by a learnable per-channel ``gamma`` (no shift term)."""

    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1, 1))

    def forward(self, x):
        mu = torch.mean(x, dim=1, keepdim=True)
        sigma2 = torch.var(x, dim=1, unbiased=False, keepdim=True)
        return (x - mu) / (sigma2 + self.eps).sqrt() * self.gamma
def l2norm(t):
    """L2-normalise ``t`` along its last dimension."""
    return F.normalize(t, p=2, dim=-1)
class ddcmBlock_silu(nn.Module):
    """Dense dilated convolutions merging (DDCM) block with a SiLU merge.

    A stack of dilated (kernel, 1) convolutions is applied to a 1D profile
    (reshaped to a 4D map); each stage's output is upsampled back to the input
    length and concatenated onto the running feature stack, so stage ``idx``
    sees ``in_dim + idx * out_dim`` channels. A final SiLU + 1x1 conv merges
    everything back down to ``in_dim`` channels.

    Args:
        in_dim: number of input channels of the block.
        out_dim: channels produced by each dilated stage.
        rates: dilation rate per stage (also sets per-stage padding).
        strides: per-stage stride along the first kernel axis; defaults to 1s.
        kernel: kernel extent along the first axis (second axis is always 1).
        bias: whether the dilated convs use a bias term.
        dropout: dropout probability applied after each dilated conv.
    """
    def __init__(self, in_dim, out_dim, rates, strides=None, kernel=3, bias=False, dropout=0.1):
        super(ddcmBlock_silu, self).__init__()
        self.features = []
        self.num = len(rates)
        self.in_dim = in_dim
        self.out_dim = out_dim
        if strides is None:
            self.strides = [1 for i in range(self.num)]
        else:
            self.strides = strides
        for idx, rate in enumerate(rates):
            # Each stage consumes the input plus all previous stage outputs.
            self.features.append(nn.Sequential(
                nn.Conv2d(self.in_dim + idx * out_dim,
                          out_dim,
                          kernel_size=(kernel, 1),
                          dilation=rate,
                          stride=(self.strides[idx],1),
                          padding=(rate * (kernel - 1) // 2, 0),
                          bias=bias),
                nn.Dropout(p=dropout)
                )
            )
        self.features = nn.ModuleList(self.features)
        # Merge: SiLU over [input copy, input, all stage outputs], then 1x1 conv
        # back to in_dim channels.
        self.conv1x1_out = nn.Sequential(
            nn.SiLU(inplace=True),
            nn.Conv2d(self.in_dim*2 + out_dim * self.num, self.in_dim, kernel_size=1, bias=False),
        )
        initialize_weights(self.conv1x1_out, self.features)
    def forward(self, x):
        # Normalise the input to shape (b, 1, H, W): squeeze all singleton
        # dims, then restore batch / trailing dims that the squeeze removed.
        # NOTE(review): assumes the only meaningful axes after squeezing are
        # (batch, length[, 1]) — TODO confirm against callers, which feed
        # axis-pooled 5D maps.
        b,*_ = x.size()
        x = torch.squeeze(x)
        if b==1:
            x = x.unsqueeze(0)
        if x.ndim == 2:
            x = x.unsqueeze(-1)
        x = x.unsqueeze(1)
        _, _, H, W = x.size()
        xc = x.clone()
        for f in self.features:
            # Upsample each stage back to (H, W) and stack onto the input.
            x = torch.cat([F.interpolate(f(x), (H, W), mode='bilinear', align_corners=False), x], 1)
        x = self.conv1x1_out(torch.cat([xc, x], 1))
        return x
class viola_attx_ddcm_dyk(nn.Module):
    """Viola attention with DDCM profile encoders and a dynamic kernel size.

    The 5D input (b, C, H, W, D) is average-pooled along each spatial axis to
    obtain three 1D per-channel profiles, each encoded by a DDCM block. The
    sigmoid gates of the three axes are combined through joint (pairwise
    products), multiplied (triple product) and additive (tanh-normalised sum)
    terms into a voxel-wise attention map that rescales the input.

    Args:
        channel: number of input channels C.
        reduction: channel reduction ratio used to size the DDCM stage width.
        min_dim: lower bound for the reduced width.
        k_size: base kernel size; effective size grows with ``channel // 32``.
    """
    def __init__(self, channel, reduction=16, min_dim=4, k_size=3):
        super(viola_attx_ddcm_dyk, self).__init__()
        # Axis pools: (b,C,H,W,D) -> (b,C,H,1,1) / (b,C,1,W,1) / (b,C,1,1,D).
        self.x_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
        self.y_pool = nn.AdaptiveAvgPool3d((1, None, 1))
        self.z_pool = nn.AdaptiveAvgPool3d((1, 1, None))
        self.sig = nn.Sigmoid()
        # GroupNorm + tanh applied to the concatenated (h+w+d) profile.
        self.act = nn.Sequential(
            group_norm(2, channel),
            nn.Tanh(),
        )
        self.relu = nn.ReLU(inplace=False)
        d = max(channel // reduction, min_dim)
        # "Dynamic kernel": grows with the channel count; dilation rates and
        # strides are derived from it.
        ck_size = k_size + 2*(channel//32)
        ration = [1, ck_size, 2 * (ck_size - 1) + 1, 3 * (ck_size - 1) + 1]
        strides = [2, 2, 4, 4]
        ext_ch = d // 4 + 1
        # One LayerNorm + DDCM encoder per spatial axis.
        self.xconv = nn.Sequential(
            LayerNorm(channel),
            ddcmBlock_silu(1, ext_ch, ration, strides=strides, kernel=ck_size, bias=False)
        )
        self.yconv = nn.Sequential(
            LayerNorm(channel),
            ddcmBlock_silu(1, ext_ch, ration, strides=strides, kernel=ck_size, bias=False)
        )
        self.zconv = nn.Sequential(
            LayerNorm(channel),
            ddcmBlock_silu(1, ext_ch, ration, strides=strides, kernel=ck_size, bias=False)
        )
        initialize_weights(self.xconv, self.yconv, self.zconv, self.act)
    def forward(self, x):
        b, c, h, w, d = x.size()
        # Encode each axis profile; vx/vy/vz are per-axis channel responses.
        vx = self.xconv(self.x_pool(x))
        vy = self.yconv(self.y_pool(x))
        vz = self.zconv(self.z_pool(x))
        # Sigmoid gates per axis.
        xs = self.sig(vx)
        ys = self.sig(vy)
        zs = self.sig(vz)
        vxyz = self.act(torch.cat((vx, vy, vz), 3)) # b, 1, c, h+w+d
        # Blend tanh-normalised responses with the sigmoid gates, per axis.
        xt = 0.5 * (vxyz[:, :, :, 0:h] + xs)
        yt = 0.5 * (vxyz[:, :, :, h:h+w] + ys)
        zt = 0.5 * (vxyz[:, :, :, h+w:h+w+d] + zs)
        # Reshape to broadcastable 5D factors.
        xs = xs.view(b, c, h, 1, 1)
        ys = ys.view(b, c, 1, w, 1)
        zs = zs.view(b, c, 1, 1, d)
        xt = xt.view(b, c, h, 1, 1)
        yt = yt.view(b, c, 1, w, 1)
        zt = zt.view(b, c, 1, 1, d)
        viola_j = xs * ys + ys*zs + zs*xs # joint (pairwise) term, range 0-3
        viola_m = xs * ys * zs # multiplied (triple) term, range 0-1
        viola_a = self.relu(xt + yt + zt) # additive term, range 0-3
        viola = viola_j + viola_m + viola_a
        # Affine rescale of the combined map, then add its L2-normalised copy.
        viola = 0.1 * viola + 0.3
        viola = viola + l2norm(viola.contiguous().view(b,-1)).view(b,c,h,w,d)
        return x * viola
class GatedAttentionBlock(nn.Module):
    """Additive (gated) attention over a skip connection.

    Both the gating signal ``g`` and the skip features ``x`` are projected to
    ``f_int`` channels with 1x1 convolutions, summed, passed through ReLU and
    collapsed to a single-channel sigmoid mask ``psi`` that rescales ``x``.

    Args:
        spatial_dims: number of spatial dimensions (2 or 3).
        f_int: intermediate channel count of the projections.
        f_g: channels of the gating signal.
        f_l: channels of the skip (local) features.
        dropout: dropout probability inside the 1x1 convolutions.
    """

    def __init__(self, spatial_dims: int, f_int: int, f_g: int, f_l: int, dropout=0.0):
        super().__init__()

        def _conv_norm(in_ch: int, out_ch: int):
            # 1x1 convolution followed by batch normalisation.
            return [
                Convolution(
                    spatial_dims=spatial_dims,
                    in_channels=in_ch,
                    out_channels=out_ch,
                    kernel_size=1,
                    strides=1,
                    padding=0,
                    dropout=dropout,
                    conv_only=True,
                ),
                Norm[Norm.BATCH, spatial_dims](out_ch),
            ]

        self.W_g = nn.Sequential(*_conv_norm(f_g, f_int))
        self.W_x = nn.Sequential(*_conv_norm(f_l, f_int))
        self.psi = nn.Sequential(*_conv_norm(f_int, 1), nn.Sigmoid())
        self.relu = nn.ReLU()

    def forward(self, g: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
        gate = self.relu(self.W_g(g) + self.W_x(x))
        return x * self.psi(gate)
class DynUNetSkipLayer(nn.Module):
    """One recursion level of the UNet topology.

    Combines a downsample path, the next (deeper) layer — another instance of
    this class or the bottleneck — and an upsample path that consumes the skip
    connection. The recursion works around Torchscript's restriction on
    iterating lists of modules and indexing accumulated outputs. All levels
    share the same ``heads`` list, into which each level (except the top one)
    writes its deep-supervision output during the forward pass.
    """

    heads: Optional[List[torch.Tensor]]

    def __init__(self, index, downsample, upsample, next_layer, heads=None, super_head=None):
        super().__init__()
        self.downsample = downsample
        self.next_layer = next_layer
        self.upsample = upsample
        self.super_head = super_head
        self.heads = heads
        self.index = index

    def forward(self, x):
        down = self.downsample(x)
        up = self.upsample(self.next_layer(down), down)
        # Record this level's supervision output; index 0 (the input block)
        # never carries a head.
        if self.super_head is not None and self.heads is not None and self.index > 0:
            self.heads[self.index - 1] = self.super_head(up)
        return up
class UnetUpBlock_x_ddcm(nn.Module):
    """
    An upsampling module that can be used for DynUNet, based on:
    `Automated Design of Deep Learning Methods for Biomedical Image Segmentation <https://arxiv.org/abs/1904.08128>`_.
    `nnU-Net: Self-adapting Framework for U-Net-Based Medical Image Segmentation <https://arxiv.org/abs/1809.10486>`_.

    Upsamples with a transposed convolution, concatenates the skip features
    (optionally gated), optionally applies Viola attention to the concatenated
    map, and refines with a basic conv block.

    Args:
        spatial_dims: number of spatial dimensions.
        in_channels: number of input channels.
        out_channels: number of output channels.
        skip_channels: number of channels of the skip-connection features.
        kernel_size: convolution kernel size.
        stride: convolution stride.
        upsample_kernel_size: convolution kernel size for transposed convolution layers.
        norm_name: feature normalization type and arguments.
        act_name: activation layer type and arguments.
        dropout: dropout probability.
        trans_bias: transposed convolution bias.
        my_att: apply Viola attention to the concatenated features.
        skip_att: gate the skip features with ``GatedAttentionBlock`` first.
    """

    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        skip_channels: int,
        kernel_size: Union[Sequence[int], int],
        stride: Union[Sequence[int], int],
        upsample_kernel_size: Union[Sequence[int], int],
        norm_name: Union[Tuple, str],
        act_name: Union[Tuple, str] = ("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
        dropout: Optional[Union[Tuple, str, float]] = None,
        trans_bias: bool = False,
        my_att: bool = True,
        skip_att: bool = False,
    ):
        super().__init__()
        self.myatt = my_att
        self.skipatt = skip_att
        upsample_stride = upsample_kernel_size
        if skip_att:
            # Gate the skip features using the (interpolated) decoder input.
            self.attention = GatedAttentionBlock(
                spatial_dims=spatial_dims,
                f_g=in_channels,
                f_l=skip_channels,
                f_int=in_channels // 2
                # dropout=0.15
            )
        else: self.attention = None
        self.transp_conv = get_conv_layer(
            spatial_dims,
            in_channels,
            out_channels,
            kernel_size=upsample_kernel_size,
            stride=upsample_stride,
            dropout=dropout,
            bias=trans_bias,
            conv_only=True,
            is_transposed=True,
        )
        if my_att:
            # Viola attention over the concatenated (skip + upsampled) map.
            self.canc_att = viola_attx_ddcm_dyk(
                channel=out_channels + skip_channels,
                reduction=16, min_dim=4, k_size=3
            )
        else:
            self.canc_att = None
        self.conv_block = UnetBasicBlock(
            spatial_dims,
            out_channels + skip_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=1,
            dropout=dropout,
            norm_name=norm_name,
            act_name=act_name,
        )

    def forward(self, inp, skip):
        # The number of channels of `skip` should equal `skip_channels`.
        out = self.transp_conv(inp)
        if self.skipatt:
            # Gating signal is the decoder input resized to the skip's shape.
            att = self.attention(
                F.interpolate(inp, skip.size()[2:], mode='trilinear', align_corners=False),
                skip
            )
            out = torch.cat((att, out), dim=1)
        else:
            out = torch.cat((skip, out), dim=1)
        if self.myatt:
            out = self.canc_att(out)
        out = self.conv_block(out)
        return out
class ViolaUNet(nn.Module):
    """
    This reimplementation of ViolaUNet is based on dynamic UNet of Monai.
    This model is more flexible compared with ``monai.networks.nets.UNet`` in these
    places:

    - Residual connection is supported in conv blocks.
    - Anisotropic kernel sizes and strides can be used in each layer.
    - Deep supervision heads can be added and summed up during inference.
    - Encoder and decoder can have asymmetric filter numbers.
    - Supports both Viola attention and gated attention methods.

    The model supports 2D or 3D inputs and consists of four kinds of blocks:
    one input block, `n` downsample blocks, one bottleneck and `n+1` upsample blocks. Where, `n>0`.
    The first and last kernel and stride values of the input sequences are used for input block and
    bottleneck respectively, and the rest value(s) are used for downsample and upsample blocks.
    Therefore, please ensure that the length of input sequences (``kernel_size`` and ``strides``)
    is no less than 3 in order to have at least one downsample and upsample blocks.

    To meet the requirements of the structure, the input size for each spatial dimension should be divisible
    by the product of all strides in the corresponding dimension. In addition, the minimal spatial size should have
    at least one dimension that has twice the size of the product of all strides.
    For example, if `strides=((1, 2, 4), 2, 2, 1)`, the spatial size should be divisible by `(4, 8, 16)`,
    and the minimal spatial size is `(8, 8, 16)` or `(4, 16, 16)` or `(4, 8, 32)`.

    The output size for each spatial dimension equals to the input size of the corresponding dimension divided by the
    stride in strides[0].
    For example, if `strides=((1, 2, 4), 2, 2, 1)` and the input size is `(64, 32, 32)`, the output size is `(64, 16, 8)`.

    For backwards compatibility with old weights, please set `strict=False` when calling `load_state_dict`.

    Usage example with medical segmentation decathlon dataset is available at:
    https://github.com/Project-MONAI/tutorials/tree/master/modules/dynunet_pipeline.

    Args:
        spatial_dims: number of spatial dimensions.
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: convolution kernel size.
        strides: convolution strides for each blocks.
        upsample_kernel_size: convolution kernel size for transposed convolution layers. The values should
            equal to strides[1:].
        filters: number of output channels for each encoder blocks. Different from nnU-Net, in this implementation we add
            this argument to make the network more flexible. As shown in the third reference, one way to determine
            this argument is like:
            ``[64, 96, 128, 192, 256, 384, 512, 768, 1024][: len(strides)]``.
            The above way is used in the network that wins task 1 in the BraTS21 Challenge.
            If not specified, the way which nnUNet used will be employed. Defaults to ``None``.
        dec_filters: number of output channels for each decoder blocks.
            If not specified, the way which nnUNet used will be employed. Defaults to ``None``.
        dropout: dropout ratio. Defaults to no dropout.
        norm_name: feature normalization type and arguments. Defaults to ``INSTANCE``.
            `INSTANCE_NVFUSER` is a faster version of the instance norm layer, it can be used when:
            1) `spatial_dims=3`, 2) CUDA device is available, 3) `apex` is installed and 4) non-Windows OS is used.
        act_name: activation layer type and arguments. Defaults to ``leakyrelu``.
        deep_supervision: whether to add deep supervision head before output. Defaults to ``False``.
            If ``True``, in training mode, the forward function will output not only the final feature map
            (from `output_block`), but also the feature maps that come from the intermediate up sample layers.
            In order to unify the return type (the restriction of TorchScript), all intermediate
            feature maps are interpolated into the same size as the final feature map and stacked together
            (with a new dimension in the first axis) into one single tensor.
            For instance, if there are two intermediate feature maps with shapes: (1, 2, 16, 12) and
            (1, 2, 8, 6), and the final feature map has the shape (1, 2, 32, 24), then all intermediate feature maps
            will be interpolated into (1, 2, 32, 24), and the stacked tensor will has the shape (1, 3, 2, 32, 24).
            When calculating the loss, you can use torch.unbind to get all feature maps and compute the loss
            one by one with the ground truth, then do a weighted average for all losses to achieve the final loss.
        deep_supr_num: number of feature maps that will output during deep supervision head. The
            value should be larger than 0 and less than the number of up sample layers.
            Defaults to 1.
        res_block: whether to use residual connection based convolution blocks during the network.
            Defaults to ``False``.
        trans_bias: whether to set the bias parameter in transposed convolution layers. Defaults to ``False``.
        viola_att: whether to use viola attention module during the network. Defaults to ``True``.
        gated_att: whether to use gated attention module during the network. Defaults to ``False``.
        sum_deep_supr: whether to sum up all output (deep supervision) during inference. Defaults to ``False``.
    """

    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        kernel_size: Sequence[Union[Sequence[int], int]],
        strides: Sequence[Union[Sequence[int], int]],
        upsample_kernel_size: Sequence[Union[Sequence[int], int]],
        filters: Optional[Sequence[int]] = None, # encoder widths, top to bottom
        dec_filters: Optional[Sequence[int]] = None, # decoder widths, bottom to top
        dropout: Optional[Union[Tuple, str, float]] = None,
        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
        act_name: Union[Tuple, str] = ("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
        deep_supervision: bool = False,
        deep_supr_num: int = 1,
        res_block: bool = False,
        trans_bias: bool = False,
        viola_att: bool = True,
        gated_att: bool = False,
        sum_deep_supr: bool = False,
    ):
        super().__init__()
        self.spatial_dims = spatial_dims
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.strides = strides
        self.upsample_kernel_size = upsample_kernel_size
        self.norm_name = norm_name
        self.act_name = act_name
        self.dropout = dropout
        self.conv_block = UnetResBlock if res_block else UnetBasicBlock
        self.trans_bias = trans_bias
        self.my_att = viola_att
        self.skip_att = gated_att
        self.sum_deep_supr = sum_deep_supr
        if filters is not None:
            self.filters = filters
            self.check_filters()
        else:
            # nnU-Net default: double the width per level, capped at 320 (3D) / 512 (2D).
            self.filters = [min(2 ** (5 + i), 320 if spatial_dims == 3 else 512) for i in range(len(strides))]
        if dec_filters is None:
            self.dec_filters = self.filters
        else:
            self.dec_filters = dec_filters
        self.input_block = self.get_input_block()
        self.downsamples = self.get_downsamples()
        self.bottleneck = self.get_bottleneck()
        self.upsamples = self.get_upsamples()
        self.output_block = self.get_output_block(0)
        self.deep_supervision = deep_supervision
        self.deep_supr_num = deep_supr_num
        # initialize the typed list of supervision head outputs so that Torchscript can recognize what's going on
        self.heads: List[torch.Tensor] = [torch.rand(1)] * self.deep_supr_num
        if self.deep_supervision:
            self.deep_supervision_heads = self.get_deep_supervision_heads()
            self.check_deep_supr_num()
        self.apply(self.initialize_weights)
        self.check_kernel_stride()

        def create_skips(index, downsamples, upsamples, bottleneck, superheads=None):
            """
            Construct the UNet topology as a sequence of skip layers terminating with the bottleneck layer. This is
            done recursively from the top down since a recursive nn.Module subclass is being used to be compatible
            with Torchscript. Initially the length of `downsamples` will be one more than that of `superheads`
            since the `input_block` is passed to this function as the first item in `downsamples`, however this
            shouldn't be associated with a supervision head.
            """
            if len(downsamples) != len(upsamples):
                raise ValueError(f"{len(downsamples)} != {len(upsamples)}")
            if len(downsamples) == 0: # bottom of the network, pass the bottleneck block
                return bottleneck
            if superheads is None:
                next_layer = create_skips(1 + index, downsamples[1:], upsamples[1:], bottleneck)
                return DynUNetSkipLayer(index, downsample=downsamples[0], upsample=upsamples[0], next_layer=next_layer)
            super_head_flag = False
            if index == 0: # don't associate a supervision head with self.input_block
                rest_heads = superheads
            else:
                if len(superheads) > 0:
                    super_head_flag = True
                    rest_heads = superheads[1:]
                else:
                    rest_heads = nn.ModuleList()
            # create the next layer down, this will stop at the bottleneck layer
            next_layer = create_skips(1 + index, downsamples[1:], upsamples[1:], bottleneck, superheads=rest_heads)
            if super_head_flag:
                return DynUNetSkipLayer(
                    index,
                    downsample=downsamples[0],
                    upsample=upsamples[0],
                    next_layer=next_layer,
                    heads=self.heads,
                    super_head=superheads[0],
                )
            return DynUNetSkipLayer(index, downsample=downsamples[0], upsample=upsamples[0], next_layer=next_layer)

        if not self.deep_supervision:
            self.skip_layers = create_skips(
                0, [self.input_block] + list(self.downsamples), self.upsamples[::-1], self.bottleneck
            )
        else:
            self.skip_layers = create_skips(
                0,
                [self.input_block] + list(self.downsamples),
                self.upsamples[::-1],
                self.bottleneck,
                superheads=self.deep_supervision_heads,
            )

    def check_kernel_stride(self):
        """Validate that kernel/stride sequences agree in length and spatial dims."""
        kernels, strides = self.kernel_size, self.strides
        error_msg = "length of kernel_size and strides should be the same, and no less than 3."
        if len(kernels) != len(strides) or len(kernels) < 3:
            raise ValueError(error_msg)
        for idx, k_i in enumerate(kernels):
            kernel, stride = k_i, strides[idx]
            if not isinstance(kernel, int):
                error_msg = f"length of kernel_size in block {idx} should be the same as spatial_dims."
                if len(kernel) != self.spatial_dims:
                    raise ValueError(error_msg)
            if not isinstance(stride, int):
                error_msg = f"length of stride in block {idx} should be the same as spatial_dims."
                if len(stride) != self.spatial_dims:
                    raise ValueError(error_msg)

    def check_deep_supr_num(self):
        """Validate that 0 < deep_supr_num < number of upsample layers."""
        deep_supr_num, strides = self.deep_supr_num, self.strides
        num_up_layers = len(strides) - 1
        if deep_supr_num >= num_up_layers:
            raise ValueError("deep_supr_num should be less than the number of up sample layers.")
        if deep_supr_num < 1:
            raise ValueError("deep_supr_num should be larger than 0.")

    def check_filters(self):
        """Validate the user-supplied filters and truncate to len(strides)."""
        filters = self.filters
        if len(filters) < len(self.strides):
            raise ValueError("length of filters should be no less than the length of strides.")
        else:
            self.filters = filters[: len(self.strides)]

    def forward(self, x):
        out = self.skip_layers(x)
        out = self.output_block(out)
        if self.training and self.deep_supervision:
            # Training: stack the final map with all (resized) supervision maps.
            out_all = [out]
            for feature_map in self.heads:
                out_all.append(interpolate(feature_map, out.shape[2:]))
            return torch.stack(out_all, dim=1)
        elif self.deep_supervision and self.sum_deep_supr:
            # Inference: softmax-sum the supervision maps with halving weights.
            out = F.softmax(out, 1)
            for i, feature_map in enumerate(self.heads):
                out_ds = F.softmax(interpolate(feature_map, out.shape[2:]), 1)
                out_ds = 0.5**(i+1) * out_ds
                out = out + out_ds
        # Otherwise return the raw logits of the final output block.
        return out

    def get_input_block(self):
        """First conv block: in_channels -> filters[0], kernel/stride index 0."""
        return self.conv_block(
            self.spatial_dims,
            self.in_channels,
            self.filters[0],
            self.kernel_size[0],
            self.strides[0],
            self.norm_name,
            self.act_name,
            dropout=self.dropout,
        )

    def get_bottleneck(self):
        """Deepest conv block: filters[-2] -> filters[-1], kernel/stride index -1."""
        return self.conv_block(
            self.spatial_dims,
            self.filters[-2],
            self.filters[-1],
            self.kernel_size[-1],
            self.strides[-1],
            self.norm_name,
            self.act_name,
            dropout=self.dropout,
        )

    def get_output_block(self, idx: int):
        """1x1 output head mapping dec_filters[idx] to out_channels."""
        return UnetOutBlock(self.spatial_dims, self.dec_filters[idx], self.out_channels, dropout=self.dropout)

    def get_downsamples(self):
        """Downsample blocks between the input block and the bottleneck."""
        inp, out = self.filters[:-2], self.filters[1:-1]
        strides, kernel_size = self.strides[1:-1], self.kernel_size[1:-1]
        return self.get_module_list(inp, out, out, kernel_size, strides, self.conv_block)

    def get_upsamples(self):
        """Upsample blocks, deepest first; decoder widths come from dec_filters."""
        inp, out = (self.filters[-1], *self.dec_filters[::-1]), self.dec_filters[::-1]
        skip_c = self.filters[:-1][::-1]
        strides, kernel_size = self.strides[1:][::-1], self.kernel_size[1:][::-1]
        upsample_kernel_size = self.upsample_kernel_size[::-1]
        return self.get_module_list(
            inp, out, skip_c, kernel_size, strides, UnetUpBlock_x_ddcm, upsample_kernel_size, trans_bias=self.trans_bias
        )

    def get_module_list(
        self,
        in_channels: List[int],
        out_channels: List[int],
        skip_channels: List[int],
        kernel_size: Sequence[Union[Sequence[int], int]],
        strides: Sequence[Union[Sequence[int], int]],
        conv_block: nn.Module,
        upsample_kernel_size: Optional[Sequence[Union[Sequence[int], int]]] = None,
        trans_bias: bool = False,
    ):
        """Build a ModuleList of conv blocks (upsample variant when
        upsample_kernel_size is given); zip truncates to the shortest input."""
        layers = []
        if upsample_kernel_size is not None:
            for in_c, out_c, skip_c, kernel, stride, up_kernel in zip(
                in_channels, out_channels, skip_channels, kernel_size, strides, upsample_kernel_size
            ):
                params = {
                    "spatial_dims": self.spatial_dims,
                    "in_channels": in_c,
                    "out_channels": out_c,
                    "skip_channels": skip_c,
                    "kernel_size": kernel,
                    "stride": stride,
                    "norm_name": self.norm_name,
                    "act_name": self.act_name,
                    "dropout": self.dropout,
                    "upsample_kernel_size": up_kernel,
                    "trans_bias": trans_bias,
                    "my_att": self.my_att,
                    "skip_att": self.skip_att,
                }
                layer = conv_block(**params)
                layers.append(layer)
        else:
            for in_c, out_c, kernel, stride in zip(in_channels, out_channels, kernel_size, strides):
                params = {
                    "spatial_dims": self.spatial_dims,
                    "in_channels": in_c,
                    "out_channels": out_c,
                    "kernel_size": kernel,
                    "stride": stride,
                    "norm_name": self.norm_name,
                    "act_name": self.act_name,
                    "dropout": self.dropout,
                }
                layer = conv_block(**params)
                layers.append(layer)
        return nn.ModuleList(layers)

    def get_deep_supervision_heads(self):
        """One output head per supervised decoder level (dec_filters[1:])."""
        return nn.ModuleList([self.get_output_block(i + 1) for i in range(self.deep_supr_num)])

    @staticmethod
    def initialize_weights(module):
        """Kaiming-normal for conv weights, N(1, 0.02) for batch-norm weights;
        biases zeroed. Applied via ``self.apply`` in ``__init__``."""
        if isinstance(module, (nn.Conv3d, nn.Conv2d, nn.ConvTranspose3d, nn.ConvTranspose2d)):
            nn.init.kaiming_normal_(module.weight, a=0.01, mode='fan_in', nonlinearity='leaky_relu')
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)
        elif isinstance(module, (nn.BatchNorm3d, nn.BatchNorm2d)):
            nn.init.normal_(module.weight.data, 1.0, 0.02)
            module.bias.data.zero_()
        elif isinstance(module, (nn.Linear)):
            nn.init.normal_(module.weight.data, 1.0, 0.001)
            if module.bias is not None:
                module.bias.data.zero_()
BlockGCL | BlockGCL-master/dataloader.py | import os.path as osp
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from torch_geometric.data import Data
from torch_geometric.datasets import Planetoid, Amazon, Coauthor, WikiCS
from torch_geometric.transforms import Compose, NormalizeFeatures, ToUndirected
from ogb.nodeproppred import PygNodePropPredDataset
def load_data(data_dir, dataset_name,
              transform=Compose([ToUndirected()]),
              mask_dir="./mask",
              load_mask=True,
              save_mask=True):
    """Load a PyG dataset and attach train/val/test masks to its data.

    Args:
        data_dir: root directory for dataset downloads/caches.
        dataset_name: one of 'Cora', 'Citeseer', 'Pubmed', 'WikiCS',
            'Computers', 'Photo', 'CS', 'Physics', 'ogbn-arxiv', 'ogbn-mag'.
        transform: PyG transform applied to the data.
        mask_dir: directory holding preset mask files ("<dataset_name>.pt").
        load_mask: reuse a preset mask from ``mask_dir`` when one exists.
            Ignored (forced off) for datasets that ship canonical splits.
        save_mask: persist a newly created mask to ``mask_dir``.
            Ignored (forced off) for datasets that ship canonical splits.

    Returns:
        The dataset, with ``train_mask``/``val_mask``/``test_mask`` set.

    Raises:
        ValueError: if ``dataset_name`` is not supported.
    """
    # NOTE(review): an unconditional `load_mask = save_mask = True` used to sit
    # here, silently overriding the caller's arguments; it has been removed so
    # the parameters take effect (their defaults are True, so default behavior
    # is unchanged).
    if dataset_name in ['Cora', 'Citeseer', 'Pubmed']:
        dataset = Planetoid(root=data_dir, name=dataset_name,
                            transform=transform, split="full")
        # Canonical split ships with the dataset; no mask file needed.
        load_mask = save_mask = False
    elif dataset_name in ['WikiCS']:
        dataset = WikiCS(root=osp.join(data_dir, dataset_name),
                         transform=transform)
    elif dataset_name in ['Computers', 'Photo']:
        dataset = Amazon(root=data_dir, name=dataset_name, transform=transform)
    elif dataset_name in ['CS', 'Physics']:
        dataset = Coauthor(root=data_dir, name=dataset_name,
                           transform=transform)
    elif dataset_name in ['ogbn-arxiv']:
        dataset = PygNodePropPredDataset(root=data_dir, name=dataset_name,
                                         transform=transform)
        dataset.data.y = dataset.data.y.squeeze()
        load_mask = save_mask = False
    elif dataset_name in ['ogbn-mag']:
        dataset = PygNodePropPredDataset(name=dataset_name, root=data_dir,
                                         transform=Compose([
                                             ToUndirected()
                                         ]))
        rel_data = dataset[0]
        # We are only interested in paper <-> paper relations.
        data = Data(
            x=rel_data.x_dict['paper'],
            edge_index=rel_data.edge_index_dict[('paper', 'cites', 'paper')],
            y=rel_data.y_dict['paper'])
        data = transform(data)
        dataset.data = data
        dataset.data.y = dataset.data.y.squeeze()
        load_mask = save_mask = False
    else:
        raise ValueError("Dataset {} not implemented.".format(dataset_name))
    mask_path = osp.join(mask_dir, "{}.pt".format(dataset_name))
    if osp.exists(mask_path) and load_mask:
        train_mask, val_mask, test_mask = load_preset_mask(mask_path)
    else:
        train_mask, val_mask, test_mask = create_mask(
            dataset=dataset,
            dataset_name=dataset_name,
            mask_path=mask_path if save_mask else None)
    dataset.data.train_mask = train_mask
    dataset.data.val_mask = val_mask
    dataset.data.test_mask = test_mask
    return dataset
def create_mask(dataset, dataset_name='WikiCS', data_seed=0, mask_path=None):
    r"""Create train/val/test boolean masks for each supported dataset.

    Args:
        dataset: a PyG dataset; ``dataset[0]`` must expose ``y`` (and preset
            mask attributes for the Planetoid/WikiCS families).
        dataset_name: which splitting strategy to apply.
        data_seed: random seed for the random 10/10/80 node splits.
        mask_path: if given, the three masks are persisted there via
            ``torch.save``.

    Returns:
        Tuple ``(train_mask, val_mask, test_mask)``.

    Raises:
        ValueError: for an unsupported ``dataset_name`` (previously this fell
            through and crashed with an ``UnboundLocalError``).
    """
    data = dataset[0]
    if dataset_name in ['Cora', 'Citeseer', 'Pubmed']:
        # These datasets ship canonical masks; reuse them directly.
        train_mask, val_mask, test_mask = \
            data.train_mask, data.val_mask, data.test_mask
    elif dataset_name in ['WikiCS']:
        # WikiCS provides 20 preset splits; transpose to (splits, nodes) and
        # replicate the single test mask across the 20 splits.
        train_mask = data.train_mask.t()
        val_mask = data.val_mask.t()
        test_mask = data.test_mask.repeat(20, 1)
    elif dataset_name in ['Computers', 'Photo', 'CS', 'Physics']:
        # Random 10% / 10% / 80% node split.
        idx = np.arange(len(data.y))
        train_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        val_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        test_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        train_idx, test_idx = train_test_split(
            idx, test_size=0.8, random_state=data_seed)
        train_idx, val_idx = train_test_split(
            train_idx, test_size=0.5, random_state=data_seed)
        train_mask[train_idx] = True
        val_mask[val_idx] = True
        test_mask[test_idx] = True
    elif dataset_name in ['ogbn-arxiv']:
        # Use the official OGB split indices.
        split_idx = dataset.get_idx_split()
        train_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        val_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        test_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        train_mask[split_idx['train']] = True
        val_mask[split_idx['valid']] = True
        test_mask[split_idx['test']] = True
    elif dataset_name in ['ogbn-mag']:
        # Heterogeneous OGB split: only the 'paper' node indices are used.
        split_idx = dataset.get_idx_split()
        train_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        val_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        test_mask = torch.zeros(data.y.size(0), dtype=torch.bool)
        train_mask[split_idx['train']['paper']] = True
        val_mask[split_idx['valid']['paper']] = True
        test_mask[split_idx['test']['paper']] = True
    else:
        raise ValueError("Dataset {} not implemented.".format(dataset_name))
    # save preset mask
    if mask_path is not None:
        torch.save([train_mask, val_mask, test_mask], mask_path)
    return train_mask, val_mask, test_mask
def load_preset_mask(mask_path):
    """Load a previously saved ``[train_mask, val_mask, test_mask]`` list
    from ``mask_path`` (written by ``create_mask`` via ``torch.save``)."""
    return torch.load(mask_path)
| 5,008 | 37.236641 | 81 | py |
BlockGCL | BlockGCL-master/loss.py | import torch
import torch.nn.functional as F
def inv_dec_loss(h1, h2, lambd):
    """Invariance + decorrelation loss over two embedding views.

    Computes the (N-normalised) cross-correlation ``h1^T h2`` and the two
    auto-correlations. The invariance term pulls the cross-correlation
    diagonal towards 1 (maximised agreement between views); the decorrelation
    terms push each auto-correlation towards the identity, penalising
    redundancy between feature dimensions.

    Args:
        h1: (N, D) embeddings of view 1.
        h2: (N, D) embeddings of view 2.
        lambd: weight of the decorrelation terms.

    Returns:
        Scalar loss tensor.
    """
    num_samples = h1.size(0)
    cross = (h1.T @ h2) / num_samples
    auto1 = (h1.T @ h1) / num_samples
    auto2 = (h2.T @ h2) / num_samples
    invariance = -torch.trace(cross)
    eye = torch.eye(cross.shape[0]).to(h1.device)
    decorrelation = (eye - auto1).pow(2).sum() + (eye - auto2).pow(2).sum()
    return invariance + lambd * decorrelation
BlockGCL | BlockGCL-master/utils.py | import os
import random
import numpy as np
import torch
import torch.nn.functional as F
from torch_sparse import SparseTensor
def set_random_seeds(random_seed=0):
    r"""Seed every RNG used by the project (torch CPU/CUDA, numpy, random)
    and force deterministic cuDNN behaviour."""
    seeders = (
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
        np.random.seed,
        random.seed,
    )
    for seed_fn in seeders:
        seed_fn(random_seed)
    # Deterministic cuDNN kernels; disable autotuning for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def edgeidx2sparse(edge_index, num_nodes):
    """Convert a COO ``edge_index`` tensor of shape (2, E) into a square
    ``SparseTensor`` of size (num_nodes, num_nodes) on the same device."""
    return SparseTensor.from_edge_index(
        edge_index, sparse_sizes=(num_nodes, num_nodes)
    ).to(edge_index.device)
| 658 | 27.652174 | 55 | py |
BlockGCL | BlockGCL-master/model.py | import torch
import torch.nn as nn
from torch_geometric.nn import BatchNorm, GCNConv, LayerNorm, SAGEConv, Sequential
def get_activation(name='ReLU'):
    """Return a fresh activation module by name.

    Args:
        name: activation identifier; one of "ReLU" or "PReLU" (case-sensitive).

    Returns:
        A new ``nn.ReLU`` or ``nn.PReLU`` instance.

    Raises:
        NotImplementedError: for any unsupported activation name.
    """
    if name == 'ReLU':
        return nn.ReLU()
    elif name == "PReLU":
        return nn.PReLU()
    else:
        # BUG FIX: error message typo corrected ("Acitivation" -> "Activation").
        raise NotImplementedError("Activation {} not implemented!".format(name))
class ConvLayer(nn.Module):
    """One graph-convolution block: graph conv -> optional BatchNorm1d -> activation.

    Args:
        in_dim: input feature dimension.
        out_dim: output feature dimension.
        layer_name: graph-conv type; only "GCN" is supported (case-insensitive).
        act_name: activation name, forwarded to ``get_activation``.
        batchnorm: if True, apply ``nn.BatchNorm1d(out_dim)`` after the conv.
    """

    def __init__(self, in_dim, out_dim, layer_name="gcn", act_name="ReLU", batchnorm=True) -> None:
        super().__init__()
        self.in_dim, self.out_dim = in_dim, out_dim
        self.batchnorm = None
        self.layer = self.get_layer(layer_name)
        if batchnorm:
            self.batchnorm = nn.BatchNorm1d(out_dim)
        self.act = get_activation(act_name)

    def reset_parameters(self):
        """Re-initialise the conv layer and (if present) the batch norm."""
        self.layer.reset_parameters()
        if self.batchnorm is not None:
            self.batchnorm.reset_parameters()

    def get_layer(self, name="GCN"):
        """Build the graph-conv layer by name.

        BUG FIX: the comparison is now case-insensitive — previously the
        constructor's default ``layer_name="gcn"`` never matched the literal
        "GCN" check and always raised NotImplementedError.
        """
        if name.upper() == "GCN":
            return GCNConv(in_channels=self.in_dim, out_channels=self.out_dim)
        else:
            raise NotImplementedError("Layer {} not implemented!".format(name))

    def forward(self, x, edge_index):
        # NOTE: parameter renamed from the misspelled "egde_index"; all
        # in-repo callers pass it positionally.
        x = self.layer(x, edge_index)
        if self.batchnorm is not None:
            x = self.batchnorm(x)
        return self.act(x)
class GCN(nn.Module):
    """Stacked ``ConvLayer`` encoder.

    ``forward`` and ``embeds`` detach the input to every layer, so gradients
    do not flow across layer boundaries — presumably this implements the
    blockwise/greedy training scheme of this repo (TODO confirm against the
    training loop). ``get_embeding`` runs the stack without detaching.

    Args:
        in_dim: input feature dimension.
        hid_dims: list of hidden dimensions, one entry per layer.
        args: namespace providing ``layer_name``, ``act_name``, ``batchnorm``.
    """
    def __init__(self, in_dim, hid_dims, args):
        super().__init__()
        dims = [in_dim] + hid_dims
        assert len(dims) >= 2
        self.layers = nn.ModuleList()
        for in_dim, out_dim in zip(dims[:-1], dims[1:]):
            self.layers.append(ConvLayer(in_dim, out_dim, args.layer_name, args.act_name, args.batchnorm))
    def forward(self, x, edge_index):
        """Return the output of every layer; inputs are detached per layer."""
        outputs = []
        for layer in self.layers:
            # detach() stops gradients from crossing the layer boundary.
            x = layer(x.detach(), edge_index)
            outputs.append(x)
        return outputs
    @torch.no_grad()
    def embeds(self, x, edge_index):
        """Final-layer embedding, no gradients (mirrors forward's detaching)."""
        for layer in self.layers:
            x = layer(x.detach(), edge_index)
        return x
    def reset_parameters(self):
        # Delegate to each ConvLayer's reset.
        for layer in self.layers:
            layer.reset_parameters()
    @torch.no_grad()
    def get_embeding(self, data):
        """Full-stack embedding of ``data`` (eval mode, no detaching).
        NOTE: name kept misspelled ("embeding") for API compatibility."""
        self.eval()
        x, edge_index = data.x, data.edge_index
        for layer in self.layers:
            x = layer(x, edge_index)
        return x
BlockGCL | BlockGCL-master/logger.py | import functools
import logging
import os
import sys
import torch
from typing import Optional
from termcolor import colored
__all__ = ["setup_logger", "get_logger"]
# cache the opened file object, so that different calls to `setup_logger`
# with the same file name can safely write to the same file.
@functools.lru_cache(maxsize=None)
def setup_logger(
    output: Optional[str] = None, distributed_rank: int = 0, *, mode: str = 'w',
    color: bool = True, name: str = "exp", abbrev_name: Optional[str] = None
):
    """Initialize a logger and set its verbosity level to "DEBUG".

    Parameters
    ----------
    output : Optional[str], optional
        a file name or a directory to save log. If None, will not save log file.
        If ends with ".txt" or ".log", assumed to be a file name.
        Otherwise, logs will be saved to `output/log.txt`.
    distributed_rank : int, optional
        used for distributed training, by default 0
    mode : str, optional
        mode for the output file (if output is given), by default 'w'.
    color : bool, optional
        whether to use color when printing, by default True
    name : str, optional
        the root module name of this logger, by default "exp"
        (the docstring previously claimed "graphwar", which did not match
        the actual default)
    abbrev_name : Optional[str], optional
        an abbreviation of the module, to avoid long names in logs.
        Set to "" to not log the root module in logs.
        By default, None (falls back to `name`).

    Returns
    -------
    logging.Logger
        a logger

    Example
    -------
    >>> logger = setup_logger(name='my exp')
    >>> logger.info('message')
    [12/19 17:01:43 my exp]: message
    >>> # specify output files
    >>> logger = setup_logger(output='log.txt', name='my exp')
    # additive, by default mode='w'
    >>> logger = setup_logger(output='log.txt', name='my exp', mode='a')
    # once your logger is set, you can call it by
    >>> logger = get_logger(name='my exp')
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False  # do not bubble records up to the root logger

    if abbrev_name is None:
        abbrev_name = name

    plain_formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
    )
    # stdout logging: master process only
    if distributed_rank == 0:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        if color:
            formatter = _ColorfulFormatter(
                colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
                datefmt="%m/%d %H:%M:%S",
                root_name=name,
                abbrev_name=str(abbrev_name),
            )
        else:
            formatter = plain_formatter
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    # file logging: all workers
    if output is not None:
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        if distributed_rank > 0:
            filename = filename + ".rank{}".format(distributed_rank)
        dirs = os.path.dirname(filename)
        if dirs:
            # exist_ok=True avoids the isdir-then-makedirs race of the original
            os.makedirs(dirs, exist_ok=True)
        file_handle = logging.FileHandler(filename=filename, mode=mode)
        file_handle.setLevel(logging.DEBUG)
        file_handle.setFormatter(plain_formatter)
        logger.addHandler(file_handle)

    return logger
def get_logger(name: str = "GraphWar"):
    """Return the standard :mod:`logging` logger registered under *name*.

    Parameters
    ----------
    name : str, optional
        name of the logger, by default "GraphWar"

    Returns
    -------
    logging.Logger
        the logger for the given name
    """
    return logging.getLogger(name)
class _ColorfulFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
self._root_name = kwargs.pop("root_name") + "."
self._abbrev_name = kwargs.pop("abbrev_name", "")
if len(self._abbrev_name):
self._abbrev_name = self._abbrev_name + "."
super(_ColorfulFormatter, self).__init__(*args, **kwargs)
def formatMessage(self, record):
record.name = record.name.replace(self._root_name, self._abbrev_name)
log = super(_ColorfulFormatter, self).formatMessage(record)
if record.levelno == logging.WARNING:
prefix = colored("WARNING", "red", attrs=["blink"])
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
prefix = colored("ERROR", "red", attrs=["blink", "underline"])
else:
return log
return prefix + " " + log
class Statistics(object):
    """Collects (valid, test) accuracy pairs per run and prints mean/std
    summaries at each run's best-validation epoch."""

    def __init__(self, runs, info=None):
        # `info`: free-form experiment description (not used internally)
        self.info = info
        # one list per run; entries are (valid_acc, test_acc) pairs in [0, 1]
        self.results = [[] for _ in range(runs)]

    def add_result(self, run, result):
        # `result` must be a (valid_acc, test_acc) pair
        assert len(result) == 2
        assert run >= 0 and run < len(self.results)
        self.results[run].append(result)

    def print_statistics(self, run=None, f=sys.stdout, last_best=False):
        """Print a summary to stream `f` (accuracies reported in percent).

        With `run`: report that run's best validation score and the test score
        at the same epoch (returns None). Without `run`: aggregate over all
        runs and return (mean, std) of the per-run test accuracy at each run's
        best-validation epoch. `last_best=True` picks the *last* epoch that
        attains the maximum validation score instead of the first.
        """
        if run is not None:
            result = 100 * torch.tensor(self.results[run])
            if last_best:
                # get last max value index by reversing result tensor
                argmax = result.size(0) - result[:, 0].flip(dims=[0]).argmax().item() - 1
            else:
                argmax = result[:, 0].argmax().item()
            print(f'Run {run + 1:02d}:', file=f)
            print(f'Highest Valid: {result[:, 0].max():.2f}', file=f)
            print(f'Highest Eval Point: {argmax + 1}', file=f)
            print(f'   Final Test: {result[argmax, 1]:.2f}', file=f)
        else:
            result = 100 * torch.tensor(self.results)
            best_results = []
            for r in result:
                valid = r[:, 0].max().item()
                if last_best:
                    # get last max value index by reversing result tensor
                    argmax = r.size(0) - r[:, 0].flip(dims=[0]).argmax().item() - 1
                else:
                    argmax = r[:, 0].argmax().item()
                test = r[argmax, 1].item()
                best_results.append((valid, test))
            best_result = torch.tensor(best_results)
            print(f'All runs:', file=f)
            r = best_result[:, 0]
            print(f'Highest Valid: {r.mean():.2f} ± {r.std():.2f}', file=f)
            r = best_result[:, 1]
            print(f'   Final Test: {r.mean():.2f} ± {r.std():.2f}', file=f)
            return r.mean().cpu().item(), r.std().cpu().item()
BlockGCL | BlockGCL-master/eval.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
def test(embeds, data, num_classes, FLAGS, device="cpu"):
    """Full-batch linear-probe evaluation of node embeddings (5 runs)."""
    return node_cls_downstream_task_eval(
        input_emb=embeds,
        data=data,
        num_classes=num_classes,
        lr=FLAGS.lr_cls,
        wd=FLAGS.wd_cls,
        cls_epochs=FLAGS.epochs_cls,
        cls_runs=5,
        device=device,
    )
def batch_test(embeds, data, num_classes, FLAGS, device="cpu"):
    """Mini-batch linear-probe evaluation of node embeddings (5 runs)."""
    return batch_node_cls_downstream_task_eval(
        input_emb=embeds,
        data=data,
        num_classes=num_classes,
        lr=FLAGS.lr_cls,
        wd=FLAGS.wd_cls,
        cls_epochs=FLAGS.epochs_cls,
        cls_runs=5,
        device=device,
    )
def eval_acc(model, x, y):
    """Classification accuracy of `model` on features `x` vs labels `y`
    (evaluated without gradient tracking)."""
    model.eval()
    with torch.no_grad():
        logits = model(x)
    predictions = logits.argmax(dim=1).squeeze(-1)
    return (predictions == y).float().mean().item()
class Classifier(nn.Module):
    """Single linear layer followed by log-softmax (a linear probe)."""

    def __init__(self, in_dim, out_dim):
        super(Classifier, self).__init__()
        self.linear = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        return F.log_softmax(self.linear(x), dim=-1)

    def reset_parameters(self):
        self.linear.reset_parameters()
def batch_train_cls(cls, x, y, train_mask, val_mask, test_mask,
                    lr=1e-2, weight_decay=1e-5, epochs=100):
    """Train classifier `cls` with mini-batches; select on validation accuracy.

    Re-initializes `cls`, optimizes NLL loss over shuffled batches of 8192
    training nodes, evaluates once per epoch, and returns the best
    (val_acc, test_acc) pair, where test_acc is taken at the best-val epoch.
    """
    cls.reset_parameters()
    optimizer = torch.optim.AdamW(
        cls.parameters(), lr=lr, weight_decay=weight_decay)
    train_x, train_y = x[train_mask], y[train_mask]
    val_x, val_y = x[val_mask], y[val_mask]
    test_x, test_y = x[test_mask], y[test_mask]
    # iterate over index batches so the embedding tensor is sliced per batch
    train_loader = DataLoader(torch.arange(train_x.size(0)), pin_memory=False, batch_size=8192, shuffle=True)
    best_val_acc, best_test_acc = 0.0, 0.0
    for _ in range(epochs):
        for train_idx in train_loader:
            cls.train()
            optimizer.zero_grad()
            output = cls(train_x[train_idx])
            loss = F.nll_loss(output, train_y[train_idx])
            loss.backward()
            optimizer.step()
        # model selection: keep the test score of the best-validation epoch
        val_acc, test_acc = eval_acc(cls, val_x, val_y), eval_acc(cls, test_x, test_y)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc
    return best_val_acc, best_test_acc
def train_cls(cls, x, y, train_mask, val_mask, test_mask,
              lr=1e-2, weight_decay=1e-5, epochs=100):
    """Full-batch variant of `batch_train_cls`.

    Re-initializes `cls`, optimizes NLL loss on the whole training split each
    epoch, and returns the best (val_acc, test_acc) pair, where test_acc is
    taken at the best-validation epoch.
    """
    cls.reset_parameters()
    optimizer = torch.optim.AdamW(
        cls.parameters(), lr=lr, weight_decay=weight_decay)
    train_x, train_y = x[train_mask], y[train_mask]
    val_x, val_y = x[val_mask], y[val_mask]
    test_x, test_y = x[test_mask], y[test_mask]
    best_val_acc, best_test_acc = 0.0, 0.0
    for _ in range(epochs):
        cls.train()
        optimizer.zero_grad()
        output = cls(train_x)
        loss = F.nll_loss(output, train_y)
        loss.backward()
        optimizer.step()
        # model selection: keep the test score of the best-validation epoch
        val_acc, test_acc = eval_acc(cls, val_x, val_y), eval_acc(cls, test_x, test_y)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc
    return best_val_acc, best_test_acc
def batch_node_cls_downstream_task_eval(input_emb, data, num_classes,
                                        lr, wd, cls_epochs=100,
                                        cls_runs=10, device="cpu"):
    """Mini-batch linear-probe evaluation, repeated `cls_runs` times.

    Returns two parallel lists (best validation accuracies, matching test
    accuracies), one entry per run. The same Classifier instance is reused;
    it is re-initialized inside `batch_train_cls` on every run.
    """
    emb_dim = input_emb.size(1)
    classifier = Classifier(emb_dim, num_classes).to(device)
    all_val_acc, all_test_acc = [], []
    for _ in range(cls_runs):
        val_acc, test_acc = batch_train_cls(
            classifier, input_emb, data.y,
            data.train_mask, data.val_mask, data.test_mask,
            lr=lr, weight_decay=wd, epochs=cls_epochs)
        all_val_acc.append(val_acc)
        all_test_acc.append(test_acc)
    return all_val_acc, all_test_acc
def node_cls_downstream_task_eval(input_emb, data, num_classes,
                                  lr, wd, cls_epochs=100,
                                  cls_runs=10, device="cpu"):
    """Full-batch linear-probe evaluation, repeated `cls_runs` times.

    Returns two parallel lists (best validation accuracies, matching test
    accuracies), one entry per run. The same Classifier instance is reused;
    it is re-initialized inside `train_cls` on every run.
    """
    emb_dim = input_emb.size(1)
    classifier = Classifier(emb_dim, num_classes).to(device)
    all_val_acc, all_test_acc = [], []
    for _ in range(cls_runs):
        val_acc, test_acc = train_cls(
            classifier, input_emb, data.y,
            data.train_mask, data.val_mask, data.test_mask,
            lr=lr, weight_decay=wd, epochs=cls_epochs)
        all_val_acc.append(val_acc)
        all_test_acc.append(test_acc)
    return all_val_acc, all_test_acc
| 4,642 | 31.468531 | 109 | py |
BlockGCL | BlockGCL-master/train.py | import copy
import os.path as osp
import numpy as np
import torch
import torch.nn.functional as F
from absl import app, flags
from torch.optim import AdamW
# custom modules
from logger import setup_logger
from utils import set_random_seeds, edgeidx2sparse
from transforms import get_graph_drop_transform
from model import GCN
from loss import inv_dec_loss
from eval import test, batch_test
from dataloader import load_data
FLAGS = flags.FLAGS

# Reproducibility / hardware.
flags.DEFINE_integer(
    'model_seed', 123, 'Random seed used for model initialization and training.')
flags.DEFINE_integer(
    'data_seed', 0, 'Random seed used to generate train/val/test split.')
flags.DEFINE_integer('gpu_id', 0, 'The id of GPU to use. -1 indicates CPU.')
# Dataset.
flags.DEFINE_enum('dataset', 'ogbn-mag',
                  ['Cora', 'Citeseer', 'Pubmed', 'Computers', 'Photo',
                   'CS', 'Physics', 'WikiCS', 'ogbn-arxiv', 'ogbn-mag'],
                  'Which graph dataset to use.')
flags.DEFINE_string('data_dir', '~/public_data/pyg_data/',
                    'Where the dataset resides.')
# Architecture (hidden dims, one ConvLayer per entry).
flags.DEFINE_multi_integer('graph_encoder_layer', [
    256, 256], 'Conv layer sizes.')
flags.DEFINE_bool('batchnorm', True, 'Batchnorm or not.')
flags.DEFINE_string('layer_name', "GCN", 'Con. layer.')
flags.DEFINE_string('act_name', "ReLU", 'Activation funciton.')
# Training hyperparameters for the self-supervised encoder.
flags.DEFINE_float('lambd', 1e-3, 'The ratio for decorrelation loss.')
flags.DEFINE_integer('epochs', 500, 'The number of training epochs.')
flags.DEFINE_float('lr', 1e-3, 'The learning rate for model training.')
flags.DEFINE_float('weight_decay', 1e-5,
                   'The value of the weight decay for training.')
# Hyperparameters for the downstream linear classifier (probe).
flags.DEFINE_float(
    'lr_cls', 1e-2,
    'The learning rate for model training for downstream classifier.')
flags.DEFINE_float(
    'wd_cls', 1e-5,
    'The value of the weight decay for training for downstream classifier..')
flags.DEFINE_integer(
    'epochs_cls', 100,
    'The number of training epochs for node downstream classifier.')
# Augmentations (graph-view generation probabilities).
flags.DEFINE_float('drop_edge_p', 0.4, 'Probability of edge dropout 1.')
flags.DEFINE_float('drop_feat_p', 0.2,
                   'Probability of node feature dropout 1.')
# Logging and checkpoint.
flags.DEFINE_string(
    'logdir', None, 'Where the checkpoint and logs are stored.')
flags.DEFINE_string('mask_dir', './mask',
                    'Where the checkpoint and logs are stored.')
# Evaluation cadence (epochs between linear-probe evaluations).
flags.DEFINE_integer('eval_period', 5, 'Evaluate every eval_epochs.')
def run(dataset, logger):
    """Train the BlockGCL encoder with two random graph augmentations and a
    per-block invariance/decorrelation loss; periodically evaluate the last
    block's embeddings with a linear probe and track the best test accuracy."""
    gpu_available = torch.cuda.is_available() and FLAGS.gpu_id >= 0
    device = torch.device("cuda:{}".format(FLAGS.gpu_id)) if gpu_available \
        else torch.device("cpu")
    logger.info("Using {} for training.".format(device))
    data = dataset[0].to(device)
    num_classes = dataset.num_classes
    # set random seed
    if FLAGS.model_seed is not None:
        set_random_seeds(FLAGS.model_seed)
        logger.info("Random seed set to {}.".format(FLAGS.model_seed))
    transform = get_graph_drop_transform(drop_edge_p=FLAGS.drop_edge_p,
                                         drop_feat_p=FLAGS.drop_feat_p)
    encoder = GCN(data.x.size(1), FLAGS.graph_encoder_layer, FLAGS).to(device)
    optimizer = AdamW(params=[{"params" :encoder.parameters()}],
                      lr=FLAGS.lr,
                      weight_decay=FLAGS.weight_decay)
    # number of parameters
    total_params = sum([param.nelement() for param in encoder.parameters()])
    logger.info(encoder)
    logger.info("Number of parameter: %.2fM" % (total_params/1e6))
    # start training
    logger.info("Satrt training")
    best_test_acc_mean, best_test_acc_std, \
        best_test_acc_epoch, best_test_acc_list = 0, 0, 0, []
    for epoch in range(1, 1 + FLAGS.epochs):
        # torch.cuda.empty_cache()
        encoder.train()
        optimizer.zero_grad()
        # two independently augmented views of the same graph
        data1 = transform(data)
        data2 = transform(data)
        data1.edge_index = edgeidx2sparse(data1.edge_index, data1.x.size(0))
        data2.edge_index = edgeidx2sparse(data2.edge_index, data2.x.size(0))
        outputs1, outputs2 = encoder(data1.x, data1.edge_index), encoder(data2.x, data2.edge_index)
        total_loss = 0.
        # one loss (and one backward) per GNN block; the encoder detaches each
        # block's input, so each backward only updates its own block
        for o1, o2 in list(zip(outputs1, outputs2)):
            loss = inv_dec_loss(o1, o2, FLAGS.lambd)
            # total_loss += loss
            total_loss += loss.item()
            loss.backward()
        # total_loss.backward()
        optimizer.step()
        # eval
        if epoch == 1 or epoch % FLAGS.eval_period == 0:
            encoder.eval()
            with torch.no_grad():
                # embeds = torch.cat(encoder(data), dim=1)
                # the linear probe uses only the last block's output
                embeds = encoder(data.x, data.edge_index)[-1]
                # embeds = encoder.embeds(data.x, data.edge_index)
                # embeds = torch.cat(embeds, dim=1)
            # mini-batch probe for the large OGB graphs, full-batch otherwise
            if FLAGS.dataset in ['ogbn-arxiv', 'ogbn-mag']:
                _, test_acc_list = batch_test(embeds=embeds,
                                              data=data,
                                              num_classes=num_classes,
                                              FLAGS=FLAGS,
                                              device=device)
            else:
                _, test_acc_list = test(embeds=embeds,
                                        data=data,
                                        num_classes=num_classes,
                                        FLAGS=FLAGS,
                                        device=device)
            test_acc_mean, test_acc_std = \
                np.mean(test_acc_list), np.std(test_acc_list)
            if test_acc_mean > best_test_acc_mean:
                best_test_acc_mean = test_acc_mean
                best_test_acc_std = test_acc_std
                best_test_acc_epoch = epoch
                best_test_acc_list = copy.deepcopy(test_acc_list)
                # save encoder weights
                # torch.save(model.online_encoder.state_dict(), os.path.join(FLAGS.logdir, '{}.pt'.format(FLAGS.dataset)))
            logger.info("[Epoch {:4d}/{:4d}] loss={:.4f}, "
                        "test_acc={:.2f}±{:.2f} "
                        "[best_test_acc: {:.2f}±{:.2f} at epoch {}]".format(
                            epoch, FLAGS.epochs, total_loss,
                            test_acc_mean * 100, test_acc_std * 100,
                            best_test_acc_mean * 100, best_test_acc_std * 100,
                            best_test_acc_epoch
                        ))
    logger.info("Best test acc: {:.2f}±{:.2f} at epoch {}: {}".format(
        best_test_acc_mean * 100, best_test_acc_std * 100,
        best_test_acc_epoch, best_test_acc_list
    ))
def get_dataset():
    """Load the dataset selected by FLAGS, expanding '~' in the data dir."""
    return load_data(
        data_dir=osp.expanduser(FLAGS.data_dir),
        dataset_name=FLAGS.dataset,
        mask_dir=FLAGS.mask_dir,
        load_mask=False,
        save_mask=False,
    )
def main(argv):
    """absl entry point: set up the log file, load data, and train."""
    # Fix: the original template "./logs/exp.log" had no "{}" placeholder,
    # so .format(FLAGS.dataset) was a no-op and every dataset shared one file.
    logger = setup_logger(output="./logs/{}.log".format(FLAGS.dataset))
    dataset = get_dataset()
    run(dataset=dataset, logger=logger)


if __name__ == "__main__":
    app.run(main)
| 7,308 | 37.267016 | 122 | py |
BlockGCL | BlockGCL-master/transforms.py | import copy
import torch
from torch_geometric.utils.dropout import dropout_adj
from torch_geometric.transforms import Compose
class DropFeatures:
    r"""Zeroes entire node-feature columns, each with probability ``p``."""

    def __init__(self, p=None):
        assert 0. < p < 1., \
            'Dropout probability has to be between 0 and 1, but got %.2f' % p
        self.p = p

    def __call__(self, data):
        num_feats = data.x.size(1)
        # sample one Bernoulli(p) per feature column
        col_mask = torch.empty((num_feats,), dtype=torch.float32,
                               device=data.x.device).uniform_(0, 1) < self.p
        data.x[:, col_mask] = 0
        return data

    def __repr__(self):
        return '{}(p={})'.format(self.__class__.__name__, self.p)
class DropEdges:
    r"""Randomly removes edges with probability ``p`` via ``dropout_adj``."""

    def __init__(self, p, force_undirected=False):
        assert 0. < p < 1., \
            'Dropout probability has to be between 0 and 1, but got %.2f' % p
        self.p = p
        self.force_undirected = force_undirected

    def __call__(self, data):
        attr = data.edge_attr if 'edge_attr' in data else None
        new_index, new_attr = dropout_adj(
            data.edge_index, attr,
            p=self.p, force_undirected=self.force_undirected)
        data.edge_index = new_index
        if new_attr is not None:
            data.edge_attr = new_attr
        return data

    def __repr__(self):
        return '{}(p={}, force_undirected={})'.format(
            self.__class__.__name__, self.p, self.force_undirected)
def get_graph_drop_transform(drop_edge_p, drop_feat_p):
    """Compose deepcopy + optional edge dropout + optional feature dropout."""
    steps = [copy.deepcopy]  # never mutate the caller's graph in place
    if drop_edge_p > 0.:
        steps.append(DropEdges(drop_edge_p))
    if drop_feat_p > 0.:
        steps.append(DropFeatures(drop_feat_p))
    return Compose(steps)
| 2,025 | 27.942857 | 83 | py |
RecSys_PyTorch | RecSys_PyTorch-master/main.py | # Import packages
import os
import torch
import models
from data.dataset import UIRTDataset
from evaluation.evaluator import Evaluator
from experiment.early_stop import EarlyStop
from loggers import FileLogger, CSVLogger
from utils.general import make_log_dir, set_random_seed
from config import load_config
"""
Configurations
"""
config = load_config()
exp_config = config.experiment
gpu_id = exp_config.gpu
seed = exp_config.seed
dataset_config = config.dataset
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
set_random_seed(seed)
"""
Dataset
"""
dataset = UIRTDataset(**dataset_config)
# """
# Early stop
# """
# early_stop = EarlyStop(**config['EarlyStop'])
"""
Model base class
"""
model_name = config.experiment.model_name
model_base = getattr(models, model_name)
hparams = config.hparams
"""
Logger
"""
log_dir = make_log_dir(os.path.join(exp_config.save_dir, model_name))
logger = FileLogger(log_dir)
csv_logger = CSVLogger(log_dir)
# Save log & dataset config.
logger.info(config)
logger.info(dataset)
valid_input, valid_target = dataset.valid_input, dataset.valid_target
evaluator = Evaluator(valid_input, valid_target, protocol=dataset.protocol, ks=config.evaluator.ks)
model = model_base(dataset, hparams, device)
ret = model.fit(dataset, exp_config, evaluator=evaluator, loggers=[logger, csv_logger])
print(ret['scores'])
csv_logger.save() | 1,665 | 22.8 | 103 | py |
RecSys_PyTorch | RecSys_PyTorch-master/models/RP3b.py | """
Bibek Paudel et al., Updatable, accurate, diverse, and scalablerecommendations for interactive applications. TiiS 2017.
https://www.zora.uzh.ch/id/eprint/131338/1/TiiS_2016.pdf
Main model codes from https://github.com/MaurizioFD/RecSys2019_DeepLearning_Evaluation
"""
import torch
import torch.nn.functional as F
import numpy as np
import scipy.sparse as sp
from sklearn.preprocessing import normalize
from tqdm import tqdm
from models.BaseModel import BaseModel
class RP3b(BaseModel):
    """RP3beta graph-based recommender (Paudel et al., TiiS 2017).

    Scores items via 3-step random walks user -> item -> user -> item whose
    transition probabilities are raised to `alpha`, with item popularity
    penalized by degree^(-beta). Only the top-k weights per item are kept.
    """

    def __init__(self, dataset, hparams, device):
        super(RP3b, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.topk = hparams['topk']    # neighbors kept per item
        self.alpha = hparams['alpha']  # exponent on transition probabilities
        self.beta = hparams['beta']    # popularity-penalty exponent

    def fit_rp3b(self, train_matrix, block_dim=200):
        """Build the sparse item-item weight matrix `self.W_sparse`,
        processing `block_dim` items at a time to bound memory."""
        num_items = train_matrix.shape[1]
        # row-stochastic user -> item transition matrix
        Pui = normalize(train_matrix, norm='l1', axis=1)
        # Piu is the column-normalized, "boolean" urm transposed
        X_bool = train_matrix.transpose(copy=True)
        X_bool.data = np.ones(X_bool.data.size, np.float32)
        # Taking the degree of each item to penalize top popular
        # Some rows might be zero, make sure their degree remains zero
        X_bool_sum = np.array(X_bool.sum(axis=1)).ravel()
        degree = np.zeros(train_matrix.shape[1])
        nonZeroMask = X_bool_sum != 0.0
        degree[nonZeroMask] = np.power(X_bool_sum[nonZeroMask], -self.beta)
        Piu = normalize(X_bool, norm='l1', axis=1)
        del(X_bool)
        if self.alpha != 1:
            Pui = Pui.power(self.alpha)
            Piu = Piu.power(self.alpha)
        # Use array as it reduces memory requirements compared to lists
        dataBlock = 10000000
        rows = np.zeros(dataBlock, dtype=np.int32)
        cols = np.zeros(dataBlock, dtype=np.int32)
        values = np.zeros(dataBlock, dtype=np.float32)
        numCells = 0
        item_blocks = range(0, num_items, block_dim)
        tqdm_iterator = tqdm(item_blocks, desc='# items blocks covered', total=len(item_blocks))
        for cur_items_start_idx in tqdm_iterator:
            # shrink the final block if it would run past the last item
            if cur_items_start_idx + block_dim > num_items:
                block_dim = num_items - cur_items_start_idx
            # second * third transition matrix: # of ditinct path from item to item
            # block_dim x item
            Piui = Piu[cur_items_start_idx:cur_items_start_idx + block_dim, :] * Pui
            Piui = Piui.toarray()
            for row_in_block in range(block_dim):
                # Delete self connection
                row_data = np.multiply( Piui[row_in_block, :], degree)
                row_data[cur_items_start_idx + row_in_block] = 0
                # Top-k items
                best = row_data.argsort()[::-1][:self.topk]
                # add non-zero top-k path only (efficient)
                notZerosMask = row_data[best] != 0.0
                values_to_add = row_data[best][notZerosMask]
                cols_to_add = best[notZerosMask]
                for index in range(len(values_to_add)):
                    # grow the coordinate buffers in dataBlock-sized chunks
                    if numCells == len(rows):
                        rows = np.concatenate((rows, np.zeros(dataBlock, dtype=np.int32)))
                        cols = np.concatenate((cols, np.zeros(dataBlock, dtype=np.int32)))
                        values = np.concatenate((values, np.zeros(dataBlock, dtype=np.float32)))
                    rows[numCells] = cur_items_start_idx + row_in_block
                    cols[numCells] = cols_to_add[index]
                    values[numCells] = values_to_add[index]
                    numCells += 1
        self.W_sparse = sp.csr_matrix((values[:numCells], (rows[:numCells], cols[:numCells])), shape=(Pui.shape[1], Pui.shape[1]))

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Fit the item-item matrix, compute a diagnostic BCE, and optionally
        evaluate and log scores. Returns {'scores': ..., 'loss': ...}."""
        train_matrix = dataset.train_data
        self.fit_rp3b(train_matrix.tocsc())
        output = train_matrix @ self.W_sparse
        # NOTE(review): BCE's (input, target) argument order looks swapped and
        # `output` is not clipped to [0, 1] — this loss is a diagnostic only.
        loss = F.binary_cross_entropy(torch.tensor(train_matrix.toarray()), torch.tensor(output.toarray()))
        if evaluator is not None:
            scores = evaluator.evaluate(self)
        else:
            scores = None
        if loggers is not None:
            if evaluator is not None:
                for logger in loggers:
                    logger.log_metrics(scores, epoch=1)
        return {'scores': scores, 'loss': loss}

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score all items for the given positives; seen items get -inf."""
        # eval_pos_matrix
        preds = (eval_pos * self.W_sparse).toarray()
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
RecSys_PyTorch | RecSys_PyTorch-master/models/PureSVD.py | import numpy as np
import scipy.sparse as sp
from sklearn.utils.extmath import randomized_svd
import torch
import torch.nn.functional as F
from models.BaseModel import BaseModel
class PureSVD(BaseModel):
    """PureSVD recommender: truncated SVD of the interaction matrix; user and
    item factors come from the left and (sigma-scaled) right singular vectors."""

    def __init__(self, dataset, hparams, device):
        super(PureSVD, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.num_factors = hparams['num_factors']  # SVD rank
        self.device = device

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Factorize the training matrix (fixed random_state for
        reproducibility), store the embeddings, optionally evaluate/log."""
        # Solve EASE
        train_matrix = dataset.train_data.toarray()
        U, sigma, Vt = randomized_svd(train_matrix, n_components=self.num_factors, random_state=123)
        # fold the singular values into the item factors: R ~= U (Sigma V^T)
        s_Vt = sp.diags(sigma) * Vt
        self.user_embedding = U
        self.item_embedding = s_Vt.T
        output = self.user_embedding @ self.item_embedding.T
        # NOTE(review): `output` is unclipped and the (input, target) order of
        # BCE looks swapped — this loss is a diagnostic only.
        loss = F.binary_cross_entropy(torch.tensor(train_matrix), torch.tensor(output))
        if evaluator is not None:
            scores = evaluator.evaluate(self)
        else:
            scores = None
        if loggers is not None:
            if evaluator is not None:
                for logger in loggers:
                    logger.log_metrics(scores, epoch=1)
        return {'scores': scores, 'loss': loss}

    def predict_batch_users(self, user_ids):
        # scores = U[user_ids] @ (Sigma V^T)
        user_latent = self.user_embedding[user_ids]
        return user_latent @ self.item_embedding.T

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score items for `eval_users` in batches; seen items get -inf."""
        num_eval_users = len(eval_users)
        num_batches = int(np.ceil(num_eval_users / test_batch_size))
        pred_matrix = np.zeros(eval_pos.shape)
        perm = list(range(num_eval_users))
        for b in range(num_batches):
            if (b + 1) * test_batch_size >= num_eval_users:
                batch_idx = perm[b * test_batch_size:]
            else:
                batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
            batch_users = eval_users[batch_idx]
            pred_matrix[batch_users] = self.predict_batch_users(batch_users)
        pred_matrix[eval_pos.nonzero()] = float('-inf')
        return pred_matrix
RecSys_PyTorch | RecSys_PyTorch-master/models/ItemKNN.py | """
Jun Wang et al., Unifying user-based and item-based collaborative filtering approaches by similarity fusion. SIGIR 2006.
http://web4.cs.ucl.ac.uk/staff/jun.wang/papers/2006-sigir06-unifycf.pdf
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.sparse as sp
from tqdm import tqdm
from models.BaseModel import BaseModel
class ItemKNN(BaseModel):
    """Item-based KNN CF with shrunk cosine similarity (Wang et al., SIGIR'06).

    Optionally reweights interactions with TF-IDF or BM25 before computing
    item-item similarities; only the top-k neighbors per item are stored.
    """

    def __init__(self, dataset, hparams, device):
        super(ItemKNN, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.topk = hparams['topk']      # neighbors kept per item
        self.shrink = hparams['shrink']  # shrinkage term in the cosine denominator
        self.feature_weighting = hparams['feature_weighting']
        assert self.feature_weighting in ['tf-idf', 'bm25', 'none']

    def fit_knn(self, train_matrix, block_size=500):
        """Build the sparse item-item similarity matrix `self.W_sparse`,
        processing items in column blocks to bound memory.

        NOTE(review): the `block_size` parameter is shadowed by the hard-coded
        `block_size = 500` below, so the argument is effectively dead.
        """
        if self.feature_weighting == 'tf-idf':
            train_matrix = self.TF_IDF(train_matrix.T).T
        elif self.feature_weighting == 'bm25':
            train_matrix = self.okapi_BM25(train_matrix.T).T
        train_matrix = train_matrix.tocsc()
        num_items = train_matrix.shape[1]
        start_col_local = 0
        end_col_local = num_items
        start_col_block = start_col_local
        this_block_size = 0
        block_size = 500
        # per-item L2 norms for the cosine denominator
        sumOfSquared = np.array(train_matrix.power(2).sum(axis=0)).ravel()
        sumOfSquared = np.sqrt(sumOfSquared)
        values = []
        rows = []
        cols = []
        while start_col_block < end_col_local:
            end_col_block = min(start_col_block + block_size, end_col_local)
            this_block_size = end_col_block-start_col_block
            # All data points for a given item
            # item_data: user, item blocks
            item_data = train_matrix[:, start_col_block:end_col_block]
            item_data = item_data.toarray().squeeze()
            # If only 1 feature avoid last dimension to disappear
            if item_data.ndim == 1:
                item_data = np.atleast_2d(item_data)
            # dot products of every item against the items in this block
            this_block_weights = train_matrix.T.dot(item_data)
            for col_index_in_block in range(this_block_size):
                # this_block_size: (item,)
                # similarity between 'one block item' and whole items
                if this_block_size == 1:
                    this_column_weights = this_block_weights
                else:
                    this_column_weights = this_block_weights[:,col_index_in_block]
                # columnIndex = item index
                # zero out self similarity
                columnIndex = col_index_in_block + start_col_block
                this_column_weights[columnIndex] = 0.0
                # cosine similarity
                # denominator = sqrt(l2_norm(x)) * sqrt(l2_norm(y))+ shrinkage + eps
                denominator = sumOfSquared[columnIndex] * sumOfSquared + self.shrink + 1e-6
                this_column_weights = np.multiply(this_column_weights, 1 / denominator)
                # partial sort: indices of the k largest similarities
                relevant_items_partition = (-this_column_weights).argpartition(self.topk-1)[0:self.topk]
                relevant_items_partition_sorting = np.argsort(-this_column_weights[relevant_items_partition])
                top_k_idx = relevant_items_partition[relevant_items_partition_sorting]
                # Incrementally build sparse matrix, do not add zeros
                notZerosMask = this_column_weights[top_k_idx] != 0.0
                numNotZeros = np.sum(notZerosMask)
                values.extend(this_column_weights[top_k_idx][notZerosMask])
                rows.extend(top_k_idx[notZerosMask])
                cols.extend(np.ones(numNotZeros) * columnIndex)
            start_col_block += block_size
        self.W_sparse = sp.csr_matrix((values, (rows, cols)),
                                      shape=(num_items, num_items),
                                      dtype=np.float32)

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Fit the similarity matrix, compute a diagnostic BCE, and optionally
        evaluate and log scores. Returns {'scores': ..., 'loss': ...}."""
        train_matrix = dataset.train_data
        self.fit_knn(train_matrix)
        output = train_matrix @ self.W_sparse
        # NOTE(review): BCE's (input, target) argument order looks swapped and
        # `output` is unclipped — this loss is a diagnostic only.
        loss = F.binary_cross_entropy(torch.tensor(train_matrix.toarray()), torch.tensor(output.toarray()))
        if evaluator is not None:
            scores = evaluator.evaluate(self)
        else:
            scores = None
        if loggers is not None:
            if evaluator is not None:
                for logger in loggers:
                    logger.log_metrics(scores, epoch=1)
        return {'scores': scores, 'loss': loss}

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score items for all rows of `eval_pos` in batches; seen items -inf."""
        input_matrix = eval_pos.toarray()
        preds = np.zeros_like(input_matrix)
        num_data = input_matrix.shape[0]
        num_batches = int(np.ceil(num_data / test_batch_size))
        perm = list(range(num_data))
        for b in range(num_batches):
            if (b + 1) * test_batch_size >= num_data:
                batch_idx = perm[b * test_batch_size:]
            else:
                batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
            test_batch_matrix = input_matrix[batch_idx]
            batch_pred_matrix = (test_batch_matrix @ self.W_sparse)
            preds[batch_idx] = batch_pred_matrix
        preds[eval_pos.nonzero()] = float('-inf')
        return preds

    def okapi_BM25(self, rating_matrix, K1=1.2, B=0.75):
        """BM25-weight each row of `rating_matrix` (items assumed on rows)."""
        assert B>0 and B<1, "okapi_BM_25: B must be in (0,1)"
        assert K1>0, "okapi_BM_25: K1 must be > 0"
        # Weighs each row of a sparse matrix by OkapiBM25 weighting
        # calculate idf per term (user)
        rating_matrix = sp.coo_matrix(rating_matrix)
        N = float(rating_matrix.shape[0])
        idf = np.log(N / (1 + np.bincount(rating_matrix.col)))
        # calculate length_norm per document
        row_sums = np.ravel(rating_matrix.sum(axis=1))
        average_length = row_sums.mean()
        length_norm = (1.0 - B) + B * row_sums / average_length
        # weight matrix rows by bm25
        rating_matrix.data = rating_matrix.data * (K1 + 1.0) / (K1 * length_norm[rating_matrix.row] + rating_matrix.data) * idf[rating_matrix.col]
        return rating_matrix.tocsr()

    def TF_IDF(self, rating_matrix):
        """
        Items are assumed to be on rows
        :param rating_matrix: sparse user-item matrix (items on rows)
        :return: TF-IDF weighted CSR matrix
        """
        # TF-IDF each row of a sparse matrix
        rating_matrix = sp.coo_matrix(rating_matrix)
        N = float(rating_matrix.shape[0])
        # calculate IDF
        idf = np.log(N / (1 + np.bincount(rating_matrix.col)))
        # apply TF-IDF adjustment
        rating_matrix.data = np.sqrt(rating_matrix.data) * idf[rating_matrix.col]
        return rating_matrix.tocsr()
RecSys_PyTorch | RecSys_PyTorch-master/models/MultVAE.py | """
Dawen Liang et al., Variational Autoencoders for Collaborative Filtering. WWW 2018.
https://arxiv.org/pdf/1802.05814
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .BaseModel import BaseModel
from data.generators import MatrixGenerator
class MultVAE(BaseModel):
    def __init__(self, dataset, hparams, device):
        """Build symmetric encoder/decoder MLPs; the encoder's final layer is
        doubled to emit both the mean and log-variance of q(z|x)."""
        super(MultVAE, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        # enc_dims may arrive as a string like "[200]" from config files.
        # NOTE: eval() on config text — safe only for trusted config files.
        if isinstance(hparams['enc_dims'], str):
            hparams['enc_dims'] = eval(hparams['enc_dims'])
        self.enc_dims = [self.num_items] + list(hparams['enc_dims'])
        self.dec_dims = self.enc_dims[::-1]
        self.dims = self.enc_dims + self.dec_dims[1:]
        # KL annealing schedule parameters (Liang et al., WWW 2018)
        self.total_anneal_steps = hparams['total_anneal_steps']
        self.anneal_cap = hparams['anneal_cap']
        self.dropout = hparams['dropout']
        self.eps = 1e-6
        self.anneal = 0.
        self.update_count = 0
        self.device = device
        self.encoder = nn.ModuleList()
        for i, (d_in, d_out) in enumerate(zip(self.enc_dims[:-1], self.enc_dims[1:])):
            if i == len(self.enc_dims[:-1]) - 1:
                # final encoder layer outputs [mu; logvar], hence 2x width
                d_out *= 2
            self.encoder.append(nn.Linear(d_in, d_out))
            if i != len(self.enc_dims[:-1]) - 1:
                self.encoder.append(nn.Tanh())
        self.decoder = nn.ModuleList()
        for i, (d_in, d_out) in enumerate(zip(self.dec_dims[:-1], self.dec_dims[1:])):
            self.decoder.append(nn.Linear(d_in, d_out))
            if i != len(self.dec_dims[:-1]) - 1:
                self.decoder.append(nn.Tanh())
        self.to(self.device)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
    def forward(self, rating_matrix):
        """Encode, reparameterize, decode.

        Returns logits in eval mode, and (logits, KL term) in train mode.
        """
        # encoder: L2-normalize rows, then input dropout (train only)
        h = F.dropout(F.normalize(rating_matrix), p=self.dropout, training=self.training)
        for layer in self.encoder:
            h = layer(h)
        # split the encoder head into mean and log-variance, then sample z
        mu_q = h[:, :self.enc_dims[-1]]
        logvar_q = h[:, self.enc_dims[-1]:]  # log sigma^2, shape (batch, latent)
        std_q = torch.exp(0.5 * logvar_q)  # sigma, shape (batch, latent)
        # NOTE(review): epsilon is drawn with std=0.01 rather than 1 — differs
        # from the standard reparameterization trick; confirm this is intended.
        epsilon = torch.zeros_like(std_q).normal_(mean=0, std=0.01)
        # `self.training` (bool, 0/1) disables the noise term at eval time
        sampled_z = mu_q + self.training * epsilon * std_q
        output = sampled_z
        for layer in self.decoder:
            output = layer(output)
        if self.training:
            # analytic KL(q(z|x) || N(0, I)), summed over latents, batch mean
            kl_loss = ((0.5 * (-logvar_q + torch.exp(logvar_q) + torch.pow(mu_q, 2) - 1)).sum(1)).mean()
            return output, kl_loss
        else:
            return output
def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
# user, item, rating pairs
train_matrix = dataset.train_data
num_training = train_matrix.shape[0]
num_batches = int(np.ceil(num_training / exp_config.batch_size))
batch_generator = MatrixGenerator(train_matrix, batch_size=exp_config.batch_size, shuffle=True, device=self.device)
for epoch in range(1, exp_config.num_epochs + 1):
self.train()
epoch_loss = 0.0
for b, batch_matrix in enumerate(batch_generator):
self.optimizer.zero_grad()
if self.total_anneal_steps > 0:
self.anneal = min(self.anneal_cap, 1. * self.update_count / self.total_anneal_steps)
else:
self.anneal = self.anneal_cap
pred_matrix, kl_loss = self.forward(batch_matrix)
# cross_entropy
ce_loss = F.binary_cross_entropy_with_logits(pred_matrix, batch_matrix, reduction='none').sum(1).mean()
batch_loss = ce_loss + kl_loss * self.anneal
batch_loss.backward()
self.optimizer.step()
self.update_count += 1
epoch_loss += batch_loss
if exp_config.verbose and b % 50 == 0:
print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
epoch_summary = {'loss': epoch_loss}
# Evaluate if necessary
if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
scores = evaluator.evaluate(self)
epoch_summary.update(scores)
if loggers is not None:
for logger in loggers:
logger.log_metrics(epoch_summary, epoch=epoch)
## Check early stop
if early_stop is not None:
is_update, should_stop = early_stop.step(scores, epoch)
if should_stop:
break
else:
if loggers is not None:
for logger in loggers:
logger.log_metrics(epoch_summary, epoch=epoch)
best_score = early_stop.best_score if early_stop is not None else scores
return {'scores': best_score}
def predict(self, eval_users, eval_pos, test_batch_size):
with torch.no_grad():
input_matrix = torch.FloatTensor(eval_pos.toarray()).to(self.device)
preds = np.zeros(eval_pos.shape)
num_data = input_matrix.shape[0]
num_batches = int(np.ceil(num_data / test_batch_size))
perm = list(range(num_data))
for b in range(num_batches):
if (b + 1) * test_batch_size >= num_data:
batch_idx = perm[b * test_batch_size:]
else:
batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
test_batch_matrix = input_matrix[batch_idx]
batch_pred_matrix = self.forward(test_batch_matrix)
preds[batch_idx] = batch_pred_matrix.detach().cpu().numpy()
preds[eval_pos.nonzero()] = float('-inf')
return preds | 5,994 | 37.429487 | 123 | py |
RecSys_PyTorch | RecSys_PyTorch-master/models/P3a.py | """
Colin Cooper et al., Random Walks in Recommender Systems: Exact Computation and Simulations. WWW 2014.
http://wwwconference.org/proceedings/www2014/companion/p811.pdf
Main model codes from https://github.com/MaurizioFD/RecSys2019_DeepLearning_Evaluation
"""
import torch
import torch.nn.functional as F
import numpy as np
import scipy.sparse as sp
from sklearn.preprocessing import normalize
from tqdm import tqdm
from models.BaseModel import BaseModel
class P3a(BaseModel):
    """P3-alpha random-walk recommender.

    Cooper et al., "Random Walks in Recommender Systems: Exact Computation and
    Simulations", WWW 2014. Builds an item-item similarity matrix W from
    3-step (item -> user -> item) walk probabilities raised to the power alpha.
    """

    def __init__(self, dataset, hparams, device):
        super(P3a, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.topk = hparams['topk']    # similar items kept per item
        self.alpha = hparams['alpha']  # exponent applied to transition probs

    def fit_p3a(self, train_matrix, block_dim=200):
        """Compute self.W_sparse, processing items in blocks to bound memory."""
        num_items = train_matrix.shape[1]

        # (user, item): row-stochastic user -> item transition probabilities
        Pui = normalize(train_matrix, norm='l1', axis=1)

        X_bool = train_matrix.transpose(copy=True)
        X_bool.data = np.ones(X_bool.data.size, np.float32)
        # (item, user): row-stochastic item -> user transition probabilities
        Piu = normalize(X_bool, norm='l1', axis=1)
        del X_bool

        if self.alpha != 1:
            Pui = Pui.power(self.alpha)
            Piu = Piu.power(self.alpha)

        # Use arrays as they reduce memory requirements compared to lists
        dataBlock = 10000000
        rows = np.zeros(dataBlock, dtype=np.int32)
        cols = np.zeros(dataBlock, dtype=np.int32)
        values = np.zeros(dataBlock, dtype=np.float32)
        numCells = 0

        item_blocks = range(0, num_items, block_dim)
        tqdm_iterator = tqdm(item_blocks, desc='# items blocks covered', total=len(item_blocks))
        for cur_items_start_idx in tqdm_iterator:
            # shrink the last block so it does not run past num_items
            if cur_items_start_idx + block_dim > num_items:
                block_dim = num_items - cur_items_start_idx

            # item -> user -> item walk weights for this block: block_dim x num_items
            Piui = Piu[cur_items_start_idx:cur_items_start_idx + block_dim, :] * Pui
            Piui = Piui.toarray()

            for row_in_block in range(block_dim):
                # Delete self connection
                row_data = Piui[row_in_block, :]
                row_data[cur_items_start_idx + row_in_block] = 0

                # Top-k items
                best = row_data.argsort()[::-1][:self.topk]

                # add non-zero top-k entries only (keeps W sparse)
                notZerosMask = row_data[best] != 0.0
                values_to_add = row_data[best][notZerosMask]
                cols_to_add = best[notZerosMask]

                for index in range(len(values_to_add)):
                    # grow the COO buffers when full
                    if numCells == len(rows):
                        rows = np.concatenate((rows, np.zeros(dataBlock, dtype=np.int32)))
                        cols = np.concatenate((cols, np.zeros(dataBlock, dtype=np.int32)))
                        values = np.concatenate((values, np.zeros(dataBlock, dtype=np.float32)))

                    rows[numCells] = cur_items_start_idx + row_in_block
                    cols[numCells] = cols_to_add[index]
                    values[numCells] = values_to_add[index]
                    numCells += 1

        self.W_sparse = sp.csr_matrix((values[:numCells], (rows[:numCells], cols[:numCells])), shape=(Pui.shape[1], Pui.shape[1]))

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """One-shot fit (no epochs); returns {'scores': ..., 'loss': ...}."""
        train_matrix = dataset.train_data
        self.fit_p3a(train_matrix.tocsc())

        output = train_matrix @ self.W_sparse
        # Diagnostic reconstruction loss only. The original passed
        # (target, prediction) in swapped order and fed unbounded walk weights
        # (and exact 0/1 inputs) to BCE, which blows up; use the prediction —
        # clamped into (0, 1) — as input and the binary matrix as target.
        pred = torch.tensor(output.toarray()).clamp(1e-6, 1 - 1e-6)
        loss = F.binary_cross_entropy(pred, torch.tensor(train_matrix.toarray(), dtype=pred.dtype))

        scores = evaluator.evaluate(self) if evaluator is not None else None
        if loggers is not None and scores is not None:
            for logger in loggers:
                logger.log_metrics(scores, epoch=1)

        return {'scores': scores, 'loss': loss}

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score items via eval_pos @ W; seen positives are masked to -inf."""
        input_matrix = eval_pos.toarray()
        preds = np.zeros_like(input_matrix)

        num_data = input_matrix.shape[0]
        num_batches = int(np.ceil(num_data / test_batch_size))
        perm = list(range(num_data))
        for b in range(num_batches):
            if (b + 1) * test_batch_size >= num_data:
                batch_idx = perm[b * test_batch_size:]
            else:
                batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]

            test_batch_matrix = input_matrix[batch_idx]
            batch_pred_matrix = (test_batch_matrix @ self.W_sparse)
            preds[batch_idx] = batch_pred_matrix
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
RecSys_PyTorch | RecSys_PyTorch-master/models/CDAE.py | """
Yao Wu et al., Collaborative denoising auto-encoders for top-n recommender systems. WSDM 2016.
https://alicezheng.org/papers/wsdm16-cdae.pdf
"""
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
from data.generators import MatrixGenerator
class CDAE(BaseModel):
    """Collaborative Denoising Autoencoder (Wu et al., WSDM 2016).

    A one-hidden-layer denoising autoencoder over a user's interaction row,
    plus a per-user embedding added to the hidden layer.
    """

    def __init__(self, dataset, hparams, device):
        super(CDAE, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.hidden_dim = hparams['hidden_dim']
        self.act = hparams['act']
        self.corruption_ratio = hparams['corruption_ratio']  # input dropout rate
        self.device = device

        self.user_embedding = nn.Embedding(self.num_users, self.hidden_dim)
        self.encoder = nn.Linear(self.num_items, self.hidden_dim)
        self.decoder = nn.Linear(self.hidden_dim, self.num_items)

        self.to(self.device)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)

    def forward(self, user_id, rating_matrix):
        """Denoise the (corrupted) rating row; returns sigmoid reconstruction."""
        # corruption: randomly drop observed interactions during training
        rating_matrix = F.dropout(rating_matrix, self.corruption_ratio, training=self.training)

        # AE with a user-specific offset on the hidden layer
        enc = torch.tanh(self.encoder(rating_matrix) + self.user_embedding(user_id))
        dec = self.decoder(enc)
        return torch.sigmoid(dec)

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Train with per-user binary cross-entropy; returns {'scores': ...}."""
        train_matrix = dataset.train_data

        num_training = train_matrix.shape[0]
        num_batches = int(np.ceil(num_training / exp_config.batch_size))

        batch_generator = MatrixGenerator(train_matrix, return_index=True, batch_size=exp_config.batch_size, shuffle=True, device=self.device)

        scores = None  # latest evaluation result; stays None if never evaluated
        for epoch in range(1, exp_config.num_epochs + 1):
            self.train()
            epoch_loss = 0.0
            for b, (batch_matrix, batch_users) in enumerate(batch_generator):
                self.optimizer.zero_grad()
                pred_matrix = self.forward(batch_users, batch_matrix)

                # cross_entropy (per-user sum, batch mean)
                batch_loss = F.binary_cross_entropy(pred_matrix, batch_matrix, reduction='none').sum(1).mean()
                batch_loss.backward()
                self.optimizer.step()

                # .item() detaches the scalar so the batch graph/tensors are freed.
                epoch_loss += batch_loss.item()

                if exp_config.verbose and b % 50 == 0:
                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
            epoch_summary = {'loss': epoch_loss}

            # Evaluate if necessary
            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
                scores = evaluator.evaluate(self)
                epoch_summary.update(scores)

                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)

                ## Check early stop
                if early_stop is not None:
                    is_update, should_stop = early_stop.step(scores, epoch)
                    if should_stop:
                        break
            else:
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)

        # 'scores' may legitimately be None here (no evaluator given); previously
        # this raised NameError in that case.
        best_score = early_stop.best_score if early_stop is not None else scores
        return {'scores': best_score}

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score all items for all users; seen positives are masked to -inf."""
        with torch.no_grad():
            input_matrix = torch.FloatTensor(eval_pos.toarray()).to(self.device)
            preds = np.zeros(shape=input_matrix.shape)

            num_data = input_matrix.shape[0]
            num_batches = int(np.ceil(num_data / test_batch_size))
            perm = list(range(num_data))
            for b in range(num_batches):
                if (b + 1) * test_batch_size >= num_data:
                    batch_idx = perm[b * test_batch_size:]
                else:
                    batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]

                test_batch_matrix = input_matrix[batch_idx]
                batch_idx_tensor = torch.LongTensor(batch_idx).to(self.device)
                batch_pred_matrix = self.forward(batch_idx_tensor, test_batch_matrix)
                preds[batch_idx] = batch_pred_matrix.detach().cpu().numpy()
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
RecSys_PyTorch | RecSys_PyTorch-master/models/DAE.py | """
Yao Wu et al., Collaborative denoising auto-encoders for top-n recommender systems. WSDM 2016.
https://alicezheng.org/papers/wsdm16-cdae.pdf
"""
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
from data.generators import MatrixGenerator
class DAE(BaseModel):
    """Denoising Autoencoder baseline (CDAE without the user embedding).

    Wu et al., WSDM 2016. One-hidden-layer autoencoder over a user's
    interaction row with input-dropout corruption.
    """

    def __init__(self, dataset, hparams, device):
        super(DAE, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.hidden_dim = hparams['hidden_dim']
        self.act = hparams['act']
        self.corruption_ratio = hparams['corruption_ratio']  # input dropout rate
        self.device = device

        self.encoder = nn.Linear(self.num_items, self.hidden_dim)
        self.decoder = nn.Linear(self.hidden_dim, self.num_items)

        self.to(self.device)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)

    def forward(self, rating_matrix):
        """Denoise the (corrupted) rating row; returns sigmoid reconstruction."""
        # corruption: randomly drop observed interactions during training
        rating_matrix = F.dropout(rating_matrix, self.corruption_ratio, training=self.training)

        # AE
        enc = torch.tanh(self.encoder(rating_matrix))
        dec = self.decoder(enc)
        return torch.sigmoid(dec)

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Train with per-user binary cross-entropy; returns {'scores': ...}."""
        train_matrix = dataset.train_data

        num_training = train_matrix.shape[0]
        num_batches = int(np.ceil(num_training / exp_config.batch_size))

        batch_generator = MatrixGenerator(train_matrix, batch_size=exp_config.batch_size, shuffle=True, device=self.device)

        scores = None  # latest evaluation result; stays None if never evaluated
        for epoch in range(1, exp_config.num_epochs + 1):
            self.train()
            epoch_loss = 0.0
            for b, batch_matrix in enumerate(batch_generator):
                self.optimizer.zero_grad()
                pred_matrix = self.forward(batch_matrix)

                # cross_entropy (per-user sum, batch mean)
                batch_loss = F.binary_cross_entropy(pred_matrix, batch_matrix, reduction='none').sum(1).mean()
                batch_loss.backward()
                self.optimizer.step()

                # .item() detaches the scalar so the batch graph/tensors are freed.
                epoch_loss += batch_loss.item()

                if exp_config.verbose and b % 50 == 0:
                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
            epoch_summary = {'loss': epoch_loss}

            # Evaluate if necessary
            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
                scores = evaluator.evaluate(self)
                epoch_summary.update(scores)

                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)

                ## Check early stop
                if early_stop is not None:
                    is_update, should_stop = early_stop.step(scores, epoch)
                    if should_stop:
                        break
            else:
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)

        # 'scores' may legitimately be None here (no evaluator given); previously
        # this raised NameError in that case.
        best_score = early_stop.best_score if early_stop is not None else scores
        return {'scores': best_score}

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score all items for all users; seen positives are masked to -inf."""
        with torch.no_grad():
            input_matrix = torch.FloatTensor(eval_pos.toarray()).to(self.device)
            preds = np.zeros(eval_pos.shape)

            num_data = input_matrix.shape[0]
            num_batches = int(np.ceil(num_data / test_batch_size))
            perm = list(range(num_data))
            for b in range(num_batches):
                if (b + 1) * test_batch_size >= num_data:
                    batch_idx = perm[b * test_batch_size:]
                else:
                    batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]

                test_batch_matrix = input_matrix[batch_idx]
                batch_pred_matrix = self.forward(test_batch_matrix)
                # plain assignment (preds starts at zero); '+=' in the original
                # was inconsistent with every sibling model, same result here
                preds[batch_idx] = batch_pred_matrix.detach().cpu().numpy()
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
RecSys_PyTorch | RecSys_PyTorch-master/models/LightGCN.py | """
LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation,
Xiangnan He et al.,
SIGIR 2020.
"""
import os
import math
import time
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
from data.generators import PairwiseGenerator
class LightGCN(BaseModel):
    """LightGCN (He et al., SIGIR 2020).

    Layer-wise linear propagation of user/item embeddings over the normalized
    user-item bipartite graph, averaged across layers; trained with a BPR loss.
    """

    def __init__(self, dataset, hparams, device):
        super(LightGCN, self).__init__()
        self.data_name = dataset.dataname
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items

        self.emb_dim = hparams['emb_dim']
        self.num_layers = hparams['num_layers']
        self.node_dropout = hparams['node_dropout']  # fraction of edges dropped

        self.split = hparams['split']          # split the adjacency into folds?
        self.num_folds = hparams['num_folds']

        self.reg = hparams['reg']

        self.Graph = None
        self.data_loader = None

        # cache directory for the precomputed normalized adjacency
        self.path = hparams['graph_dir']
        if not os.path.exists(self.path):
            os.mkdir(self.path)

        self.device = device

        self.build_graph()
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)

    def build_graph(self):
        """Create and initialize the free (layer-0) embeddings."""
        self.user_embedding = nn.Embedding(self.num_users, self.emb_dim)
        self.item_embedding = nn.Embedding(self.num_items, self.emb_dim)
        nn.init.normal_(self.user_embedding.weight, 0, 0.01)
        nn.init.normal_(self.item_embedding.weight, 0, 0.01)

        self.user_embedding_pred = None
        self.item_embedding_pred = None

        self.to(self.device)

    def update_lightgcn_embedding(self):
        """Refresh the propagated embeddings from the current graph and weights."""
        self.user_embeddings, self.item_embeddings = self._lightgcn_embedding(self.Graph)

    def forward(self, user_ids, item_ids):
        """Dot-product score for each (user, item) pair."""
        user_emb = F.embedding(user_ids, self.user_embeddings)
        item_emb = F.embedding(item_ids, self.item_embeddings)
        pred_rating = torch.sum(torch.mul(user_emb, item_emb), 1)
        return pred_rating

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Train with BPR over sampled (user, pos, neg) triples."""
        train_matrix = dataset.train_data
        self.Graph = self.getSparseGraph(train_matrix)

        batch_generator = PairwiseGenerator(
            train_matrix, num_negatives=1, num_positives_per_user=1,
            batch_size=exp_config.batch_size, shuffle=True, device=self.device)

        num_batches = len(batch_generator)
        scores = None  # latest evaluation result; stays None if never evaluated
        for epoch in range(1, exp_config.num_epochs + 1):
            self.train()
            epoch_loss = 0.0
            for b, (batch_users, batch_pos, batch_neg) in enumerate(batch_generator):
                self.optimizer.zero_grad()
                batch_loss = self.process_one_batch(batch_users, batch_pos, batch_neg)
                batch_loss.backward()
                self.optimizer.step()

                # .item() detaches the scalar so the batch graph/tensors are freed.
                epoch_loss += batch_loss.item()

                if exp_config.verbose and b % 50 == 0:
                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
            epoch_summary = {'loss': epoch_loss}

            # Evaluate if necessary
            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
                scores = evaluator.evaluate(self)
                epoch_summary.update(scores)

                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)

                ## Check early stop
                if early_stop is not None:
                    is_update, should_stop = early_stop.step(scores, epoch)
                    if should_stop:
                        break
            else:
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)

        # 'scores' may legitimately be None here (no evaluator given); previously
        # this raised NameError in that case.
        best_score = early_stop.best_score if early_stop is not None else scores
        return {'scores': best_score}

    def process_one_batch(self, users, pos_items, neg_items):
        """BPR loss for one batch of (user, positive, negative) triples."""
        self.update_lightgcn_embedding()
        pos_scores = self.forward(users, pos_items)
        neg_scores = self.forward(users, neg_items)
        # torch.sigmoid replaces the deprecated F.sigmoid
        loss = -torch.sigmoid(pos_scores - neg_scores).log().mean()
        return loss

    def predict_batch_users(self, user_ids):
        """All-item scores for a batch of users."""
        user_embeddings = F.embedding(user_ids, self.user_embeddings)
        item_embeddings = self.item_embeddings
        return user_embeddings @ item_embeddings.T

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Full prediction matrix; seen positives are masked to -inf."""
        self.update_lightgcn_embedding()

        num_eval_users = len(eval_users)
        num_batches = int(np.ceil(num_eval_users / test_batch_size))
        pred_matrix = np.zeros(eval_pos.shape)
        perm = list(range(num_eval_users))
        with torch.no_grad():
            for b in range(num_batches):
                if (b + 1) * test_batch_size >= num_eval_users:
                    batch_idx = perm[b * test_batch_size:]
                else:
                    batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]

                batch_users = eval_users[batch_idx]
                batch_users_torch = torch.LongTensor(batch_users).to(self.device)
                pred_matrix[batch_users] = self.predict_batch_users(batch_users_torch).detach().cpu().numpy()

        pred_matrix[eval_pos.nonzero()] = float('-inf')
        return pred_matrix

    ##################################### LightGCN Code
    def __dropout_x(self, x, keep_prob):
        """Drop edges of a sparse tensor, rescaling kept values by 1/keep_prob."""
        size = x.size()
        index = x.indices().t()
        values = x.values()
        # keep each edge with probability keep_prob
        # NOTE(review): torch.rand here is created on CPU while `values` may be
        # on another device — confirm on GPU runs.
        random_index = torch.rand(len(values)) + keep_prob
        random_index = random_index.int().bool()
        index = index[random_index]
        values = values[random_index] / keep_prob
        g = torch.sparse.FloatTensor(index.t(), values, size)
        return g

    def __dropout(self, graph, keep_prob):
        """Apply edge dropout to the (possibly folded) adjacency graph.

        Fix: the original signature took only keep_prob (and read self.Graph)
        while the caller passed (graph, rate) — a TypeError at runtime.
        """
        if self.split:
            return [self.__dropout_x(g, keep_prob) for g in graph]
        return self.__dropout_x(graph, keep_prob)

    def _lightgcn_embedding(self, graph):
        """Propagate embeddings num_layers times and average all layers."""
        users_emb = self.user_embedding.weight
        items_emb = self.item_embedding.weight
        all_emb = torch.cat([users_emb, items_emb])
        embs = [all_emb]

        if self.node_dropout > 0:
            if self.training:
                # keep probability is 1 - dropout rate (the original passed the
                # rate itself where a keep probability was expected)
                g_droped = self.__dropout(graph, 1 - self.node_dropout)
            else:
                g_droped = graph
        else:
            g_droped = graph

        for layer in range(self.num_layers):
            if self.split:
                temp_emb = []
                for f in range(len(g_droped)):
                    temp_emb.append(torch.sparse.mm(g_droped[f], all_emb))
                side_emb = torch.cat(temp_emb, dim=0)
                all_emb = side_emb
            else:
                all_emb = torch.sparse.mm(g_droped, all_emb)
            embs.append(all_emb)
        embs = torch.stack(embs, dim=1)

        # LightGCN final embedding: mean over layer outputs
        light_out = torch.mean(embs, dim=1)
        users, items = torch.split(light_out, [self.num_users, self.num_items])
        return users, items

    def make_train_matrix(self):
        # NOTE(review): relies on self.dataset, which this class never stores —
        # appears to be dead code; kept unchanged for interface compatibility.
        train_matrix_arr = self.dataset.train_matrix.toarray()
        self.train_matrix = sp.csr_matrix(train_matrix_arr)

    def _split_A_hat(self, A):
        """Split the adjacency into num_folds row blocks (memory saving)."""
        A_fold = []
        fold_len = (self.num_users + self.num_items) // self.num_folds
        for i_fold in range(self.num_folds):
            start = i_fold * fold_len
            if i_fold == self.num_folds - 1:
                end = self.num_users + self.num_items
            else:
                end = (i_fold + 1) * fold_len
            A_fold.append(self._convert_sp_mat_to_sp_tensor(A[start:end]).coalesce().to(self.device))
        return A_fold

    def _convert_sp_mat_to_sp_tensor(self, X):
        """scipy sparse matrix -> torch sparse FloatTensor."""
        coo = X.tocoo().astype(np.float32)
        row = torch.Tensor(coo.row).long()
        col = torch.Tensor(coo.col).long()
        index = torch.stack([row, col])
        data = torch.FloatTensor(coo.data)
        return torch.sparse.FloatTensor(index, data, torch.Size(coo.shape))

    def getSparseGraph(self, rating_matrix):
        """Load or build D^{-1/2} A D^{-1/2} for the bipartite interaction graph."""
        n_users, n_items = rating_matrix.shape
        print("loading adjacency matrix")
        filename = f'{self.data_name}_s_pre_adj_mat.npz'
        try:
            pre_adj_mat = sp.load_npz(os.path.join(self.path, filename))
            print("successfully loaded...")
            norm_adj = pre_adj_mat
        except (OSError, ValueError):
            # missing or unreadable cache file -> rebuild from scratch
            # (the original used a bare except, hiding real errors)
            print("generating adjacency matrix")
            s = time.time()
            adj_mat = sp.dok_matrix((n_users + n_items, n_users + n_items), dtype=np.float32)
            adj_mat = adj_mat.tolil()
            R = rating_matrix.tolil()
            adj_mat[:n_users, n_users:] = R
            adj_mat[n_users:, :n_users] = R.T
            adj_mat = adj_mat.todok()
            # adj_mat = adj_mat + sp.eye(adj_mat.shape[0])

            # symmetric normalization: D^{-1/2} A D^{-1/2}
            rowsum = np.array(adj_mat.sum(axis=1))
            d_inv = np.power(rowsum, -0.5).flatten()
            d_inv[np.isinf(d_inv)] = 0.
            d_mat = sp.diags(d_inv)

            norm_adj = d_mat.dot(adj_mat)
            norm_adj = norm_adj.dot(d_mat)
            norm_adj = norm_adj.tocsr()
            end = time.time()
            print(f"costing {end - s}s, saved norm_mat...")
            sp.save_npz(os.path.join(self.path, filename), norm_adj)

        if self.split == True:
            Graph = self._split_A_hat(norm_adj)
            print("done split matrix")
        else:
            Graph = self._convert_sp_mat_to_sp_tensor(norm_adj)
            Graph = Graph.coalesce().to(self.device)
            print("don't split the matrix")
        return Graph
RecSys_PyTorch | RecSys_PyTorch-master/models/NGCF.py | """
Neural Graph Collaborative Filtering,
Xiang Wang et al.,
SIGIR 2019.
[Official tensorflow]: https://github.com/xiangwang1223/neural_graph_collaborative_filtering
[PyTorch reference]: https://github.com/huangtinglin/NGCF-PyTorch
"""
import os
import math
import time
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
from data.generators import PairwiseGenerator
class NGCF(BaseModel):
    """Neural Graph Collaborative Filtering (Wang et al., SIGIR 2019).

    Propagates user/item embeddings over the normalized bipartite graph with
    learned per-layer transforms (sum + bi-interaction), averages layer
    outputs, and trains with a BPR loss.
    """

    def __init__(self, dataset, hparams, device):
        super(NGCF, self).__init__()
        self.data_name = dataset.dataname
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items

        self.emb_dim = hparams['emb_dim']
        self.num_layers = hparams['num_layers']
        self.node_dropout = hparams['node_dropout']  # fraction of edges dropped
        self.mess_dropout = hparams['mess_dropout']  # message dropout per layer

        self.split = hparams['split']          # split the adjacency into folds?
        self.num_folds = hparams['num_folds']

        self.reg = hparams['reg']

        self.Graph = None
        self.data_loader = None

        # cache directory for the precomputed normalized adjacency
        self.path = hparams['graph_dir']
        if not os.path.exists(self.path):
            os.mkdir(self.path)

        self.device = device

        self.build_graph()
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)

    def build_graph(self):
        """Create layer-0 embeddings and per-layer W_gc / W_bi transforms."""
        self.user_embedding = nn.Embedding(self.num_users, self.emb_dim)
        self.item_embedding = nn.Embedding(self.num_items, self.emb_dim)
        nn.init.normal_(self.user_embedding.weight, 0, 0.01)
        nn.init.normal_(self.item_embedding.weight, 0, 0.01)

        self.weight_dict = nn.ParameterDict()
        layers = [self.emb_dim] * (self.num_layers + 1)
        for k in range(len(layers) - 1):
            self.weight_dict.update({'W_gc_%d' % k: nn.Parameter(nn.init.normal_(torch.empty(layers[k], layers[k + 1])))})
            self.weight_dict.update({'b_gc_%d' % k: nn.Parameter(nn.init.normal_(torch.empty(1, layers[k + 1])))})
            self.weight_dict.update({'W_bi_%d' % k: nn.Parameter(nn.init.normal_(torch.empty(layers[k], layers[k + 1])))})
            self.weight_dict.update({'b_bi_%d' % k: nn.Parameter(nn.init.normal_(torch.empty(1, layers[k + 1])))})

        self.to(self.device)

    def update_ngcf_embedding(self):
        """Refresh the propagated embeddings from the current graph and weights."""
        self.user_embeddings, self.item_embeddings = self._ngcf_embedding(self.Graph)

    def forward(self, user_ids, item_ids):
        """Dot-product score for each (user, item) pair."""
        user_emb = F.embedding(user_ids, self.user_embeddings)
        item_emb = F.embedding(item_ids, self.item_embeddings)
        pred_rating = torch.sum(torch.mul(user_emb, item_emb), 1)
        return pred_rating

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Train with BPR over sampled (user, pos, neg) triples."""
        train_matrix = dataset.train_data
        self.Graph = self.getSparseGraph(train_matrix)

        batch_generator = PairwiseGenerator(
            train_matrix, num_negatives=1, num_positives_per_user=1,
            batch_size=exp_config.batch_size, shuffle=True, device=self.device)

        num_batches = len(batch_generator)
        scores = None  # latest evaluation result; stays None if never evaluated
        for epoch in range(1, exp_config.num_epochs + 1):
            self.train()
            epoch_loss = 0.0
            for b, (batch_users, batch_pos, batch_neg) in enumerate(batch_generator):
                self.optimizer.zero_grad()
                batch_loss = self.process_one_batch(batch_users, batch_pos, batch_neg)
                batch_loss.backward()
                self.optimizer.step()

                # .item() detaches the scalar so the batch graph/tensors are freed.
                epoch_loss += batch_loss.item()

                if exp_config.verbose and b % 50 == 0:
                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
            epoch_summary = {'loss': epoch_loss}

            # Evaluate if necessary
            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
                scores = evaluator.evaluate(self)
                epoch_summary.update(scores)

                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)

                ## Check early stop
                if early_stop is not None:
                    is_update, should_stop = early_stop.step(scores, epoch)
                    if should_stop:
                        break
            else:
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)

        # 'scores' may legitimately be None here (no evaluator given); previously
        # this raised NameError in that case.
        best_score = early_stop.best_score if early_stop is not None else scores
        return {'scores': best_score}

    def process_one_batch(self, users, pos_items, neg_items):
        """BPR loss for one batch of (user, positive, negative) triples."""
        self.update_ngcf_embedding()
        pos_scores = self.forward(users, pos_items)
        neg_scores = self.forward(users, neg_items)
        # torch.sigmoid replaces the deprecated F.sigmoid
        loss = -torch.sigmoid(pos_scores - neg_scores).log().mean()
        return loss

    def predict_batch_users(self, user_ids):
        """All-item scores for a batch of users."""
        user_embeddings = F.embedding(user_ids, self.user_embeddings)
        item_embeddings = self.item_embeddings
        return user_embeddings @ item_embeddings.T

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Full prediction matrix; seen positives are masked to -inf."""
        self.update_ngcf_embedding()

        num_eval_users = len(eval_users)
        num_batches = int(np.ceil(num_eval_users / test_batch_size))
        pred_matrix = np.zeros(eval_pos.shape)
        perm = list(range(num_eval_users))
        with torch.no_grad():
            for b in range(num_batches):
                if (b + 1) * test_batch_size >= num_eval_users:
                    batch_idx = perm[b * test_batch_size:]
                else:
                    batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]

                batch_users = eval_users[batch_idx]
                batch_users_torch = torch.LongTensor(batch_users).to(self.device)
                pred_matrix[batch_users] = self.predict_batch_users(batch_users_torch).detach().cpu().numpy()

        pred_matrix[eval_pos.nonzero()] = float('-inf')
        return pred_matrix

    ##################################### LightGCN Code
    def __dropout_x(self, x, keep_prob):
        """Drop edges of a sparse tensor, rescaling kept values by 1/keep_prob."""
        size = x.size()
        index = x.indices().t()
        values = x.values()
        # keep each edge with probability keep_prob
        # NOTE(review): torch.rand here is created on CPU while `values` may be
        # on another device — confirm on GPU runs.
        random_index = torch.rand(len(values)) + keep_prob
        random_index = random_index.int().bool()
        index = index[random_index]
        values = values[random_index] / keep_prob
        g = torch.sparse.FloatTensor(index.t(), values, size)
        return g

    def __dropout(self, graph, keep_prob):
        """Apply edge dropout to the (possibly folded) adjacency graph.

        Fix: the original signature took only keep_prob (and read self.Graph)
        while the caller passed (graph, rate) — a TypeError at runtime.
        """
        if self.split:
            return [self.__dropout_x(g, keep_prob) for g in graph]
        return self.__dropout_x(graph, keep_prob)

    def _ngcf_embedding(self, graph):
        """NGCF propagation: sum + bi-interaction transforms, layer averaging."""
        users_emb = self.user_embedding.weight
        items_emb = self.item_embedding.weight
        all_emb = torch.cat([users_emb, items_emb])
        embs = [all_emb]

        if self.node_dropout > 0:
            if self.training:
                # keep probability is 1 - dropout rate (the original passed the
                # rate itself where a keep probability was expected)
                g_droped = self.__dropout(graph, 1 - self.node_dropout)
            else:
                g_droped = graph
        else:
            g_droped = graph

        ego_emb = all_emb
        for k in range(self.num_layers):
            # neighborhood aggregation: L * E
            if self.split:
                temp_emb = []
                for f in range(len(g_droped)):
                    temp_emb.append(torch.sparse.mm(g_droped[f], ego_emb))
                side_emb = torch.cat(temp_emb, dim=0)
            else:
                side_emb = torch.sparse.mm(g_droped, ego_emb)

            # sum transform W_gc and bi-interaction transform W_bi
            sum_emb = torch.matmul(side_emb, self.weight_dict['W_gc_%d' % k]) + self.weight_dict['b_gc_%d' % k]
            bi_emb = torch.mul(ego_emb, side_emb)
            bi_emb = torch.matmul(bi_emb, self.weight_dict['W_bi_%d' % k]) + self.weight_dict['b_bi_%d' % k]

            ego_emb = F.leaky_relu(sum_emb + bi_emb, negative_slope=0.2)
            ego_emb = F.dropout(ego_emb, self.mess_dropout, training=self.training)

            norm_emb = F.normalize(ego_emb, p=2, dim=1)
            embs += [norm_emb]
        embs = torch.stack(embs, dim=1)

        ngcf_out = torch.mean(embs, dim=1)
        users, items = torch.split(ngcf_out, [self.num_users, self.num_items])
        return users, items

    def _split_A_hat(self, A):
        """Split the adjacency into num_folds row blocks (memory saving)."""
        A_fold = []
        fold_len = (self.num_users + self.num_items) // self.num_folds
        for i_fold in range(self.num_folds):
            start = i_fold * fold_len
            if i_fold == self.num_folds - 1:
                end = self.num_users + self.num_items
            else:
                end = (i_fold + 1) * fold_len
            A_fold.append(self._convert_sp_mat_to_sp_tensor(A[start:end]).coalesce().to(self.device))
        return A_fold

    def _convert_sp_mat_to_sp_tensor(self, X):
        """scipy sparse matrix -> torch sparse FloatTensor."""
        coo = X.tocoo().astype(np.float32)
        row = torch.Tensor(coo.row).long()
        col = torch.Tensor(coo.col).long()
        index = torch.stack([row, col])
        data = torch.FloatTensor(coo.data)
        return torch.sparse.FloatTensor(index, data, torch.Size(coo.shape))

    def getSparseGraph(self, rating_matrix):
        """Load or build D^{-1/2} A D^{-1/2} for the bipartite interaction graph."""
        n_users, n_items = rating_matrix.shape
        print("loading adjacency matrix")
        filename = f'{self.data_name}_s_pre_adj_mat.npz'
        try:
            pre_adj_mat = sp.load_npz(os.path.join(self.path, filename))
            print("successfully loaded...")
            norm_adj = pre_adj_mat
        except (OSError, ValueError):
            # missing or unreadable cache file -> rebuild from scratch
            # (the original used a bare except, hiding real errors)
            print("generating adjacency matrix")
            s = time.time()
            adj_mat = sp.dok_matrix((n_users + n_items, n_users + n_items), dtype=np.float32)
            adj_mat = adj_mat.tolil()
            R = rating_matrix.tolil()
            adj_mat[:n_users, n_users:] = R
            adj_mat[n_users:, :n_users] = R.T
            adj_mat = adj_mat.todok()
            # adj_mat = adj_mat + sp.eye(adj_mat.shape[0])

            # symmetric normalization: D^{-1/2} A D^{-1/2}
            rowsum = np.array(adj_mat.sum(axis=1))
            d_inv = np.power(rowsum, -0.5).flatten()
            d_inv[np.isinf(d_inv)] = 0.
            d_mat = sp.diags(d_inv)

            norm_adj = d_mat.dot(adj_mat)
            norm_adj = norm_adj.dot(d_mat)
            norm_adj = norm_adj.tocsr()
            end = time.time()
            print(f"costing {end - s}s, saved norm_mat...")
            sp.save_npz(os.path.join(self.path, filename), norm_adj)

        if self.split == True:
            Graph = self._split_A_hat(norm_adj)
            print("done split matrix")
        else:
            Graph = self._convert_sp_mat_to_sp_tensor(norm_adj)
            Graph = Graph.coalesce().to(self.device)
            print("don't split the matrix")
        return Graph
RecSys_PyTorch | RecSys_PyTorch-master/models/MF.py | """
Steffen Rendle et al., BPR: Bayesian Personalized Ranking from Implicit Feedback. UAI 2009.
https://arxiv.org/pdf/1205.2618
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
from data.generators import PointwiseGenerator, PairwiseGenerator
class MF(BaseModel):
    """Matrix factorization, trainable pointwise (MSE / BCE) or pairwise (BPR).

    Rendle et al., "BPR: Bayesian Personalized Ranking from Implicit
    Feedback", UAI 2009.
    """

    def __init__(self, dataset, hparams, device):
        super(MF, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.hidden_dim = hparams['hidden_dim']
        self.pointwise = hparams['pointwise']  # True: rating regression, False: BPR
        self.loss_func = F.mse_loss if hparams['loss_func'] == 'mse' else F.binary_cross_entropy_with_logits

        self.user_embedding = nn.Embedding(self.num_users, self.hidden_dim)
        self.item_embedding = nn.Embedding(self.num_items, self.hidden_dim)

        self.device = device
        self.to(device)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)

    def embeddings(self, user_ids, item_ids):
        """Look up the latent factors for the given users and items."""
        user_emb = self.user_embedding(user_ids)
        item_emb = self.item_embedding(item_ids)
        return user_emb, item_emb

    def forward(self, user_ids, item_ids):
        """Dot-product score for each (user, item) pair."""
        user_emb, item_emb = self.embeddings(user_ids, item_ids)
        pred_rating = torch.sum(torch.mul(user_emb, item_emb), 1)
        return pred_rating

    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Train pointwise or pairwise depending on self.pointwise."""
        train_matrix = dataset.train_data

        if self.pointwise:
            batch_generator = PointwiseGenerator(
                train_matrix, return_rating=True, num_negatives=1,
                batch_size=exp_config.batch_size, shuffle=True, device=self.device)
        else:
            batch_generator = PairwiseGenerator(
                train_matrix, num_negatives=1, num_positives_per_user=1,
                batch_size=exp_config.batch_size, shuffle=True, device=self.device)

        num_batches = len(batch_generator)
        scores = None  # latest evaluation result; stays None if never evaluated
        for epoch in range(1, exp_config.num_epochs + 1):
            self.train()
            epoch_loss = 0.0
            # batch_ratings: ratings if pointwise, negative items if pairwise
            for b, (batch_users, batch_pos, batch_ratings) in enumerate(batch_generator):
                self.optimizer.zero_grad()
                batch_loss = self.process_one_batch(batch_users, batch_pos, batch_ratings)
                batch_loss.backward()
                self.optimizer.step()

                # .item() detaches the scalar so the batch graph/tensors are freed.
                epoch_loss += batch_loss.item()

                if exp_config.verbose and b % 50 == 0:
                    print('(%3d / %3d) loss = %.4f' % (b, num_batches, batch_loss))
            epoch_summary = {'loss': epoch_loss}

            # Evaluate if necessary
            if evaluator is not None and epoch >= exp_config.test_from and epoch % exp_config.test_step == 0:
                scores = evaluator.evaluate(self)
                epoch_summary.update(scores)

                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)

                ## Check early stop
                if early_stop is not None:
                    is_update, should_stop = early_stop.step(scores, epoch)
                    if should_stop:
                        break
            else:
                if loggers is not None:
                    for logger in loggers:
                        logger.log_metrics(epoch_summary, epoch=epoch)

        # 'scores' may legitimately be None here (no evaluator given); previously
        # this raised NameError in that case.
        best_score = early_stop.best_score if early_stop is not None else scores
        return {'scores': best_score}

    def process_one_batch(self, users, items, ratings):
        """Pointwise regression loss, or BPR loss when ratings holds negatives."""
        pos_ratings = self.forward(users, items)
        if self.pointwise:
            loss = self.loss_func(pos_ratings, ratings)
        else:
            neg_ratings = self.forward(users, ratings)
            # torch.sigmoid replaces the deprecated F.sigmoid
            loss = -torch.sigmoid(pos_ratings - neg_ratings).log().mean()
        return loss

    def predict_batch_users(self, user_ids):
        """All-item scores for a batch of users."""
        user_latent = self.user_embedding(user_ids)
        all_item_latent = self.item_embedding.weight.data
        return user_latent @ all_item_latent.T

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Full prediction matrix; seen positives are masked to -inf."""
        num_eval_users = len(eval_users)
        num_batches = int(np.ceil(num_eval_users / test_batch_size))
        pred_matrix = np.zeros(eval_pos.shape)
        perm = list(range(num_eval_users))
        with torch.no_grad():
            for b in range(num_batches):
                if (b + 1) * test_batch_size >= num_eval_users:
                    batch_idx = perm[b * test_batch_size:]
                else:
                    batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]

                batch_users = eval_users[batch_idx]
                batch_users_torch = torch.LongTensor(batch_users).to(self.device)
                pred_matrix[batch_users] = self.predict_batch_users(batch_users_torch).detach().cpu().numpy()

        pred_matrix[eval_pos.nonzero()] = float('-inf')
        return pred_matrix
| 5,277 | 38.38806 | 109 | py |
RecSys_PyTorch | RecSys_PyTorch-master/models/BaseModel.py | import torch.nn as nn
class BaseModel(nn.Module):
    """Abstract base class for recommender models.

    Subclasses are expected to override forward/fit/predict; the defaults
    here are no-ops that return None.
    """

    def __init__(self):
        super().__init__()

    def forward(self, *input):
        """Forward pass; override in subclasses."""
        pass

    def fit(self, *input):
        """Train the model; override in subclasses."""
        pass

    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score items for evaluation; override in subclasses."""
        pass
RecSys_PyTorch | RecSys_PyTorch-master/models/SLIMElastic.py | """
Xia Ning et al., SLIM: Sparse Linear Methods for Top-N Recommender Systems. ICDM 2011.
http://glaros.dtc.umn.edu/gkhome/fetch/papers/SLIM2011icdm.pdf
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.sparse as sp
from tqdm import tqdm
from sklearn.linear_model import ElasticNet
from .BaseModel import BaseModel
class SLIM(BaseModel):
    """SLIM: Sparse Linear Methods for Top-N Recommendation (Ning & Karypis, ICDM 2011).

    Learns a sparse, non-negative item-item weight matrix W by solving one
    ElasticNet regression per item column; users are scored as R @ W.
    """
    def __init__(self, dataset, hparams, device):
        super(SLIM, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.l1_reg = hparams['l1_reg']
        self.l2_reg = hparams['l2_reg']
        self.topk = hparams['topk']
        self.device = device
        # Map (l1_reg, l2_reg) to sklearn's (alpha, l1_ratio) parametrization.
        alpha = self.l1_reg + self.l2_reg
        l1_ratio = self.l1_reg / alpha
        # One shared solver, refit once per item column in fit_slim.
        self.slim = ElasticNet(alpha=alpha,
                                l1_ratio=l1_ratio,
                                positive=True,
                                fit_intercept=False,
                                copy_X=False,
                                precompute=True,
                                selection='random',
                                max_iter=300,
                                tol=1e-3)
    def fit_slim(self, train_matrix, num_blocks=10000000):
        """Fit one ElasticNet per item and assemble the sparse item-item matrix.

        Args:
            train_matrix: user-item interaction matrix in CSC format
                (columns are items; indptr/data are mutated in place and
                restored after each item).
            num_blocks: allocation chunk size for the COO triplet buffers.
        """
        num_items = train_matrix.shape[1]
        # Use array as it reduces memory requirements compared to lists
        rows = np.zeros(num_blocks, dtype=np.int32)
        cols = np.zeros(num_blocks, dtype=np.int32)
        values = np.zeros(num_blocks, dtype=np.float32)
        numCells = 0
        tqdm_iterator = tqdm(range(num_items), desc='# items covered', total=num_items)
        for item in tqdm_iterator:
            # Regression target: the item's own interaction column.
            y = train_matrix[:, item].toarray()
            # set the j-th column of X to zero
            # (the item must not be allowed to predict itself)
            start_pos = train_matrix.indptr[item]
            end_pos = train_matrix.indptr[item + 1]
            current_item_data_backup = train_matrix.data[start_pos: end_pos].copy()
            train_matrix.data[start_pos: end_pos] = 0.0
            self.slim.fit(train_matrix, y)
            # Select topK values
            # Sorting is done in three steps. Faster then plain np.argsort for higher number of items
            # - Partition the data to extract the set of relevant items
            # - Sort only the relevant items
            # - Get the original item index
            # nonzero_model_coef_index = self.model.coef_.nonzero()[0]
            # nonzero_model_coef_value = self.model.coef_[nonzero_model_coef_index]
            nonzero_model_coef_index = self.slim.sparse_coef_.indices
            nonzero_model_coef_value = self.slim.sparse_coef_.data
            # The -1 keeps the kth argument of argpartition within bounds
            # when fewer than topk coefficients are nonzero.
            local_topK = min(len(nonzero_model_coef_value)-1, self.topk)
            relevant_items_partition = (-nonzero_model_coef_value).argpartition(local_topK)[0:local_topK]
            relevant_items_partition_sorting = np.argsort(-nonzero_model_coef_value[relevant_items_partition])
            ranking = relevant_items_partition[relevant_items_partition_sorting]
            for index in range(len(ranking)):
                # Grow the triplet buffers in num_blocks-sized chunks when full.
                if numCells == len(rows):
                    rows = np.concatenate((rows, np.zeros(num_blocks, dtype=np.int32)))
                    cols = np.concatenate((cols, np.zeros(num_blocks, dtype=np.int32)))
                    values = np.concatenate((values, np.zeros(num_blocks, dtype=np.float32)))
                rows[numCells] = nonzero_model_coef_index[ranking[index]]
                cols[numCells] = item
                values[numCells] = nonzero_model_coef_value[ranking[index]]
                numCells += 1
            # Restore the zeroed column before moving on to the next item.
            train_matrix.data[start_pos:end_pos] = current_item_data_backup
        self.W_sparse = sp.csr_matrix((values[:numCells], (rows[:numCells], cols[:numCells])), shape=(num_items, num_items), dtype=np.float32)
    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Fit SLIM once (non-iterative), optionally evaluate and log.

        Returns:
            dict with evaluation 'scores' (or None) and a reconstruction 'loss'.
        """
        train_matrix = dataset.train_data.tocsc()
        self.fit_slim(train_matrix)
        output = train_matrix.tocsr() @ self.W_sparse
        # NOTE(review): F.binary_cross_entropy expects (input=predictions,
        # target=labels); the arguments here look swapped — confirm intent.
        loss = F.binary_cross_entropy(torch.tensor(train_matrix.toarray()), torch.tensor(output.toarray()))
        if evaluator is not None:
            scores = evaluator.evaluate(self)
        else:
            scores = None
        if loggers is not None:
            if evaluator is not None:
                for logger in loggers:
                    logger.log_metrics(scores, epoch=1)
        return {'scores': scores, 'loss': loss}
    def predict(self, eval_users, eval_pos, test_batch_size):
        """Score all items for every row of eval_pos; mask known positives.

        Args:
            eval_users: unused here; every row of eval_pos is scored.
            eval_pos: sparse matrix of known positives (defines output shape).
            test_batch_size: number of rows scored per matrix product.

        Returns:
            Dense score matrix with observed entries set to -inf.
        """
        input_matrix = eval_pos.toarray()
        preds = np.zeros_like(input_matrix)
        num_data = input_matrix.shape[0]
        num_batches = int(np.ceil(num_data / test_batch_size))
        perm = list(range(num_data))
        for b in range(num_batches):
            if (b + 1) * test_batch_size >= num_data:
                batch_idx = perm[b * test_batch_size:]
            else:
                batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
            test_batch_matrix = input_matrix[batch_idx]
            batch_pred_matrix = (test_batch_matrix @ self.W_sparse)
            preds[batch_idx] = batch_pred_matrix
        preds[eval_pos.nonzero()] = float('-inf')
        return preds | 5,363 | 38.441176 | 142 | py |
RecSys_PyTorch | RecSys_PyTorch-master/models/EASE.py | """
Harald Steck, Embarrassingly Shallow Autoencoders for Sparse Data. WWW 2019.
https://arxiv.org/pdf/1905.03375
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
class EASE(BaseModel):
    """Embarrassingly Shallow Autoencoder (Steck, WWW 2019).

    Computes a closed-form item-item weight matrix with zero diagonal and
    scores users by multiplying their interaction rows with it.
    """
    def __init__(self, dataset, hparams, device):
        super(EASE, self).__init__()
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.reg = hparams['reg']  # L2 (ridge) regularization weight
        self.device = device
        self.to(self.device)
    def forward(self, rating_matrix):
        """Solve EASE in closed form; stores enc_w and returns R @ enc_w.

        Args:
            rating_matrix: scipy sparse user-item interaction matrix.

        Returns:
            Dense array of reconstructed scores (users x items).
        """
        # Item-item Gram matrix with the ridge term added on the diagonal.
        gram = (rating_matrix.T @ rating_matrix).toarray()
        diag_idx = np.diag_indices(gram.shape[0])
        gram[diag_idx] += self.reg
        inv_gram = np.linalg.inv(gram)
        # Closed-form weights; zero the diagonal to forbid self-similarity.
        self.enc_w = inv_gram / (-np.diag(inv_gram))
        self.enc_w[diag_idx] = 0
        return rating_matrix @ self.enc_w
    def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
        """Fit once (closed form), optionally evaluate and log."""
        self.train()
        # Solve EASE
        train_matrix = dataset.train_data
        output = self.forward(train_matrix)
        # NOTE(review): F.binary_cross_entropy expects (input=predictions,
        # target=labels); the argument order here looks swapped — confirm
        # before relying on the reported loss value.
        loss = F.binary_cross_entropy(torch.tensor(train_matrix.toarray()), torch.tensor(output))
        scores = evaluator.evaluate(self) if evaluator is not None else None
        if loggers is not None and evaluator is not None:
            for logger in loggers:
                logger.log_metrics(scores, epoch=1)
        return {'scores': scores, 'loss': loss}
    def predict(self, eval_users, eval_pos, test_batch_size):
        """Dense score matrix for all rows of eval_pos; positives get -inf."""
        input_matrix = eval_pos.toarray()
        preds = np.zeros_like(input_matrix)
        num_rows = input_matrix.shape[0]
        # Score rows in contiguous chunks to bound memory.
        for start in range(0, num_rows, test_batch_size):
            stop = min(start + test_batch_size, num_rows)
            preds[start:stop] = input_matrix[start:stop] @ self.enc_w
        preds[eval_pos.nonzero()] = float('-inf')
        return preds
RecSys_PyTorch | RecSys_PyTorch-master/loggers/base.py | import abc
from typing import MutableMapping
from argparse import Namespace
import torch
import numpy as np
class Logger(abc.ABC):
    """Base class for experiment loggers.

    Provides helpers to flatten nested hyperparameter dicts and to convert
    non-primitive values to strings before logging. Concrete backends
    override the no-op hooks.
    """
    def __init__(self):
        super().__init__()

    def setup_logger(self):
        """Optional hook; concrete loggers may override."""
        pass

    # @abc.abstractmethod
    # def log_hparams(self, hparams):
    #     raise NotImplementedError('setup_logger is not implemented.')

    # @abc.abstractmethod
    # def log_metrics(self, metrics, epoch=None):
    #     raise NotImplementedError('setup_logger is not implemented.')

    def log_image(self, image_name, image, epoch=None):
        """Optional hook; concrete loggers may override."""
        pass

    def log_artifact(self, artifact, destination=None):
        """Optional hook; concrete loggers may override."""
        pass

    def save(self):
        """Optional hook; concrete loggers may override."""
        pass

    def add_dict_prefix(self, dictionary, prefix=None):
        """Return a copy of ``dictionary`` with ``prefix`` prepended to keys.

        When ``prefix`` is falsy, the original dict object is returned.
        """
        if not prefix:
            return dictionary
        return {prefix + key: value for key, value in dictionary.items()}

    def _flatten_dict(self, params, delimiter='/'):
        """Flatten a hierarchical dict, e.g. ``{'a': {'b': 'c'}} -> {'a/b': 'c'}``.

        Nested mappings and argparse Namespaces are descended into; leaf
        keys are joined with ``delimiter``. ``None`` leaves become ``'None'``.

        Args:
            params: dictionary containing the hyperparameters.
            delimiter: separator expressing the hierarchy. Defaults to '/'.

        Returns:
            Flattened dict.

        Examples:
            >>> Logger()._flatten_dict({'a': {'b': 'c'}})
            {'a/b': 'c'}
            >>> Logger()._flatten_dict({'a': {'b': 123}})
            {'a/b': 123}
        """
        def _walk(node, path=None):
            path = list(path) if path else []
            if isinstance(node, MutableMapping):
                for key, value in node.items():
                    if isinstance(value, (MutableMapping, Namespace)):
                        child = vars(value) if isinstance(value, Namespace) else value
                        yield from _walk(child, path + [key])
                    else:
                        yield path + [key, value if value is not None else str(None)]
            else:
                yield path + [node if node is None else str(node)]

        return {delimiter.join(keys): val for *keys, val in _walk(params)}

    def _sanitize_params(self, params):
        """Return ``params`` with non-primitive values converted to strings.

        Primitives (bool, int, float, str, torch.Tensor) pass through
        unchanged; anything else is replaced with its ``str()`` form.
        """
        primitives = (bool, int, float, str, torch.Tensor)
        return {k: v if type(v) in primitives else str(v) for k, v in params.items()}
if __name__ == '__main__':
    # Quick manual smoke test of the flatten/sanitize helpers.
    sample = {
        'a': 1,
        'b': [1, 2, 3],
        'c': '[1, 2, 3]',
        'd': {
            'd1': 123,
            'd2': 1,
        },
    }
    logger = Logger()
    print(logger._sanitize_params(logger._flatten_dict(sample)))
RecSys_PyTorch | RecSys_PyTorch-master/loggers/tensorboard.py | import torch
from torch.utils.tensorboard import SummaryWriter
from torch.utils.tensorboard.summary import hparams as hparams_tb
from logger.base import Logger
class TensorboardLogger(Logger):
    """Logger that writes metrics, hyperparameters and images to TensorBoard."""
    def __init__(self,
                 log_dir: str,
                 experiment_name: str,
                 hparams: dict,
                 log_graph: bool = False):
        """Store configuration and open the underlying SummaryWriter."""
        self.log_dir = log_dir
        self.experiment_name = experiment_name
        self.log_graph = log_graph
        self.hparams = hparams
        self.initialize()
    @property
    def system_property(self):
        # NOTE(review): SummaryWriter exposes no get_system_properties();
        # this looks copied from another logger backend — confirm before use.
        return self.experiment.get_system_properties()
    def initialize(self):
        """Create the SummaryWriter that backs all logging calls."""
        self.experiment = SummaryWriter(log_dir=self.log_dir)
    def add_dict_prefix(self, metrics, metric_prefix):
        """Return metrics with keys renamed to '<metric_prefix>/<key>'."""
        return {f'{metric_prefix}/{k}':v for k, v in metrics.items()}
    def log_hparams(self, hparams, metrics=None):
        """Merge and record hyperparameters (with optional metrics).

        Values TensorBoard cannot store natively are stringified first.
        """
        self.hparams.update(hparams)
        flattened = self._flatten_dict(self.hparams)
        for k, v in flattened.items():
            # Bug fix: the original tested the key `k` (always a str) against
            # a tuple containing torch.tensor (a function, not a type), which
            # stringified every value. Stringify only non-primitive values.
            if not isinstance(v, (bool, int, float, str, torch.Tensor)):
                flattened[k] = str(v)
        if metrics:
            self.experiment.add_hparams(flattened, dict(metrics))
    def _log_metric(self, metric_name, value, epoch=None):
        """Write a single scalar, with or without a global step."""
        if epoch is None:
            self.experiment.add_scalar(metric_name, value)
        else:
            self.experiment.add_scalar(metric_name, value, epoch)
    def log_metrics(self, metrics, epoch=None, prefix=None):
        """Write a dict of scalars; tensors are unwrapped via .item()."""
        if prefix is not None:
            metrics = self.add_dict_prefix(metrics, prefix)
        for k, v in metrics.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            try:
                self._log_metric(k, v, epoch)
            except Exception as e:
                # Bug fix: the original referenced the Python-2-only e.message
                # (raising AttributeError) and built the new exception without
                # ever raising it. Re-raise with extra context instead.
                m = f'\n you tried to log {v} which is not currently supported. Try a dict or a scalar/tensor.'
                raise type(e)(str(e) + m) from e
    def log_image(self, image_name, image, epoch=None):
        """Write an image, with or without a global step."""
        if epoch is None:
            self.experiment.add_image(image_name, image)
        else:
            self.experiment.add_image(image_name, image, epoch)
    def log_artifact(self, artifact, destination=None):
        """Artifacts are not supported by the TensorBoard backend."""
        pass
RecSys_PyTorch | RecSys_PyTorch-master/utils/general.py | import os
import math
import time
import datetime
import random
import numpy as np
import torch
def make_log_dir(save_dir):
    """Create a new, uniquely numbered log directory under save_dir.

    Directory names look like '<index>_<YYYYmmdd-HHMM>', where index is one
    past the largest existing index in save_dir. If the candidate name
    already exists, the function retries recursively.

    Args:
        save_dir: parent directory; created if missing.

    Returns:
        Path of the newly created log directory.
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    existing = os.listdir(save_dir)
    if existing:
        # Next index is one past the largest existing '<idx>_...' prefix.
        next_idx = max(int(name.split('_')[0]) for name in existing) + 1
    else:
        next_idx = 0
    candidate = os.path.join(save_dir, '%d_%s' % (next_idx, time.strftime('%Y%m%d-%H%M')))
    if os.path.exists(candidate):
        # Name collision: retry with a freshly computed index/timestamp.
        return make_log_dir(save_dir)
    os.mkdir(candidate)
    return candidate
def set_random_seed(seed):
    """Seed the python, numpy and torch RNGs for reproducibility.

    Also forces deterministic cuDNN kernels and disables the cuDNN
    autotuner, which trades some speed for repeatable results.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Trade speed for reproducibility in cuDNN kernels.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def getlocaltime():
    """Return the current local date and time as strings.

    Returns:
        Tuple ``(date, current_time)`` formatted as 'yy-mm-dd' and 'HH:MM:SS'.
    """
    date = time.strftime('%y-%m-%d', time.localtime())
    current_time = time.strftime('%H:%M:%S', time.localtime())
    # Bug fix: the original computed both strings but returned None.
    return date, current_time
def seconds_to_hms(second):
    """Format a duration in seconds as 'H:MM:SS' via datetime.timedelta."""
    return str(datetime.timedelta(seconds=second))
RecSys_PyTorch | RecSys_PyTorch-master/data/generators.py | import torch
import numpy as np
class MatrixGenerator:
    """Yield row-batches of a sparse matrix, optionally with row indices.

    Rows are densified per batch and returned either as numpy arrays or as
    torch tensors on the configured device.
    """

    def __init__(self, input_matrix, return_index=False, batch_size=32, shuffle=True,
                 matrix_as_numpy=False, index_as_numpy=False, device=None):
        super().__init__()
        self.input_matrix = input_matrix
        self.return_index = return_index
        self._num_data = input_matrix.shape[0]
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.matrix_as_numpy = matrix_as_numpy
        self.index_as_numpy = index_as_numpy
        self.device = device

    def __len__(self):
        return int(np.ceil(self._num_data / self.batch_size))

    def __iter__(self):
        # Row visit order: random permutation when shuffling, else sequential.
        if self.shuffle:
            order = np.random.permutation(self._num_data)
        else:
            order = np.arange(self._num_data, dtype=np.int32)
        for start in range(0, self._num_data, self.batch_size):
            stop = min(start + self.batch_size, self._num_data)
            rows = order[start:stop]
            dense = self.input_matrix[rows].toarray()
            if not self.matrix_as_numpy:
                dense = torch.tensor(dense, dtype=torch.float32, device=self.device)
            if not self.return_index:
                yield dense
            elif self.index_as_numpy:
                yield dense, rows
            else:
                yield dense, torch.tensor(rows, dtype=torch.int64, device=self.device)
class PointwiseGenerator:
    """Yield (user, item[, rating]) training batches from a sparse matrix.

    Positive interactions are flattened from the matrix once at construction;
    optional negatives are sampled per batch with rating 0.
    """
    def __init__(self, input_matrix, return_rating=True, as_numpy=False, negative_sample=True, num_negatives=1, batch_size=32, shuffle=True, device=None):
        super().__init__()
        self.input_matrix = input_matrix
        self.return_rating = return_rating
        self.negative_sample = negative_sample
        self.num_negatives = num_negatives
        self.as_numpy = as_numpy
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.device = device
        self._construct()
    def _construct(self):
        # Flatten the sparse matrix into parallel (user, item, rating) arrays.
        num_users, num_items = self.input_matrix.shape
        self.users = []
        self.items = []
        self.ratings = []
        for u in range(num_users):
            u_items = self.input_matrix[u].indices
            u_ratings = self.input_matrix[u].data
            self.users += [u] * len(u_items)
            self.items += u_items.tolist()
            if self.return_rating:
                self.ratings += u_ratings.tolist()
        self.users = np.array(self.users)
        self.items = np.array(self.items)
        # Empty array when return_rating is False.
        self.ratings = np.array(self.ratings)
        self._num_data = len(self.users)
    def sample_negatives(self, users):
        # NOTE(review): the `users` argument is immediately shadowed and
        # ignored — negatives are drawn for EVERY user in the matrix, not
        # just the batch. Confirm whether per-batch sampling was intended.
        num_users, num_items = self.input_matrix.shape
        users = []
        negatives = []
        for u in range(num_users):
            u_pos_items = self.input_matrix[u].indices
            # Uniform distribution over the user's unobserved items.
            prob = np.ones(num_items)
            prob[u_pos_items] = 0.0
            prob = prob / sum(prob)
            neg_samples = np.random.choice(num_items, size=self.num_negatives, replace=False, p=prob)
            users += [u] * len(neg_samples)
            negatives += neg_samples.tolist()
        users = np.array(users)
        negatives = np.array(negatives)
        # Negatives carry rating 0 (integer zeros, same shape as users).
        ratings = np.zeros_like(users)
        return users, negatives, ratings
    def __len__(self):
        return int(np.ceil(self._num_data / self.batch_size))
    def __iter__(self):
        if self.shuffle:
            perm = np.random.permutation(self._num_data)
        else:
            perm = np.arange(self._num_data)
        for b, st in enumerate(range(0, self._num_data, self.batch_size)):
            ed = min(st + self.batch_size, self._num_data)
            batch_idx = perm[st:ed]
            batch_users = self.users[batch_idx]
            batch_items = self.items[batch_idx]
            if self.return_rating:
                batch_ratings = self.ratings[batch_idx]
                if self.negative_sample and self.num_negatives > 0:
                    # NOTE(review): because sample_negatives ignores its
                    # argument, each batch grows by num_users * num_negatives
                    # extra rows — potentially much larger than batch_size.
                    neg_users, neg_items, neg_ratings = self.sample_negatives(batch_users)
                    batch_users = np.concatenate((batch_users, neg_users))
                    batch_items = np.concatenate((batch_items, neg_items))
                    batch_ratings = np.concatenate((batch_ratings, neg_ratings))
                if not self.as_numpy:
                    batch_users = torch.tensor(batch_users, dtype=torch.long, device=self.device)
                    batch_items = torch.tensor(batch_items, dtype=torch.long, device=self.device)
                    batch_ratings = torch.tensor(batch_ratings, dtype=torch.float32, device=self.device)
                yield batch_users, batch_items, batch_ratings
            else:
                # Without ratings no negatives are added, even when
                # negative_sample is True.
                if not self.as_numpy:
                    batch_users = torch.tensor(batch_users, dtype=torch.long, device=self.device)
                    batch_items = torch.tensor(batch_items, dtype=torch.long, device=self.device)
                yield batch_users, batch_items
class PairwiseGenerator:
    """Yield (user, positive item, negative item) triples for BPR-style training.

    Triples are sampled once at construction time: for each user, positives
    come from the observed items (optionally subsampled) and one negative is
    drawn uniformly from the unobserved items per positive.
    """
    def __init__(self, input_matrix, as_numpy=False, num_positives_per_user=-1, num_negatives=1, batch_size=32, shuffle=True, device=None):
        self.input_matrix = input_matrix
        self.num_positives_per_user = num_positives_per_user
        self.num_negatives = num_negatives
        self.as_numpy = as_numpy
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.device = device
        self._construct()
    def _construct(self):
        """Sample the (user, positive, negative) triples once."""
        num_users, num_items = self.input_matrix.shape
        self._data = self.sample_negatives()
        self._num_data = len(self._data[0])
    def sample_negatives(self):
        """Sample one negative per (sub)sampled positive for every user.

        Returns:
            (users, positives, negatives) parallel numpy arrays.
        """
        num_users, num_items = self.input_matrix.shape
        users = []
        positives = []
        negatives = []
        for u in range(num_users):
            u_pos_items = self.input_matrix[u].indices
            num_pos_user = len(u_pos_items)
            # Uniform distribution over the user's unobserved items.
            prob = np.ones(num_items)
            prob[u_pos_items] = 0.0
            prob = prob / sum(prob)
            if self.num_positives_per_user > 0 and self.num_positives_per_user < num_pos_user:
                # Subsample the positives.
                # Bug fix: the original drew 'positives' uniformly from ALL
                # items (np.random.choice(num_items, ...)), so unobserved
                # items could be labeled positive. Sample from the user's
                # observed items instead.
                pos_sampled = np.random.choice(u_pos_items, size=self.num_positives_per_user, replace=False)
                neg_sampled = np.random.choice(num_items, size=self.num_positives_per_user, replace=False, p=prob)
            else:
                # Use all observed positives.
                pos_sampled = u_pos_items
                neg_sampled = np.random.choice(num_items, size=num_pos_user, replace=False, p=prob)
            assert len(pos_sampled) == len(neg_sampled)
            users += [u] * len(neg_sampled)
            positives += pos_sampled.tolist()
            negatives += neg_sampled.tolist()
        users = np.array(users)
        positives = np.array(positives)
        negatives = np.array(negatives)
        return users, positives, negatives
    def __len__(self):
        return int(np.ceil(self._num_data / self.batch_size))
    def __iter__(self):
        if self.shuffle:
            perm = np.random.permutation(self._num_data)
        else:
            perm = np.arange(self._num_data)
        for b, st in enumerate(range(0, self._num_data, self.batch_size)):
            ed = min(st + self.batch_size, self._num_data)
            batch_idx = perm[st:ed]
            batch_users = self._data[0][batch_idx]
            batch_pos = self._data[1][batch_idx]
            batch_neg = self._data[2][batch_idx]
            if not self.as_numpy:
                batch_users = torch.tensor(batch_users, dtype=torch.long, device=self.device)
                batch_pos = torch.tensor(batch_pos, dtype=torch.long, device=self.device)
                batch_neg = torch.tensor(batch_neg, dtype=torch.long, device=self.device)
            yield batch_users, batch_pos, batch_neg
RecSys_PyTorch | RecSys_PyTorch-master/data/data_batcher.py | import torch
import numpy as np
class BatchSampler:
    """Yield lists of indices covering [0, data_size) in fixed-size batches."""

    def __init__(self, data_size, batch_size, drop_remain=False, shuffle=False):
        self.data_size = data_size
        self.batch_size = batch_size
        self.drop_remain = drop_remain
        self.shuffle = shuffle

    def __iter__(self):
        # Index visit order: random permutation when shuffling, else sequential.
        order = np.random.permutation(self.data_size) if self.shuffle else range(self.data_size)
        batch = []
        for idx in order:
            batch.append(idx)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        # Trailing partial batch, unless callers asked to drop it.
        if batch and not self.drop_remain:
            yield batch

    def __len__(self):
        if self.drop_remain:
            return self.data_size // self.batch_size
        return int(np.ceil(self.data_size / self.batch_size))
class DataBatcher:
    """Batch one or more parallel data arrays with a shared index order.

    Note: the sampler iterator is created once in __init__, so a
    DataBatcher instance can be consumed only a single time.
    """

    def __init__(self, *data_source, batch_size, drop_remain=False, shuffle=False):
        # Lists are promoted to numpy arrays for fancy indexing.
        self.data_source = [np.array(d) if isinstance(d, list) else d for d in data_source]
        self.batch_size = batch_size
        self.drop_remain = drop_remain
        self.shuffle = shuffle
        self.data_size = len(self.data_source[0])
        if len(self.data_source) > 1 and any(len(src) != self.data_size for src in self.data_source):
            raise ValueError("All elements in data_source should have same lengths")
        self.sampler = BatchSampler(self.data_size, self.batch_size, self.drop_remain, self.shuffle)
        self.iterator = iter(self.sampler)
        self.n = 0

    def __next__(self):
        batch_idx = next(self.iterator)
        batch = tuple(data[batch_idx] for data in self.data_source)
        # A single source is unwrapped from its 1-tuple.
        return batch[0] if len(batch) == 1 else batch

    def __iter__(self):
        return self

    def __len__(self):
        return len(self.sampler)
paac | paac-master/networks.py | import tensorflow as tf
import logging
import numpy as np
def flatten(_input):
    """Reshape an NHWC tensor to (batch, H*W*C)."""
    h, w, c = _input.get_shape().as_list()[1:4]
    return tf.reshape(_input, [-1, h * w * c], name='_flattened')
def conv2d(name, _input, filters, size, channels, stride, padding='VALID', init="torch"):
    """2-D convolution layer followed by a ReLU activation.

    Args:
        name: variable/op name prefix.
        _input: NHWC input tensor.
        filters: number of output channels.
        size: square kernel side length.
        channels: number of input channels.
        stride: spatial stride (same in both dimensions).
        padding: 'VALID' or 'SAME'.
        init: 'glorot_uniform' or the default fan-in 'torch' scheme.

    Returns:
        (weights, biases, activations) triple.
    """
    w = conv_weight_variable([size, size, channels, filters], name + '_weights', init=init)
    b = conv_bias_variable([filters], size, size, channels, name + '_biases', init=init)
    conv = tf.nn.conv2d(_input, w, strides=[1, stride, stride, 1],
                        padding=padding, name=name + '_convs')
    out = tf.nn.relu(tf.add(conv, b), name=name + '_activations')
    return w, b, out
def conv_weight_variable(shape, name, init="torch"):
    """Uniformly initialized conv kernel variable.

    'glorot_uniform' bounds by sqrt(6/(fan_in+fan_out)); the default
    'torch' scheme bounds by 1/sqrt(fan_in) like torch's legacy init.
    """
    if init == "glorot_uniform":
        receptive_field = np.prod(shape[:2])
        fan_in = shape[-2] * receptive_field
        fan_out = shape[-1] * receptive_field
        bound = np.sqrt(6. / (fan_in + fan_out))
    else:
        kernel_w, kernel_h, in_channels = shape[0], shape[1], shape[2]
        bound = 1.0 / np.sqrt(in_channels * kernel_w * kernel_h)
    initial = tf.random_uniform(shape, minval=-bound, maxval=bound)
    return tf.Variable(initial, name=name, dtype='float32')
def conv_bias_variable(shape, w, h, input_channels, name, init="torch"):
    """Conv bias variable: zeros for glorot, fan-in uniform otherwise."""
    if init == "glorot_uniform":
        initial = tf.zeros(shape)
    else:
        bound = 1.0 / np.sqrt(input_channels * w * h)
        initial = tf.random_uniform(shape, minval=-bound, maxval=bound)
    return tf.Variable(initial, name=name, dtype='float32')
def fc(name, _input, output_dim, activation="relu", init="torch"):
    """Fully connected layer; applies ReLU when activation == 'relu'.

    Returns:
        (weights, biases, output) triple.
    """
    input_dim = _input.get_shape().as_list()[1]
    w = fc_weight_variable([input_dim, output_dim], name + '_weights', init=init)
    b = fc_bias_variable([output_dim], input_dim, name + '_biases', init=init)
    out = tf.add(tf.matmul(_input, w), b, name=name + '_out')
    if activation == "relu":
        out = tf.nn.relu(out, name=name + '_relu')
    return w, b, out
def fc_weight_variable(shape, name, init="torch"):
    """Uniformly initialized dense-layer weight variable."""
    if init == "glorot_uniform":
        fan_in, fan_out = shape[0], shape[1]
        bound = np.sqrt(6. / (fan_in + fan_out))
    else:
        # Fan-in scaling, matching torch's legacy linear init.
        bound = 1.0 / np.sqrt(shape[0])
    initial = tf.random_uniform(shape, minval=-bound, maxval=bound)
    return tf.Variable(initial, name=name, dtype='float32')
def fc_bias_variable(shape, input_channels, name, init="torch"):
    """Dense-layer bias: zeros for glorot, fan-in uniform otherwise."""
    if init == "glorot_uniform":
        initial = tf.zeros(shape, dtype='float32')
    else:
        bound = 1.0 / np.sqrt(input_channels)
        initial = tf.random_uniform(shape, minval=-bound, maxval=bound)
    return tf.Variable(initial, name=name, dtype='float32')
def softmax(name, _input, output_dim):
    """Linear layer followed by a softmax policy output."""
    input_dim = _input.get_shape().as_list()[1]
    w = fc_weight_variable([input_dim, output_dim], name + '_weights')
    b = fc_bias_variable([output_dim], input_dim, name + '_biases')
    logits = tf.add(tf.matmul(_input, w), b)
    out = tf.nn.softmax(logits, name=name + '_policy')
    return w, b, out
def log_softmax(name, _input, output_dim):
    """Linear layer followed by a log-softmax policy output."""
    input_dim = _input.get_shape().as_list()[1]
    w = fc_weight_variable([input_dim, output_dim], name + '_weights')
    b = fc_bias_variable([output_dim], input_dim, name + '_biases')
    logits = tf.add(tf.matmul(_input, w), b)
    out = tf.nn.log_softmax(logits, name=name + '_policy')
    return w, b, out
class Network(object):
    """Base TF-1 graph container: shared input placeholders plus checkpoint
    restore. Subclasses build ``self.output`` from ``self.input``."""
    def __init__(self, conf):
        # conf: dict with 'name', 'num_actions', 'clip_norm',
        # 'clip_norm_type' and 'device' entries.
        self.name = conf['name']
        self.num_actions = conf['num_actions']
        self.clip_norm = conf['clip_norm']
        self.clip_norm_type = conf['clip_norm_type']
        self.device = conf['device']
        with tf.device(self.device):
            with tf.name_scope(self.name):
                self.loss_scaling = 5.0
                # 84x84 screens with 4 stacked frames (Atari convention).
                self.input_ph = tf.placeholder(tf.uint8, [None, 84, 84, 4], name='input')
                self.selected_action_ph = tf.placeholder("float32", [None, self.num_actions], name="selected_action")
                # Rescale uint8 pixel values into [0, 1] floats.
                self.input = tf.scalar_mul(1.0/255.0, tf.cast(self.input_ph, tf.float32))
                # This class should never be used, must be subclassed
                # The output layer
                self.output = None
    def init(self, checkpoint_folder, saver, session):
        """Initialize variables, restoring from the latest checkpoint if any.

        Returns:
            The step number parsed from the restored checkpoint path, or 0
            when starting from scratch.
        """
        last_saving_step = 0
        with tf.device('/cpu:0'):
            # Initialize network parameters
            path = tf.train.latest_checkpoint(checkpoint_folder)
            if path is None:
                logging.info('Initializing all variables')
                session.run(tf.global_variables_initializer())
            else:
                logging.info('Restoring network variables from previous run')
                saver.restore(session, path)
                # Checkpoint paths end in '-<global_step>'.
                last_saving_step = int(path[path.rindex('-')+1:])
        return last_saving_step
class NIPSNetwork(Network):
    """Conv net from the NIPS 2013 DQN paper: two conv layers + one FC."""
    def __init__(self, conf):
        super(NIPSNetwork, self).__init__(conf)
        with tf.device(self.device):
            with tf.name_scope(self.name):
                # conv(16 filters, 8x8, stride 4) -> conv(32, 4x4, stride 2)
                # -> fc(256, relu); self.output feeds the policy/value heads.
                _, _, conv1 = conv2d('conv1', self.input, 16, 8, 4, 4)
                _, _, conv2 = conv2d('conv2', conv1, 32, 4, 16, 2)
                _, _, fc3 = fc('fc3', flatten(conv2), 256, activation="relu")
                self.output = fc3
class NatureNetwork(Network):
    """Conv net from the Nature 2015 DQN paper: three conv layers + one FC."""
    def __init__(self, conf):
        super(NatureNetwork, self).__init__(conf)
        with tf.device(self.device):
            with tf.name_scope(self.name):
                # conv(32, 8x8, s4) -> conv(64, 4x4, s2) -> conv(64, 3x3, s1)
                # -> fc(512, relu); self.output feeds the policy/value heads.
                _, _, conv1 = conv2d('conv1', self.input, 32, 8, 4, 4)
                _, _, conv2 = conv2d('conv2', conv1, 64, 4, 32, 2)
                _, _, conv3 = conv2d('conv3', conv2, 64, 3, 64, 1)
                _, _, fc4 = fc('fc4', flatten(conv3), 512, activation="relu")
                self.output = fc4
| 5,972 | 34.135294 | 117 | py |
brainiak | brainiak-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# toolkit documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 17 16:45:35 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from pkg_resources import get_distribution
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'myst_nb',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'brainiak'
copyright = '2016, Princeton Neuroscience Institute and Intel Corporation'
author = 'Princeton Neuroscience Institute and Intel Corporation'
version = get_distribution(project).version
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'brainiakdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'brainiak.tex', 'BrainIAK Documentation',
'Princeton Neuroscience Institute and Intel Corporation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'brainiak', 'BrainIAK Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'brainiak', 'BrainIAK Documentation',
author, 'brainiak', 'Brain Imaging Analysis Kit.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
napoleon_include_special_with_doc = True
# Myst-nb
execution_timeout = -1
jupyter_execute_notebooks = "force"
| 9,448 | 30.708054 | 79 | py |
DMGI | DMGI-master/main.py | import numpy as np
np.random.seed(0)
import torch
torch.autograd.set_detect_anomaly(True)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import argparse
def parse_args():
    """Define the DMGI command-line interface.

    Returns:
        (args, unknown): the parsed Namespace plus any unrecognized argv
        tokens (parse_known_args never fails on extras).
    """
    p = argparse.ArgumentParser(description='DMGI')
    # model / data selection
    p.add_argument('--embedder', nargs='?', default='DMGI')
    p.add_argument('--dataset', nargs='?', default='imdb')
    p.add_argument('--metapaths', nargs='?', default='MAM,MDM')
    # optimisation hyper-parameters
    p.add_argument('--nb_epochs', type=int, default=10000)
    p.add_argument('--hid_units', type=int, default=64)
    p.add_argument('--lr', type=float, default=0.0005)
    p.add_argument('--l2_coef', type=float, default=0.0001)
    p.add_argument('--drop_prob', type=float, default=0.5)
    p.add_argument('--reg_coef', type=float, default=0.001)
    p.add_argument('--sup_coef', type=float, default=0.1)
    p.add_argument('--sc', type=float, default=3.0, help='GCN self connection')
    p.add_argument('--margin', type=float, default=0.1)
    # runtime configuration
    p.add_argument('--gpu_num', type=int, default=0)
    p.add_argument('--patience', type=int, default=20)
    p.add_argument('--nheads', type=int, default=1)
    p.add_argument('--activation', nargs='?', default='relu')
    # boolean feature switches
    p.add_argument('--isSemi', action='store_true', default=False)
    p.add_argument('--isBias', action='store_true', default=False)
    p.add_argument('--isAttn', action='store_true', default=False)
    return p.parse_known_args()
def printConfig(args):
    """Print the run configuration as two lists: argument names, then values."""
    names, values = [], []
    for name in vars(args):
        names.append(name)
        values.append(getattr(args, name))
    print(names)
    print(values)
def main():
    """Entry point: parse CLI flags and run the selected embedding model."""
    # Unrecognized flags are tolerated (parse_known_args) and ignored.
    args, unknown = parse_args()
    # Select the model by name. Only 'DMGI' and 'DGI' are handled; any other
    # value leaves `embedder` unbound and raises NameError at the call below.
    if args.embedder == 'DMGI':
        from models import DMGI
        embedder = DMGI(args)
    elif args.embedder == 'DGI':
        from models import DGI
        embedder = DGI(args)
    embedder.training()
if __name__ == '__main__':
    main()
| 2,131 | 33.95082 | 84 | py |
DMGI | DMGI-master/evaluate.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
from models import LogReg
import torch.nn as nn
import numpy as np
np.random.seed(0)
from sklearn.metrics import f1_score
from sklearn.cluster import KMeans
from sklearn.metrics import normalized_mutual_info_score, pairwise
def evaluate(embeds, idx_train, idx_val, idx_test, labels, device, isTest=True):
    """Evaluate frozen node embeddings with a logistic-regression probe.

    Trains 50 independent LogReg classifiers (50 epochs each) on the
    embeddings, picks the best epoch of each run by validation score, and
    reports the mean/std of test Macro-F1 and Micro-F1. When isTest is True
    it additionally runs K-means clustering (NMI) and a similarity-search
    check; otherwise it returns (mean val Macro-F1, mean test Macro-F1).

    embeds and labels carry a leading batch dimension of size 1.
    """
    hid_units = embeds.shape[2]
    nb_classes = labels.shape[2]
    xent = nn.CrossEntropyLoss()
    # Slice the single batch dimension (index 0) into the three splits.
    train_embs = embeds[0, idx_train]
    val_embs = embeds[0, idx_val]
    test_embs = embeds[0, idx_test]
    # One-hot labels -> class indices.
    train_lbls = torch.argmax(labels[0, idx_train], dim=1)
    val_lbls = torch.argmax(labels[0, idx_val], dim=1)
    test_lbls = torch.argmax(labels[0, idx_test], dim=1)
    accs = []
    micro_f1s = []
    macro_f1s = []
    macro_f1s_val = [] ##
    # 50 restarts of the probe to average out its random initialisation.
    for _ in range(50):
        log = LogReg(hid_units, nb_classes)
        opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
        log.to(device)
        val_accs = []; test_accs = []
        val_micro_f1s = []; test_micro_f1s = []
        val_macro_f1s = []; test_macro_f1s = []
        for iter_ in range(50):
            # train
            log.train()
            opt.zero_grad()
            logits = log(train_embs)
            loss = xent(logits, train_lbls)
            loss.backward()
            opt.step()
            # val
            logits = log(val_embs)
            preds = torch.argmax(logits, dim=1)
            val_acc = torch.sum(preds == val_lbls).float() / val_lbls.shape[0]
            val_f1_macro = f1_score(val_lbls.cpu(), preds.cpu(), average='macro')
            val_f1_micro = f1_score(val_lbls.cpu(), preds.cpu(), average='micro')
            val_accs.append(val_acc.item())
            val_macro_f1s.append(val_f1_macro)
            val_micro_f1s.append(val_f1_micro)
            # test
            logits = log(test_embs)
            preds = torch.argmax(logits, dim=1)
            test_acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
            test_f1_macro = f1_score(test_lbls.cpu(), preds.cpu(), average='macro')
            test_f1_micro = f1_score(test_lbls.cpu(), preds.cpu(), average='micro')
            test_accs.append(test_acc.item())
            test_macro_f1s.append(test_f1_macro)
            test_micro_f1s.append(test_f1_micro)
        # Per metric: take the test score at the epoch with the best
        # validation score (early-stopping by validation, per run).
        max_iter = val_accs.index(max(val_accs))
        accs.append(test_accs[max_iter])
        max_iter = val_macro_f1s.index(max(val_macro_f1s))
        macro_f1s.append(test_macro_f1s[max_iter])
        macro_f1s_val.append(val_macro_f1s[max_iter]) ###
        max_iter = val_micro_f1s.index(max(val_micro_f1s))
        micro_f1s.append(test_micro_f1s[max_iter])
    if isTest:
        print("\t[Classification] Macro-F1: {:.4f} ({:.4f}) | Micro-F1: {:.4f} ({:.4f})".format(np.mean(macro_f1s),
                                                                                                np.std(macro_f1s),
                                                                                                np.mean(micro_f1s),
                                                                                                np.std(micro_f1s)))
    else:
        # Validation mode: return scores for model selection, skip the rest.
        return np.mean(macro_f1s_val), np.mean(macro_f1s)
    # Unsupervised quality checks on the test split.
    test_embs = np.array(test_embs.cpu())
    test_lbls = np.array(test_lbls.cpu())
    run_kmeans(test_embs, test_lbls, nb_classes)
    run_similarity_search(test_embs, test_lbls)
def run_similarity_search(test_embs, test_lbls):
    """Print Sim@N for N in {5,10,20,50,100}: the mean fraction of each
    node's N most cosine-similar neighbours that share its label."""
    n = test_embs.shape[0]
    # Subtract the identity so a node never retrieves itself.
    sim = pairwise.cosine_similarity(test_embs) - np.eye(n)
    scores = []
    for top_k in [5, 10, 20, 50, 100]:
        # Indices of the top_k most similar rows per node (ascending sort,
        # so the last top_k columns are the largest similarities).
        neighbour_idx = np.argsort(sim, axis=1)[:, -top_k:]
        # Labels of the retrieved neighbours, one row per query node.
        tmp = np.tile(test_lbls, (n, 1))
        retrieved = tmp[np.repeat(np.arange(n), top_k), neighbour_idx.ravel()].reshape(n, top_k)
        expected = np.repeat(test_lbls, top_k).reshape(n, top_k)
        scores.append(str(np.round(np.mean(np.sum((retrieved == expected), 1) / top_k), 4)))
    print("\t[Similarity] [5,10,20,50,100] : [{}]".format(','.join(scores)))
def run_kmeans(x, y, k):
    """Cluster embeddings x into k groups and print the mean NMI against labels y."""
    estimator = KMeans(n_clusters=k)
    NMI_list = []
    # K-means initialisation is random: repeat 10 times and average the NMI.
    for i in range(10):
        estimator.fit(x)
        y_pred = estimator.predict(x)
        s1 = normalized_mutual_info_score(y, y_pred, average_method='arithmetic')
        NMI_list.append(s1)
    s1 = sum(NMI_list) / len(NMI_list)
    print('\t[Clustering] NMI: {:.4f}'.format(s1)) | 4,571 | 34.71875 | 115 | py |
DMGI | DMGI-master/embedder.py | import time
import numpy as np
import torch
from utils import process
import torch.nn as nn
from layers import AvgReadout
class embedder:
    """Shared setup for DMGI/DGI trainers.

    Loads the dataset, normalizes features and the per-metapath adjacency
    matrices, moves labels/index splits to the target device, and stores
    derived sizes back onto `args` for the models to read.
    """
    def __init__(self, args):
        # Fixed training configuration: a single "batch" holding the whole graph.
        args.batch_size = 1
        args.sparse = True
        args.metapaths_list = args.metapaths.split(",")
        args.gpu_num_ = args.gpu_num
        if args.gpu_num_ == 'cpu':
            args.device = 'cpu'
        else:
            args.device = torch.device("cuda:" + str(args.gpu_num_) if torch.cuda.is_available() else "cpu")
        # One adjacency + feature matrix per metapath (same features repeated).
        adj, features, labels, idx_train, idx_val, idx_test = process.load_data_dblp(args)
        features = [process.preprocess_features(feature) for feature in features]
        args.nb_nodes = features[0].shape[0]
        args.ft_size = features[0].shape[1]
        args.nb_classes = labels.shape[1]
        args.nb_graphs = len(adj)
        args.adj = adj
        # Symmetric normalization, then conversion to torch sparse tensors.
        adj = [process.normalize_adj(adj_) for adj_ in adj]
        self.adj = [process.sparse_mx_to_torch_sparse_tensor(adj_) for adj_ in adj]
        # Prepend a batch dimension of size 1 to features and labels.
        self.features = [torch.FloatTensor(feature[np.newaxis]) for feature in features]
        self.labels = torch.FloatTensor(labels[np.newaxis]).to(args.device)
        self.idx_train = torch.LongTensor(idx_train).to(args.device)
        self.idx_val = torch.LongTensor(idx_val).to(args.device)
        self.idx_test = torch.LongTensor(idx_test).to(args.device)
        # One-hot labels -> class indices per split.
        self.train_lbls = torch.argmax(self.labels[0, self.idx_train], dim=1)
        self.val_lbls = torch.argmax(self.labels[0, self.idx_val], dim=1)
        self.test_lbls = torch.argmax(self.labels[0, self.idx_test], dim=1)
        # How to aggregate
        args.readout_func = AvgReadout()
        # Summary aggregation
        args.readout_act_func = nn.Sigmoid()
        self.args = args
    def currentTime(self):
        """Return the local time as a 'YYYY-MM-DD HH:MM:SS' string."""
        now = time.localtime()
        s = "%04d-%02d-%02d %02d:%02d:%02d" % (
            now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
        return s
| 1,994 | 35.272727 | 108 | py |
DMGI | DMGI-master/models/logreg.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
import torch.nn.functional as F
class LogReg(nn.Module):
    """Logistic-regression probe: one linear layer over frozen embeddings."""

    def __init__(self, ft_in, nb_classes):
        super(LogReg, self).__init__()
        self.fc = nn.Linear(ft_in, nb_classes)
        # Xavier-initialise every linear submodule (weights; bias to zero).
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        if not isinstance(m, nn.Linear):
            return
        torch.nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0.0)

    def forward(self, seq):
        # Raw class scores; softmax/cross-entropy is applied by the caller.
        return self.fc(seq)
| 697 | 24.851852 | 56 | py |
DMGI | DMGI-master/models/DMGI.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
from embedder import embedder
from layers import GCN, Discriminator, Attention
import numpy as np
np.random.seed(0)
from evaluate import evaluate
from models import LogReg
import pickle as pkl
class DMGI(embedder):
    """DMGI trainer: multiplex Deep Graph Infomax with a consensus embedding."""
    def __init__(self, args):
        embedder.__init__(self, args)
        self.args = args
    def training(self):
        """Train with early stopping on total loss, then evaluate the consensus H.

        Checkpoints the best model to saved_model/ and reloads it before
        evaluation.
        """
        features = [feature.to(self.args.device) for feature in self.features]
        adj = [adj_.to(self.args.device) for adj_ in self.adj]
        model = modeler(self.args).to(self.args.device)
        optimiser = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.l2_coef)
        cnt_wait = 0; best = 1e9
        b_xent = nn.BCEWithLogitsLoss()
        xent = nn.CrossEntropyLoss()
        for epoch in range(self.args.nb_epochs):
            xent_loss = None
            model.train()
            optimiser.zero_grad()
            # Negative samples: the same features with rows (nodes) shuffled.
            idx = np.random.permutation(self.args.nb_nodes)
            shuf = [feature[:, idx, :] for feature in features]
            shuf = [shuf_ft.to(self.args.device) for shuf_ft in shuf]
            # Discriminator targets: 1 for real nodes, 0 for shuffled ones.
            lbl_1 = torch.ones(self.args.batch_size, self.args.nb_nodes)
            lbl_2 = torch.zeros(self.args.batch_size, self.args.nb_nodes)
            lbl = torch.cat((lbl_1, lbl_2), 1).to(self.args.device)
            result = model(features, adj, shuf, self.args.sparse, None, None, None)
            logits = result['logits']
            # Sum the BCE loss over all metapath views.
            for view_idx, logit in enumerate(logits):
                if xent_loss is None:
                    xent_loss = b_xent(logit, lbl)
                else:
                    xent_loss += b_xent(logit, lbl)
            loss = xent_loss
            # Consensus regularizer pulls H toward positive embeddings.
            reg_loss = result['reg_loss']
            loss += self.args.reg_coef * reg_loss
            # Optional semi-supervised term on the training labels.
            if self.args.isSemi:
                sup = result['semi']
                semi_loss = xent(sup[self.idx_train], self.train_lbls)
                loss += self.args.sup_coef * semi_loss
            # Early stopping: checkpoint on improvement, stop after `patience`.
            if loss < best:
                best = loss
                cnt_wait = 0
                torch.save(model.state_dict(), 'saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, self.args.metapaths))
            else:
                cnt_wait += 1
            if cnt_wait == self.args.patience:
                break
            loss.backward()
            optimiser.step()
        model.load_state_dict(torch.load('saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, self.args.metapaths)))
        # Evaluation
        model.eval()
        evaluate(model.H.data.detach(), self.idx_train, self.idx_val, self.idx_test, self.labels, self.args.device)
class modeler(nn.Module):
    """DMGI network: one GCN per metapath, a shared discriminator, and a
    learnable consensus embedding H regularized toward the per-view outputs."""
    def __init__(self, args):
        super(modeler, self).__init__()
        self.args = args
        # One GCN encoder per metapath graph.
        self.gcn = nn.ModuleList([GCN(args.ft_size, args.hid_units, args.activation, args.drop_prob, args.isBias) for _ in range(args.nb_graphs)])
        self.disc = Discriminator(args.hid_units)
        # Consensus node embedding shared across all views.
        self.H = nn.Parameter(torch.FloatTensor(1, args.nb_nodes, args.hid_units))
        self.readout_func = self.args.readout_func
        if args.isAttn:
            self.attn = nn.ModuleList([Attention(args) for _ in range(args.nheads)])
        if args.isSemi:
            self.logistic = LogReg(args.hid_units, args.nb_classes).to(args.device)
        self.init_weight()
    def init_weight(self):
        nn.init.xavier_normal_(self.H)
    def forward(self, feature, adj, shuf, sparse, msk, samp_bias1, samp_bias2):
        """Return a dict with per-view discriminator 'logits', the consensus
        'reg_loss', and (optionally) 'semi' classification scores."""
        h_1_all = []; h_2_all = []; c_all = []; logits = []
        result = {}
        for i in range(self.args.nb_graphs):
            # Positive embeddings from the real features of view i.
            h_1 = self.gcn[i](feature[i], adj[i], sparse)
            # how to readout positive summary vector
            c = self.readout_func(h_1)
            c = self.args.readout_act_func(c) # equation 9
            # Negative embeddings from the shuffled features.
            h_2 = self.gcn[i](shuf[i], adj[i], sparse)
            logit = self.disc(c, h_1, h_2, samp_bias1, samp_bias2)
            h_1_all.append(h_1)
            h_2_all.append(h_2)
            c_all.append(c)
            logits.append(logit)
        result['logits'] = logits
        # Attention or not
        if self.args.isAttn:
            # Average the attention-weighted combination over all heads.
            h_1_all_lst = []; h_2_all_lst = []; c_all_lst = []
            for h_idx in range(self.args.nheads):
                h_1_all_, h_2_all_, c_all_ = self.attn[h_idx](h_1_all, h_2_all, c_all)
                h_1_all_lst.append(h_1_all_); h_2_all_lst.append(h_2_all_); c_all_lst.append(c_all_)
            h_1_all = torch.mean(torch.cat(h_1_all_lst, 0), 0).unsqueeze(0)
            h_2_all = torch.mean(torch.cat(h_2_all_lst, 0), 0).unsqueeze(0)
        else:
            # Plain average over the metapath views.
            h_1_all = torch.mean(torch.cat(h_1_all), 0).unsqueeze(0)
            h_2_all = torch.mean(torch.cat(h_2_all), 0).unsqueeze(0)
        # consensus regularizer: attract H to positives, repel from negatives.
        pos_reg_loss = ((self.H - h_1_all) ** 2).sum()
        neg_reg_loss = ((self.H - h_2_all) ** 2).sum()
        reg_loss = pos_reg_loss - neg_reg_loss
        result['reg_loss'] = reg_loss
        # semi-supervised module
        if self.args.isSemi:
            semi = self.logistic(self.H).squeeze(0)
            result['semi'] = semi
        return result | 5,373 | 34.826667 | 146 | py |
DMGI | DMGI-master/models/DGI.py | # Code based on https://github.com/PetarV-/DGI/blob/master/models/dgi.py
import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
from embedder import embedder
from layers import GCN, Discriminator
import numpy as np
np.random.seed(0)
from evaluate import evaluate
class DGI(embedder):
    """Baseline trainer: an independent DGI model per metapath, then an
    averaged ("Integrated") embedding over all metapaths."""
    def __init__(self, args):
        embedder.__init__(self, args)
        self.args = args
    def training(self):
        features_lst = [feature.to(self.args.device) for feature in self.features]
        adj_lst = [adj_.to(self.args.device) for adj_ in self.adj]
        final_embeds = []
        # Train one DGI model per metapath graph.
        for m_idx, (features, adj) in enumerate(zip(features_lst, adj_lst)):
            metapath = self.args.metapaths_list[m_idx]
            print("- Training on {}".format(metapath))
            model = modeler(self.args).to(self.args.device)
            optimiser = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.l2_coef)
            cnt_wait = 0; best = 1e9
            b_xent = nn.BCEWithLogitsLoss()
            for epoch in range(self.args.nb_epochs):
                model.train()
                optimiser.zero_grad()
                # Negative samples: node-shuffled features.
                idx = np.random.permutation(self.args.nb_nodes)
                shuf_fts = features[:, idx, :].to(self.args.device)
                # Targets: 1 for real nodes, 0 for shuffled ones.
                lbl_1 = torch.ones(self.args.batch_size, self.args.nb_nodes)
                lbl_2 = torch.zeros(self.args.batch_size, self.args.nb_nodes)
                lbl = torch.cat((lbl_1, lbl_2), 1)
                lbl = lbl.to(self.args.device)
                logits = model(features, shuf_fts, adj, self.args.sparse, None, None, None)
                loss = b_xent(logits, lbl)
                # Early stopping with checkpointing on loss improvement.
                if loss < best:
                    best = loss
                    cnt_wait = 0
                    torch.save(model.state_dict(), 'saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, metapath))
                else:
                    cnt_wait += 1
                if cnt_wait == self.args.patience:
                    break
                loss.backward()
                optimiser.step()
            model.load_state_dict(torch.load('saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, metapath)))
            # Evaluation
            embeds, _ = model.embed(features, adj, self.args.sparse)
            evaluate(embeds, self.idx_train, self.idx_val, self.idx_test, self.labels, self.args.device)
            final_embeds.append(embeds)
        # Average the per-metapath embeddings and evaluate the combination.
        embeds = torch.mean(torch.cat(final_embeds), 0).unsqueeze(0)
        print("- Integrated")
        evaluate(embeds, self.idx_train, self.idx_val, self.idx_test, self.labels, self.args.device)
class modeler(nn.Module):
    """DGI network: one shared GCN encoder plus a bilinear discriminator."""

    def __init__(self, args):
        super(modeler, self).__init__()
        self.args = args
        # Encoder shared between the real and the shuffled (negative) inputs.
        self.gcn = GCN(args.ft_size, args.hid_units, args.activation, args.drop_prob, args.isBias)
        # A single discriminator scores (summary, node) pairs.
        self.disc = Discriminator(args.hid_units)
        self.readout_func = self.args.readout_func

    def forward(self, seq1, seq2, adj, sparse, msk, samp_bias1, samp_bias2):
        # Positive node embeddings and the graph-level summary (equation 9).
        pos = self.gcn(seq1, adj, sparse)
        summary = self.args.readout_act_func(self.readout_func(pos))
        # Negative embeddings come from the shuffled features seq2.
        neg = self.gcn(seq2, adj, sparse)
        return self.disc(summary, pos, neg, samp_bias1, samp_bias2)

    # Detach the return variables
    def embed(self, seq, adj, sparse):
        # Inference helper: detached node embeddings and summary vector.
        nodes = self.gcn(seq, adj, sparse)
        summary = self.args.readout_act_func(self.readout_func(nodes))
        return nodes.detach(), summary.detach()
| 3,756 | 35.125 | 139 | py |
DMGI | DMGI-master/layers/discriminator.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
class Discriminator(nn.Module):
    """Bilinear discriminator scoring (summary, node-embedding) pairs for DGI."""
    def __init__(self, n_h):
        super(Discriminator, self).__init__()
        self.f_k_bilinear = nn.Bilinear(n_h, n_h, 1)
        # Xavier-init the bilinear weight; zero its bias.
        for m in self.modules():
            self.weights_init(m)
    def weights_init(self, m):
        if isinstance(m, nn.Bilinear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)
    def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
        """Return concatenated logits: positive scores first, then negative."""
        # Broadcast the summary vector to one copy per node.
        c_x = torch.unsqueeze(c, 1) # c: summary vector, h_pl: positive, h_mi: negative
        c_x = c_x.expand_as(h_pl)
        sc_1 = torch.squeeze(self.f_k_bilinear(h_pl, c_x), 2) # sc_1 = 1 x nb_nodes
        sc_2 = torch.squeeze(self.f_k_bilinear(h_mi, c_x), 2) # sc_2 = 1 x nb_nodes
        # Optional additive sampling biases.
        if s_bias1 is not None:
            sc_1 += s_bias1
        if s_bias2 is not None:
            sc_2 += s_bias2
        logits = torch.cat((sc_1, sc_2), 1)
        return logits | 1,143 | 30.777778 | 87 | py |
DMGI | DMGI-master/layers/readout.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
class AvgReadout(nn.Module):
    """Readout: mean of node embeddings over the node dimension (dim 1)."""
    def __init__(self):
        super(AvgReadout, self).__init__()
    def forward(self, seq):
        # seq: (batch, nodes, features) -> (batch, features)
        return torch.mean(seq, 1) | 326 | 24.153846 | 42 | py |
DMGI | DMGI-master/layers/gcn.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
import torch.nn.functional as F
import pdb
import math
class GCN(nn.Module):
    """One GCN layer: dropout -> shared linear map -> neighbourhood
    aggregation (A @ XW) -> activation, with an optional additive bias."""

    # Configuration string -> activation constructor.
    _ACTIVATIONS = {
        'prelu': nn.PReLU,
        'relu': nn.ReLU,
        'leakyrelu': nn.LeakyReLU,
        'relu6': nn.ReLU6,
        'rrelu': nn.RReLU,
        'selu': nn.SELU,
        'celu': nn.CELU,
        'sigmoid': nn.Sigmoid,
        'identity': nn.Identity,
    }

    def __init__(self, in_ft, out_ft, act, drop_prob, isBias=False):
        super(GCN, self).__init__()
        # Shared weight matrix W; the bias is handled separately below.
        self.fc_1 = nn.Linear(in_ft, out_ft, bias=False)
        act_factory = self._ACTIVATIONS.get(act)
        if act_factory is not None:
            self.act = act_factory()
        if isBias:
            # Learnable bias applied after aggregation.
            self.bias_1 = nn.Parameter(torch.FloatTensor(out_ft))
            self.bias_1.data.fill_(0.0)
        else:
            self.register_parameter('bias', None)
        for module in self.modules():
            self.weights_init(module)
        self.drop_prob = drop_prob
        self.isBias = isBias

    def weights_init(self, m):
        # Xavier-init every linear submodule; zero its bias if present.
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    # Shape of seq: (batch, nodes, features)
    def forward(self, seq, adj, sparse=False):
        h = F.dropout(seq, self.drop_prob, training=self.training)
        h = self.fc_1(h)
        if sparse:
            # torch.spmm needs 2-D operands: drop and restore the batch dim.
            h = torch.unsqueeze(torch.spmm(adj, torch.squeeze(h, 0)), 0)
        else:
            h = torch.bmm(adj, h)
        if self.isBias:
            h += self.bias_1
        return self.act(h)
| 2,239 | 28.473684 | 76 | py |
DMGI | DMGI-master/layers/attention.py | import torch.nn as nn
import torch
import torch.nn.functional as F
class Attention(nn.Module):
    """Per-metapath attention: one scalar scorer per graph view, used to
    combine node embeddings and summary vectors across views."""
    def __init__(self, args):
        super(Attention, self).__init__()
        self.args = args
        # One linear scorer (hid_units -> 1) per metapath graph.
        self.A = nn.ModuleList([nn.Linear(args.hid_units, 1) for _ in range(args.nb_graphs)])
        self.weight_init()
    def weight_init(self):
        for i in range(self.args.nb_graphs):
            nn.init.xavier_normal_(self.A[i].weight)
            self.A[i].bias.data.fill_(0.0)
    def forward(self, feat_pos, feat_neg, summary):
        """Attend over lists of per-view positives, negatives, and summaries.
        The attention weights themselves are computed but not returned."""
        feat_pos, feat_pos_attn = self.attn_feature(feat_pos)
        feat_neg, feat_neg_attn = self.attn_feature(feat_neg)
        summary, summary_attn = self.attn_summary(summary)
        return feat_pos, feat_neg, summary
    def attn_feature(self, features):
        # Score every node in every view, softmax over the concatenation.
        features_attn = []
        for i in range(self.args.nb_graphs):
            features_attn.append((self.A[i](features[i].squeeze())))
        features_attn = F.softmax(torch.cat(features_attn, 1), -1)
        features = torch.cat(features,1).squeeze(0)
        # Reshape the weights to one scalar per (view, node) row, apply them,
        # then sum the weighted views back into a single (1, nodes, hid) tensor.
        features_attn_reshaped = features_attn.transpose(1, 0).contiguous().view(-1, 1)
        features = features * features_attn_reshaped.expand_as(features)
        features = features.view(self.args.nb_graphs, self.args.nb_nodes, self.args.hid_units).sum(0).unsqueeze(0)
        return features, features_attn
    def attn_summary(self, features):
        # Same idea for the per-view summary vectors: softmax over views,
        # weighted sum into a single summary.
        features_attn = []
        for i in range(self.args.nb_graphs):
            features_attn.append((self.A[i](features[i].squeeze())))
        features_attn = F.softmax(torch.cat(features_attn), dim=-1).unsqueeze(1)
        features = torch.cat(features, 0)
        features_attn_expanded = features_attn.expand_as(features)
        features = (features * features_attn_expanded).sum(0).unsqueeze(0)
        return features, features_attn
| 1,865 | 37.875 | 114 | py |
DMGI | DMGI-master/utils/process.py | import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
import sys
import torch
import torch.nn as nn
import scipy.io as sio
import pdb
def load_data_dblp(args):
    """Load a multiplex dataset from data/<dataset>.{mat,pkl}.

    Expects keys 'label', 'feature', 'train_idx'/'val_idx'/'test_idx', and
    one adjacency matrix per metapath name in args.metapaths_list.

    Returns:
        (adjacencies, features_list, label, idx_train, idx_val, idx_test)
        where features_list repeats the same feature matrix once per metapath.
    """
    dataset = args.dataset
    metapaths = args.metapaths_list
    sc = args.sc
    # ACM ships as a MATLAB .mat file; everything else as a pickle.
    if dataset == 'acm':
        data = sio.loadmat('data/{}.mat'.format(dataset))
    else:
        data = pkl.load(open('data/{}.pkl'.format(dataset), "rb"))
    label = data['label']
    N = label.shape[0]
    truefeatures = data['feature'].astype(float)
    # Add weighted self-connections (sc) to each metapath adjacency.
    rownetworks = [data[metapath] + np.eye(N)*sc for metapath in metapaths]
    rownetworks = [sp.csr_matrix(rownetwork) for rownetwork in rownetworks]
    truefeatures = sp.lil_matrix(truefeatures)
    idx_train = data['train_idx'].ravel()
    idx_val = data['val_idx'].ravel()
    idx_test = data['test_idx'].ravel()
    # Same features for every metapath view.
    truefeatures_list = []
    for _ in range(len(rownetworks)):
        truefeatures_list.append(truefeatures)
    return rownetworks, truefeatures_list, label, idx_train, idx_val, idx_test
def parse_skipgram(fname):
    """Parse a skip-gram embedding file.

    Format: "<nb_nodes> <nb_features>" followed, for each node, by its
    1-based node id and then nb_features float values.

    Returns:
        (nb_nodes, nb_features) ndarray indexed by 0-based node id.
    """
    with open(fname) as handle:
        tokens = iter(handle.read().split())
    nb_nodes = int(next(tokens))
    nb_features = int(next(tokens))
    ret = np.empty((nb_nodes, nb_features))
    for _ in range(nb_nodes):
        node = int(next(tokens)) - 1  # ids in the file are 1-based
        ret[node] = [float(next(tokens)) for _ in range(nb_features)]
    return ret
# Process a (subset of) a TU dataset into standard form
def process_tu(data, nb_nodes):
    """Convert a (subset of a) TU graph dataset into dense padded arrays.

    `data` is indexable with per-graph objects exposing .x (node features),
    .y (label) and .edge_index (2 x E) — assumes the torch-geometric layout;
    verify against the caller. Graphs are zero-padded to nb_nodes.

    Returns:
        (features, adjacency, labels, sizes, masks) dense numpy arrays.
    """
    nb_graphs = len(data)
    ft_size = data.num_features
    features = np.zeros((nb_graphs, nb_nodes, ft_size))
    adjacency = np.zeros((nb_graphs, nb_nodes, nb_nodes))
    labels = np.zeros(nb_graphs)
    sizes = np.zeros(nb_graphs, dtype=np.int32)
    masks = np.zeros((nb_graphs, nb_nodes))
    for g in range(nb_graphs):
        sizes[g] = data[g].x.shape[0]
        features[g, :sizes[g]] = data[g].x
        labels[g] = data[g].y[0]
        # Mask marks the real (non-padded) nodes of graph g.
        masks[g, :sizes[g]] = 1.0
        # Build the dense adjacency from the edge-index pairs.
        e_ind = data[g].edge_index
        coo = sp.coo_matrix((np.ones(e_ind.shape[1]), (e_ind[0, :], e_ind[1, :])), shape=(nb_nodes, nb_nodes))
        adjacency[g] = coo.todense()
    return features, adjacency, labels, sizes, masks
def micro_f1(logits, labels):
    """Compute the micro-averaged F1 score for (multi-label) predictions.

    Args:
        logits: raw pre-sigmoid scores, same shape as `labels`.
        labels: binary ground-truth tensor with 0/1 entries.

    Returns:
        Micro-F1 as a Python float. Returns 0.0 when there are no true
        positives (no positive predictions or no positive labels) instead
        of raising ZeroDivisionError as the unguarded division would.
    """
    # Threshold the sigmoid at 0.5 to obtain hard predictions.
    preds = torch.round(nn.Sigmoid()(logits))
    # Cast to long so the +/-1 arithmetic below is exact.
    preds = preds.long()
    labels = labels.long()
    # Confusion counts via nonzero patterns of {0,1} x {-1,0} products.
    tp = torch.nonzero(preds * labels).shape[0] * 1.0
    fp = torch.nonzero(preds * (labels - 1)).shape[0] * 1.0
    fn = torch.nonzero((preds - 1) * labels).shape[0] * 1.0
    # Degenerate case: with tp == 0 both precision and recall are 0 (or
    # undefined); report 0.0 rather than dividing by zero.
    if tp == 0.0:
        return 0.0
    prec = tp / (tp + fp)
    rec = tp / (tp + fn)
    return (2 * prec * rec) / (prec + rec)
def accuracy(output, labels):
    """Fraction of rows whose arg-max class equals `labels` (0-dim tensor)."""
    predictions = output.max(1)[1].type_as(labels)
    hits = predictions.eq(labels).double().sum()
    return hits / len(labels)
"""
Prepare adjacency matrix by expanding up to a given neighbourhood.
This will insert loops on every node.
Finally, the matrix is converted to bias vectors.
Expected shape: [graph, nodes, nodes]
"""
def adj_to_bias(adj, sizes, nhood=1):
    """Convert adjacency matrices into additive attention-bias masks.

    Expands each graph's adjacency (with self-loops) to its `nhood`-hop
    reachability, binarizes the positive entries within the first sizes[g]
    valid nodes, and maps reachable -> 0 / unreachable -> -1e9 so the
    result can be added to attention logits before a softmax.

    Args:
        adj: float array of shape (graph, nodes, nodes).
        sizes: per-graph count of valid (non-padded) nodes.
        nhood: neighbourhood radius to expand to.

    Returns:
        Bias array with the same shape as `adj`.
    """
    nb_graphs = adj.shape[0]
    nb_nodes = adj.shape[1]
    eye = np.eye(nb_nodes)
    mt = np.empty(adj.shape)
    for g in range(nb_graphs):
        mt[g] = eye
        for _ in range(nhood):
            mt[g] = np.matmul(mt[g], adj[g] + eye)
        # Binarize reachability inside the valid sub-graph in one vectorized
        # step (replaces an O(n^2) per-entry Python loop; non-positive
        # entries are left untouched, exactly as before).
        s = sizes[g]
        sub = mt[g][:s, :s]
        mt[g][:s, :s] = np.where(sub > 0.0, 1.0, sub)
    return -1e9 * (1.0 - mt)
###############################################
# This section of code adapted from tkipf/gcn #
###############################################
def parse_index_file(filename):
    """Parse an index file containing one integer per line.

    Args:
        filename: path to a text file of (whitespace-padded) integers.

    Returns:
        List of the integers in file order.
    """
    # Context manager guarantees the handle is closed even if int() raises
    # (the original relied on garbage collection to close the file).
    with open(filename) as f:
        return [int(line.strip()) for line in f]
def sample_mask(idx, l):
    """Create a boolean mask of length `l` that is True at positions `idx`.

    Args:
        idx: index or array-like of indices to switch on.
        l: total mask length.

    Returns:
        np.ndarray of dtype bool.
    """
    mask = np.zeros(l)
    mask[idx] = 1
    # `np.bool` (the deprecated alias) was removed in NumPy 1.24; the
    # builtin `bool` produces the identical dtype on every NumPy version.
    return np.array(mask, dtype=bool)
def load_data(dataset_str): # {'pubmed', 'citeseer', 'cora'}
    """Load a Planetoid citation dataset from data/ind.<name>.* pickles.

    Returns:
        (adj, features, labels, idx_train, idx_val, idx_test) where adj is a
        scipy sparse adjacency, features a lil_matrix, labels one-hot rows,
        and the idx_* are row index collections for the three splits.
    """
    # Pickled pieces: labelled train (x, y), test (tx, ty), all-but-test
    # (allx, ally), and the citation graph as an adjacency dict.
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            if sys.version_info > (3, 0):
                # Files were pickled under Python 2; latin1 decodes them.
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    # Stack train+test features/labels, then permute test rows back into
    # their original (unsorted) file order.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    # Standard Planetoid splits: len(y) train rows, next 500 validation.
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)
    return adj, features, labels, idx_train, idx_val, idx_test
def sparse_to_tuple(sparse_mx, insert_batch=False):
    """Convert scipy sparse matrices to (coords, values, shape) tuples.

    Accepts a single matrix or a list of matrices; a list is converted
    in place (and the same list object is returned). Pass
    insert_batch=True to prepend a batch dimension of size 1.
    """
    def _convert(mx):
        coo = mx if sp.isspmatrix_coo(mx) else mx.tocoo()
        if insert_batch:
            # Batch index 0 for every entry, then (row, col).
            zeros = np.zeros(coo.row.shape[0])
            coords = np.vstack((zeros, coo.row, coo.col)).transpose()
            return coords, coo.data, (1,) + coo.shape
        coords = np.vstack((coo.row, coo.col)).transpose()
        return coords, coo.data, coo.shape

    if isinstance(sparse_mx, list):
        # Mutate the caller's list in place, mirroring the original contract.
        for i, mx in enumerate(sparse_mx):
            sparse_mx[i] = _convert(mx)
        return sparse_mx
    return _convert(sparse_mx)
def standardize_data(f, train_mask):
    """Standardize feature matrix and convert to tuple representation"""
    # standardize data
    f = f.todense()
    # First pass: statistics over the training rows only.
    mu = f[train_mask == True, :].mean(axis=0)
    sigma = f[train_mask == True, :].std(axis=0)
    # Drop zero-variance columns (they carry no signal and would divide by 0).
    f = f[:, np.squeeze(np.array(sigma > 0))]
    # Recompute mu/sigma on the reduced column set before normalizing —
    # the repeat is intentional, not redundant.
    mu = f[train_mask == True, :].mean(axis=0)
    sigma = f[train_mask == True, :].std(axis=0)
    f = (f - mu) / sigma
    return f
def preprocess_features(features):
    """Row-normalize a sparse feature matrix; return it as a dense matrix."""
    row_totals = np.array(features.sum(1))
    inv = np.power(row_totals, -1).flatten()
    # Rows summing to zero produce inf; map those back to zero.
    inv[np.isinf(inv)] = 0.
    normalized = sp.diags(inv).dot(features)
    return normalized.todense()
def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix (D^-1/2 A D^-1/2, COO)."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1))
    inv_sqrt = np.power(degrees, -0.5).flatten()
    # Isolated nodes have degree 0 -> inf after the power; zero them out.
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_inv_sqrt = sp.diags(inv_sqrt)
    return adj.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
def preprocess_adj(adj):
    """Add self-loops, symmetrically normalize, and return the tuple
    representation used by a simple GCN model."""
    with_self_loops = adj + sp.eye(adj.shape[0])
    return sparse_to_tuple(normalize_adj(with_self_loops))
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse FloatTensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    # Torch wants a (2, nnz) int64 index matrix of (row, col) pairs.
    row_col = np.vstack((coo.row, coo.col)).astype(np.int64)
    indices = torch.from_numpy(row_col)
    values = torch.from_numpy(coo.data)
    return torch.sparse.FloatTensor(indices, values, torch.Size(coo.shape))
def process_adj_gat(adj):
    """Turn a sparse adjacency into the additive mask used by the official
    GAT: connected (or self-loop) entries -> 0, disconnected -> -9e15."""
    # adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    # Tricky implementation of official GAT
    adj = (adj + sp.eye(adj.shape[0])).todense()
    for x in range(0, adj.shape[0]):
        for y in range(0, adj.shape[1]):
            if adj[x, y] == 0:
                # No edge: large negative bias, softmax will zero it out.
                adj[x, y] = -9e15
            elif adj[x, y] >= 1:
                # Edge (weight >= 1): no bias.
                adj[x, y] = 0
            else:
                # Fractional weights are unexpected for this mask scheme.
                print(adj[x, y], 'error')
    adj = torch.FloatTensor(np.array(adj))
    # adj = sp.coo_matrix(adj)
    return adj | 8,988 | 33.178707 | 110 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.