repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/molecules_graph_regression/bi_gated_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.bi_gated_gcn_layer import biGatedGCNLayer
from layers.gated_gcn_layer import GatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGatedGCNNet(nn.Module):
    """Gated-GCN regression network with one bipartite (soft-clustering) layer.

    Layer stack: GatedGCNLayer -> biGatedGCNLayer (which also emits a soft
    cluster assignment, cached on ``self.s``) -> (L-3) GatedGCNLayers ->
    one final GatedGCNLayer -> MLP readout. ``forward`` returns
    ``(prediction, soft_assignment)``.
    """

    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.sg_flag = True
        self.sigma = net_params['sigma']
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)
        if self.edge_feat:
            self.embedding_e = nn.Embedding(num_bond_type, hidden_dim)
        else:
            # no bond features: embed a constant scalar per edge
            self.embedding_e = nn.Linear(1, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                   self.batch_norm, self.residual)])
        self.layers.append(biGatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                           self.batch_norm, self.assign_dim,
                                           self.sigma, residual=self.residual))
        for _ in range(n_layers - 3):
            self.layers.append(GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                             self.batch_norm, self.residual))
        self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout,
                                         self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, 1)  # 1 out dim since regression problem

    def forward(self, g, h, e, h_pos_enc=None):
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        if not self.edge_feat:  # edge feature set to 1
            e = torch.ones(e.size(0), 1).to(self.device)
        e = self.embedding_e(e)
        # convnets; layer index 1 is the bipartite layer and additionally
        # returns the soft assignment matrix, cached for the loss.
        for idx, conv in enumerate(self.layers):
            if idx == 1:
                h, e, self.s = conv(g, h, e)
            else:
                h, e = conv(g, h, e)
        g.ndata['h'] = h
        if self.readout == "sum":
            hg = dgl.sum_nodes(g, 'h')
        elif self.readout == "max":
            hg = dgl.max_nodes(g, 'h')
        elif self.readout == "mean":
            hg = dgl.mean_nodes(g, 'h')
        else:
            hg = dgl.mean_nodes(g, 'h')  # default readout is mean nodes
        return self.MLP_layer(hg), self.s

    def sup_loss(self, scores, targets):
        """Supervised regression loss (mean absolute error)."""
        # loss = nn.MSELoss()(scores,targets)
        loss = nn.L1Loss()(scores, targets)
        return loss

    def unsup_loss(self, g, soft_assign, mode):
        """Unsupervised clustering regulariser ('mincut' or 'diffpool')."""
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            n = adj.size(0)
            # BUGFIX: build the sparse degree-matrix indices on the same
            # device as the assignment; torch.tensor([range(..), range(..)])
            # always allocated on CPU and broke CUDA runs.
            idx = torch.arange(n, device=soft_assign.device)
            d = torch.sparse_coo_tensor(torch.stack([idx, idx]),
                                        torch.sparse.sum(adj, dim=1).to_dense(),
                                        (n, n))
            # pooled adjacency S^T A S and pooled degree S^T D S
            out_adj = torch.mm(soft_assign.transpose(0, 1), torch.sparse.mm(adj, soft_assign))
            out_d = torch.mm(soft_assign.transpose(0, 1), torch.sparse.mm(d, soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # trace
            mincut_den = torch.einsum('ii->', out_d)
            mincut_loss = -(mincut_num / mincut_den)
            # orthogonality regulariser on S^T S
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add(-soft_assign.matmul(soft_assign.transpose(0, 1)), adj).norm(dim=(0, 1)) / (adj.size(0) * adj.size(1))
            return ent_loss + linkpred_loss
| 5,158 | 40.272 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/molecules_graph_regression/bi_graphsage_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.bi_graphsage_layer import biGraphSageLayer
from layers.mlp_readout_layer import MLPReadout
class biGraphSageNet(nn.Module):
    """
    Graphsage network with multiple GraphSageLayer layers plus one
    bipartite (soft-clustering) biGraphSageLayer at position 1.
    ``forward`` returns ``(prediction, soft_assignment)``.
    """

    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.sigma = net_params['sigma']
        self.sg_flag = True
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                                    dropout, aggregator_type,
                                                    batch_norm, residual)])
        self.layers.append(biGraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                            dropout, aggregator_type, batch_norm,
                                            self.assign_dim, self.sigma, residual))
        for _ in range(n_layers - 3):
            self.layers.append(GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                              dropout, aggregator_type,
                                              batch_norm, residual))
        self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout,
                                          aggregator_type, batch_norm, residual))
        self.MLP_layer = MLPReadout(out_dim, 1)  # 1 out dim since regression problem

    def forward(self, g, h, e):
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # layer index 1 is the bipartite layer; it also yields the soft
        # assignment matrix, cached on self.s for the loss.
        for idx, conv in enumerate(self.layers):
            if idx == 1:
                h, self.s = conv(g, h)
            else:
                h = conv(g, h)
        g.ndata['h'] = h
        if self.readout == "sum":
            hg = dgl.sum_nodes(g, 'h')
        elif self.readout == "max":
            hg = dgl.max_nodes(g, 'h')
        elif self.readout == "mean":
            hg = dgl.mean_nodes(g, 'h')
        else:
            hg = dgl.mean_nodes(g, 'h')  # default readout is mean nodes
        return self.MLP_layer(hg), self.s

    def sup_loss(self, scores, targets):
        """Supervised regression loss (mean absolute error)."""
        # loss = nn.MSELoss()(scores,targets)
        loss = nn.L1Loss()(scores, targets)
        return loss

    def unsup_loss(self, g, soft_assign, mode):
        """Unsupervised clustering regulariser ('mincut' or 'diffpool')."""
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            n = adj.size(0)
            # BUGFIX: indices must live on the same device as the values;
            # the original torch.tensor([range(..), range(..)]) was CPU-only.
            idx = torch.arange(n, device=soft_assign.device)
            d = torch.sparse_coo_tensor(torch.stack([idx, idx]),
                                        torch.sparse.sum(adj, dim=1).to_dense(),
                                        (n, n))
            out_adj = torch.mm(soft_assign.transpose(0, 1), torch.sparse.mm(adj, soft_assign))
            out_d = torch.mm(soft_assign.transpose(0, 1), torch.sparse.mm(d, soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # trace
            mincut_den = torch.einsum('ii->', out_d)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add(-soft_assign.matmul(soft_assign.transpose(0, 1)), adj).norm(dim=(0, 1)) / (adj.size(0) * adj.size(1))
            return ent_loss + linkpred_loss
| 4,479 | 40.869159 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/molecules_graph_regression/bi_gat_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
import dgl
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import GATLayer
from layers.bi_gat_layer import biGATLayer
from layers.mlp_readout_layer import MLPReadout
class biGATNet(nn.Module):
    """GAT regression network with one bipartite (soft-clustering) GAT layer.

    Stack: GATLayer -> biGATLayer (which also emits a soft assignment,
    cached on ``self.s``) -> (L-3) GATLayers -> single-head GATLayer ->
    MLP readout. ``forward`` returns ``(prediction, soft_assignment)``.
    """

    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        self.num_heads = net_params['n_heads']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.pos_enc = net_params['pos_enc']
        self.sg_flag = True
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.sigma = net_params['sigma']
        self.dropout = dropout
        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim * self.num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([GATLayer(hidden_dim * self.num_heads, hidden_dim,
                                              self.num_heads, dropout,
                                              self.batch_norm, self.residual)])
        self.layers.append(biGATLayer(hidden_dim * self.num_heads, hidden_dim,
                                      self.num_heads, dropout, self.batch_norm,
                                      self.assign_dim, self.sigma, self.residual))
        for _ in range(n_layers - 3):
            self.layers.append(GATLayer(hidden_dim * self.num_heads, hidden_dim,
                                        self.num_heads, dropout,
                                        self.batch_norm, self.residual))
        # final layer collapses to a single head at out_dim width
        self.layers.append(GATLayer(hidden_dim * self.num_heads, out_dim, 1,
                                    dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, 1)  # 1 out dim since regression problem

    def forward(self, g, h, e):
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # layer index 1 is the bipartite layer; it also yields the soft
        # assignment matrix, cached on self.s for the loss.
        for idx, conv in enumerate(self.layers):
            if idx == 1:
                h, self.s = conv(g, h)
            else:
                h = conv(g, h)
        g.ndata['h'] = h
        if self.readout == "sum":
            hg = dgl.sum_nodes(g, 'h')
        elif self.readout == "max":
            hg = dgl.max_nodes(g, 'h')
        elif self.readout == "mean":
            hg = dgl.mean_nodes(g, 'h')
        else:
            hg = dgl.mean_nodes(g, 'h')  # default readout is mean nodes
        return self.MLP_layer(hg), self.s

    def sup_loss(self, scores, targets):
        """Supervised regression loss (mean absolute error)."""
        # loss = nn.MSELoss()(scores,targets)
        loss = nn.L1Loss()(scores, targets)
        return loss

    def unsup_loss(self, g, soft_assign, mode):
        """Unsupervised clustering regulariser ('mincut' or 'diffpool')."""
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            n = adj.size(0)
            # BUGFIX: indices must live on the same device as the values;
            # the original torch.tensor([range(..), range(..)]) was CPU-only.
            idx = torch.arange(n, device=soft_assign.device)
            d = torch.sparse_coo_tensor(torch.stack([idx, idx]),
                                        torch.sparse.sum(adj, dim=1).to_dense(),
                                        (n, n))
            out_adj = torch.mm(soft_assign.transpose(0, 1), torch.sparse.mm(adj, soft_assign))
            out_d = torch.mm(soft_assign.transpose(0, 1), torch.sparse.mm(d, soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # trace
            mincut_den = torch.einsum('ii->', out_d)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add(-soft_assign.matmul(soft_assign.transpose(0, 1)), adj).norm(dim=(0, 1)) / (adj.size(0) * adj.size(1))
            return ent_loss + linkpred_loss
| 4,553 | 38.947368 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/layers/graphsage_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.nn.pytorch import SAGEConv
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
class GraphSageLayer(nn.Module):
    """Single GraphSage convolution (mean / maxpool / lstm aggregation).

    When ``dgl_builtin`` is True the layer delegates to dgl's SAGEConv;
    otherwise it runs an explicit message-passing pipeline finished by a
    NodeApply update. The residual connection is disabled automatically
    when input and output widths differ.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.aggregator_type = aggregator_type
        self.batch_norm = batch_norm
        self.residual = residual
        self.dgl_builtin = dgl_builtin
        if in_feats != out_feats:
            self.residual = False  # widths differ -> cannot add skip
        self.dropout = nn.Dropout(p=dropout)
        if not dgl_builtin:
            self.nodeapply = NodeApply(in_feats, out_feats, activation,
                                       dropout, bias=bias)
            if aggregator_type == "maxpool":
                self.aggregator = MaxPoolAggregator(in_feats, in_feats,
                                                    activation, bias)
            elif aggregator_type == "lstm":
                self.aggregator = LSTMAggregator(in_feats, in_feats)
            else:
                self.aggregator = MeanAggregator()
        else:
            self.sageconv = SAGEConv(in_feats, out_feats, aggregator_type,
                                     dropout, activation=activation)
        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)

    def forward(self, g, h):
        h_res = h  # saved for the residual connection
        if self.dgl_builtin:
            h = self.sageconv(g, h)
        else:
            h = self.dropout(h)
            g.ndata['h'] = h
            if self.aggregator_type == 'maxpool':
                # pre-transform node features, then use the builtin max reduce
                g.ndata['h'] = self.aggregator.linear(g.ndata['h'])
                g.ndata['h'] = self.aggregator.activation(g.ndata['h'])
                g.update_all(fn.copy_src('h', 'm'), fn.max('m', 'c'),
                             self.nodeapply)
            elif self.aggregator_type == 'lstm':
                # lstm aggregation needs the custom reduce function
                g.update_all(fn.copy_src(src='h', out='m'),
                             self.aggregator,
                             self.nodeapply)
            else:
                g.update_all(fn.copy_src('h', 'm'), fn.mean('m', 'c'),
                             self.nodeapply)
            h = g.ndata['h']
        if self.batch_norm:
            h = self.batchnorm_h(h)
        if self.residual:
            h = h_res + h  # residual connection
        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, aggregator={}, residual={})'.format(
            self.__class__.__name__, self.in_channels, self.out_channels,
            self.aggregator_type, self.residual)
"""
Aggregators for GraphSage
"""
class Aggregator(nn.Module):
    """
    Base Aggregator class: DGL reduce function that folds the incoming
    mailbox 'm' into an aggregate 'c'. Subclasses implement `aggre`.
    """

    def __init__(self):
        super().__init__()

    def forward(self, node):
        neighbour = node.mailbox['m']
        c = self.aggre(neighbour)
        return {"c": c}

    def aggre(self, neighbour):
        # neighbour: (num_nodes, num_neighbours, feat) -> (num_nodes, feat)
        raise NotImplementedError


class MeanAggregator(Aggregator):
    """
    Mean Aggregator for graphsage: average over the neighbour axis.
    """

    def __init__(self):
        super().__init__()

    def aggre(self, neighbour):
        mean_neighbour = torch.mean(neighbour, dim=1)
        return mean_neighbour


class MaxPoolAggregator(Aggregator):
    """
    Maxpooling aggregator for graphsage: linear + optional activation,
    then element-wise max over the neighbour axis.
    """

    def __init__(self, in_feats, out_feats, activation, bias):
        super().__init__()
        self.linear = nn.Linear(in_feats, out_feats, bias=bias)
        self.activation = activation

    def aggre(self, neighbour):
        neighbour = self.linear(neighbour)
        if self.activation:
            neighbour = self.activation(neighbour)
        maxpool_neighbour = torch.max(neighbour, dim=1)[0]
        return maxpool_neighbour


class LSTMAggregator(Aggregator):
    """
    LSTM aggregator for graphsage: runs an LSTM over a random permutation
    of the neighbours and keeps the last hidden state.
    """

    def __init__(self, in_feats, hidden_feats):
        super().__init__()
        self.lstm = nn.LSTM(in_feats, hidden_feats, batch_first=True)
        self.hidden_dim = hidden_feats
        self.hidden = self.init_hidden()
        # BUGFIX: nn.LSTM has no attribute `.weight` (its parameters are
        # weight_ih_l0 / weight_hh_l0), so the original
        # nn.init.xavier_uniform_(self.lstm.weight, ...) raised
        # AttributeError on construction.
        nn.init.xavier_uniform_(self.lstm.weight_ih_l0,
                                gain=nn.init.calculate_gain('relu'))
        nn.init.xavier_uniform_(self.lstm.weight_hh_l0,
                                gain=nn.init.calculate_gain('relu'))

    def init_hidden(self):
        """
        Defaulted to initialise hidden/cell states to all zeros.
        """
        return (torch.zeros(1, 1, self.hidden_dim),
                torch.zeros(1, 1, self.hidden_dim))

    def aggre(self, neighbours):
        """
        Aggregation: shuffle neighbours, run the LSTM, take the final step.
        """
        # neighbours: (num_nodes, num_neighbours, feat)
        rand_order = torch.randperm(neighbours.size()[1])
        neighbours = neighbours[:, rand_order, :]
        (lstm_out, self.hidden) = self.lstm(
            neighbours.view(neighbours.size()[0], neighbours.size()[1], -1))
        return lstm_out[:, -1, :]

    def forward(self, node):
        neighbour = node.mailbox['m']
        c = self.aggre(neighbour)
        return {"c": c}
class NodeApply(nn.Module):
    """
    Works -> the node_apply function in DGL paradigm: concatenates a node's
    own features with its aggregated neighbourhood, projects, clamps
    infinities, L2-normalises and optionally applies the activation.
    """

    def __init__(self, in_feats, out_feats, activation, dropout, bias=True):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.linear = nn.Linear(in_feats * 2, out_feats, bias)
        self.activation = activation

    def concat(self, h, aggre_result):
        # project the concatenation [h || aggregate]
        return self.linear(torch.cat((h, aggre_result), 1))

    def forward(self, node):
        combined = self.concat(node.data['h'], node.data['c'])
        combined[torch.isinf(combined)] = 1e+9  # clamping
        combined = F.normalize(combined, p=2, dim=1)
        if self.activation:
            combined = self.activation(combined)
        return {"h": combined}
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class GraphSageLayerEdgeFeat(nn.Module):
    """GraphSage variant with anisotropic (edge-gated) maxpool aggregation.

    Edge gates e_ij = B h_i + B h_j weight the projected neighbour
    features A h_j (via a sigmoid) before the max reduction.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.residual = residual
        if in_feats != out_feats:
            self.residual = False  # widths differ -> no skip connection
        self.dropout = nn.Dropout(p=dropout)
        self.activation = activation
        self.A = nn.Linear(in_feats, out_feats, bias=bias)
        self.B = nn.Linear(in_feats, out_feats, bias=bias)
        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout,
                                   bias=bias)
        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        # gate from the two endpoint projections: e_ij = Bh_i + Bh_j
        gate = edges.src['Bh'] + edges.dst['Bh']
        edges.data['e'] = gate
        return {'Ah_j': edges.src['Ah'], 'e_ij': gate}

    def reduce_func(self, nodes):
        # anisotropic maxpool: sigmoid-gated neighbour features, then max
        gated = torch.sigmoid(nodes.mailbox['e_ij']) * nodes.mailbox['Ah_j']
        if self.activation:
            gated = self.activation(gated)
        return {'c': torch.max(gated, dim=1)[0]}

    def forward(self, g, h):
        h_res = h  # saved for the residual connection
        h = self.dropout(h)
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.update_all(self.message_func, self.reduce_func, self.nodeapply)
        h = g.ndata['h']
        if self.batch_norm:
            h = self.batchnorm_h(h)
        if self.residual:
            h = h_res + h  # residual connection
        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels,
            self.residual)
##############################################################
class GraphSageLayerEdgeReprFeat(nn.Module):
    """GraphSage variant with explicit edge representations.

    Edge gates e_ij = C e_ij + B h_i + B h_j weight the projected
    neighbour features A h_j before a max reduction; the updated edge
    features are returned alongside the node features.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.residual = residual
        if in_feats != out_feats:
            self.residual = False  # widths differ -> no skip connection
        self.dropout = nn.Dropout(p=dropout)
        self.activation = activation
        self.A = nn.Linear(in_feats, out_feats, bias=bias)
        self.B = nn.Linear(in_feats, out_feats, bias=bias)
        self.C = nn.Linear(in_feats, out_feats, bias=bias)
        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout,
                                   bias=bias)
        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)
            self.batchnorm_e = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        # gate combines edge and endpoint projections:
        # e_ij = Ce_ij + Bh_i + Bh_j
        gate = edges.data['Ce'] + edges.src['Bh'] + edges.dst['Bh']
        edges.data['e'] = gate
        return {'Ah_j': edges.src['Ah'], 'e_ij': gate}

    def reduce_func(self, nodes):
        # anisotropic maxpool: sigmoid-gated neighbour features, then max
        gated = torch.sigmoid(nodes.mailbox['e_ij']) * nodes.mailbox['Ah_j']
        if self.activation:
            gated = self.activation(gated)
        return {'c': torch.max(gated, dim=1)[0]}

    def forward(self, g, h, e):
        h_res, e_res = h, e  # saved for the residual connections
        h = self.dropout(h)
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.edata['e'] = e
        g.edata['Ce'] = self.C(e)
        g.update_all(self.message_func, self.reduce_func, self.nodeapply)
        h = g.ndata['h']
        e = g.edata['e']
        if self.activation:
            e = self.activation(e)  # non-linear activation on edges
        if self.batch_norm:
            h = self.batchnorm_h(h)
            e = self.batchnorm_e(e)
        if self.residual:
            h = h_res + h  # residual connection
            e = e_res + e  # residual connection
        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels,
            self.residual)
bi-MP-HyeokjinK | bi-MP-HyeokjinK/layers/mlp_readout_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
"""
MLP Layer used after graph vector representation
"""
class MLPReadout(nn.Module):
    """MLP readout head: L hidden layers that halve the width at each step
    (ReLU in between), followed by a linear output layer."""

    def __init__(self, input_dim, output_dim, L=2):  # L=nb_hidden_layers
        super().__init__()
        fcs = [nn.Linear(input_dim // 2 ** k, input_dim // 2 ** (k + 1), bias=True)
               for k in range(L)]
        fcs.append(nn.Linear(input_dim // 2 ** L, output_dim, bias=True))
        self.FC_layers = nn.ModuleList(fcs)
        self.L = L

    def forward(self, x):
        y = x
        # hidden layers with ReLU, then the final linear projection
        for hidden in self.FC_layers[:-1]:
            y = F.relu(hidden(y))
        return self.FC_layers[-1](y)
bi-MP-HyeokjinK | bi-MP-HyeokjinK/layers/bi_graphsage_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
class biGraphSageLayer(nn.Module):
    """GraphSage layer that additionally produces a soft cluster assignment.

    Each node gets a soft assignment from a two-layer MLP; neighbours are
    weighted by a softmax over a Gaussian-RBF kernel of the learned
    distance between their assignments. ``forward`` returns
    ``(updated features, soft assignment matrix)``.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, assign_dim, sigma=1,
                 residual=False, bias=True):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.aggregator_type = aggregator_type
        self.residual = residual
        self.assign_dim = assign_dim
        self.sigma = sigma
        if in_feats != out_feats:
            self.residual = False  # widths differ -> no skip connection
        self.dropout = nn.Dropout(p=dropout)
        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout,
                                   bias=True)
        if self.aggregator_type == "maxpool":
            self.maxlinear = nn.Linear(in_feats, out_feats, bias=True)
        # two-layer MLP producing the per-node soft assignment
        self.s1 = nn.Linear(in_feats, out_feats, bias=True)
        self.s2 = nn.Linear(out_feats, self.assign_dim, bias=True)
        # learned metric for the assignment distance
        self.metric = nn.Linear(self.assign_dim, self.assign_dim, bias=True)
        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        return {'z': edges.src['h'], 'sigma_GD': edges.data['GD']}

    def reduce_func(self, nodes):
        # neighbour weights: softmax over the RBF kernel values
        alpha = F.softmax(nodes.mailbox['sigma_GD'], dim=1)
        weighted = alpha * nodes.mailbox['z']
        if self.aggregator_type == "maxpool":
            c = torch.max(weighted, dim=1)[0]
        else:
            c = torch.mean(weighted, dim=1)
        return {'c': c}

    def forward(self, g, h):
        h_res = h  # saved for the residual connection
        h = self.dropout(h)
        # per-node soft assignment
        g.ndata['Sh'] = F.softmax(self.s2(F.relu(self.s1(h))), dim=1)
        # pairwise assignment difference (si - sj) on every edge
        g.apply_edges(fn.u_sub_v('Sh', 'Sh', 'Sd'))
        diff = self.metric(g.edata['Sd'])  # (num_edges, assign_dim)
        # D = sqrt((si - sj) W W^T (si - sj)^T); then a Gaussian RBF of D
        dist = torch.sqrt(torch.sum(diff * diff, dim=1) + 1e-09).unsqueeze(1)
        g.edata['GD'] = torch.exp((-dist / (2 * (self.sigma ** 2))) + 1e-09)
        g.edata['sigma_GD'] = torch.sigmoid(g.edata['GD'])
        if self.aggregator_type == "maxpool":
            g.ndata['h'] = F.relu(self.maxlinear(h))
        else:
            g.ndata['h'] = h
        g.update_all(self.message_func, self.reduce_func, self.nodeapply)
        h = g.ndata['h']
        if self.batch_norm:
            h = self.batchnorm_h(h)
        if self.residual:
            h = h_res + h  # residual connection
        return h, g.ndata['Sh']

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels, self.residual)
"""
Aggregators for GraphSage
"""
class Aggregator(nn.Module):
    """
    Base Aggregator class: DGL reduce function that folds the incoming
    mailbox 'm' into an aggregate 'c'. Subclasses implement `aggre`.
    """

    def __init__(self):
        super().__init__()

    def forward(self, node):
        neighbour = node.mailbox['m']
        c = self.aggre(neighbour)
        return {"c": c}

    def aggre(self, neighbour):
        # neighbour: (num_nodes, num_neighbours, feat) -> (num_nodes, feat)
        raise NotImplementedError


class MeanAggregator(Aggregator):
    """
    Mean Aggregator for graphsage: average over the neighbour axis.
    """

    def __init__(self):
        super().__init__()

    def aggre(self, neighbour):
        mean_neighbour = torch.mean(neighbour, dim=1)
        return mean_neighbour


class MaxPoolAggregator(Aggregator):
    """
    Maxpooling aggregator for graphsage: linear + optional activation,
    then element-wise max over the neighbour axis.
    """

    def __init__(self, in_feats, out_feats, activation, bias):
        super().__init__()
        self.linear = nn.Linear(in_feats, out_feats, bias=bias)
        self.activation = activation

    def aggre(self, neighbour):
        neighbour = self.linear(neighbour)
        if self.activation:
            neighbour = self.activation(neighbour)
        maxpool_neighbour = torch.max(neighbour, dim=1)[0]
        return maxpool_neighbour


class LSTMAggregator(Aggregator):
    """
    LSTM aggregator for graphsage: runs an LSTM over a random permutation
    of the neighbours and keeps the last hidden state.
    """

    def __init__(self, in_feats, hidden_feats):
        super().__init__()
        self.lstm = nn.LSTM(in_feats, hidden_feats, batch_first=True)
        self.hidden_dim = hidden_feats
        self.hidden = self.init_hidden()
        # BUGFIX: nn.LSTM has no attribute `.weight` (its parameters are
        # weight_ih_l0 / weight_hh_l0), so the original
        # nn.init.xavier_uniform_(self.lstm.weight, ...) raised
        # AttributeError on construction.
        nn.init.xavier_uniform_(self.lstm.weight_ih_l0,
                                gain=nn.init.calculate_gain('relu'))
        nn.init.xavier_uniform_(self.lstm.weight_hh_l0,
                                gain=nn.init.calculate_gain('relu'))

    def init_hidden(self):
        """
        Defaulted to initialise hidden/cell states to all zeros.
        """
        return (torch.zeros(1, 1, self.hidden_dim),
                torch.zeros(1, 1, self.hidden_dim))

    def aggre(self, neighbours):
        """
        Aggregation: shuffle neighbours, run the LSTM, take the final step.
        """
        # neighbours: (num_nodes, num_neighbours, feat)
        rand_order = torch.randperm(neighbours.size()[1])
        neighbours = neighbours[:, rand_order, :]
        (lstm_out, self.hidden) = self.lstm(
            neighbours.view(neighbours.size()[0], neighbours.size()[1], -1))
        return lstm_out[:, -1, :]

    def forward(self, node):
        neighbour = node.mailbox['m']
        c = self.aggre(neighbour)
        return {"c": c}
class NodeApply(nn.Module):
    """
    Works -> the node_apply function in DGL paradigm: concatenates a node's
    own features with its aggregated neighbourhood, projects, L2-normalises
    and optionally applies the activation.
    """

    def __init__(self, in_feats, out_feats, activation, dropout, bias=True):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.linear = nn.Linear(in_feats * 2, out_feats, bias)
        self.activation = activation

    def concat(self, h, aggre_result):
        # project the concatenation [h || aggregate]
        return self.linear(torch.cat((h, aggre_result), 1))

    def forward(self, node):
        combined = self.concat(node.data['h'], node.data['c'])
        combined = F.normalize(combined, p=2, dim=1)
        if self.activation:
            combined = self.activation(combined)
        return {"h": combined}
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class GraphSageLayerEdgeFeat(nn.Module):
    """GraphSage variant with anisotropic (edge-gated) maxpool aggregation.

    Edge gates e_ij = B h_i + B h_j weight the projected neighbour
    features A h_j (via a sigmoid) before the max reduction.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.residual = residual
        if in_feats != out_feats:
            self.residual = False  # widths differ -> no skip connection
        self.dropout = nn.Dropout(p=dropout)
        self.activation = activation
        self.A = nn.Linear(in_feats, out_feats, bias=bias)
        self.B = nn.Linear(in_feats, out_feats, bias=bias)
        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout,
                                   bias=bias)
        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        # gate from the two endpoint projections: e_ij = Bh_i + Bh_j
        gate = edges.src['Bh'] + edges.dst['Bh']
        edges.data['e'] = gate
        return {'Ah_j': edges.src['Ah'], 'e_ij': gate}

    def reduce_func(self, nodes):
        # anisotropic maxpool: sigmoid-gated neighbour features, then max
        gated = torch.sigmoid(nodes.mailbox['e_ij']) * nodes.mailbox['Ah_j']
        if self.activation:
            gated = self.activation(gated)
        return {'c': torch.max(gated, dim=1)[0]}

    def forward(self, g, h):
        h_res = h  # saved for the residual connection
        h = self.dropout(h)
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.update_all(self.message_func, self.reduce_func, self.nodeapply)
        h = g.ndata['h']
        if self.batch_norm:
            h = self.batchnorm_h(h)
        if self.residual:
            h = h_res + h  # residual connection
        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels,
            self.residual)
##############################################################
class GraphSageLayerEdgeReprFeat(nn.Module):
    """GraphSage variant with explicit edge representations.

    Edge gates e_ij = C e_ij + B h_i + B h_j weight the projected
    neighbour features A h_j before a max reduction; the updated edge
    features are returned alongside the node features.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.residual = residual
        if in_feats != out_feats:
            self.residual = False  # widths differ -> no skip connection
        self.dropout = nn.Dropout(p=dropout)
        self.activation = activation
        self.A = nn.Linear(in_feats, out_feats, bias=bias)
        self.B = nn.Linear(in_feats, out_feats, bias=bias)
        self.C = nn.Linear(in_feats, out_feats, bias=bias)
        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout,
                                   bias=bias)
        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)
            self.batchnorm_e = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        # gate combines edge and endpoint projections:
        # e_ij = Ce_ij + Bh_i + Bh_j
        gate = edges.data['Ce'] + edges.src['Bh'] + edges.dst['Bh']
        edges.data['e'] = gate
        return {'Ah_j': edges.src['Ah'], 'e_ij': gate}

    def reduce_func(self, nodes):
        # anisotropic maxpool: sigmoid-gated neighbour features, then max
        gated = torch.sigmoid(nodes.mailbox['e_ij']) * nodes.mailbox['Ah_j']
        if self.activation:
            gated = self.activation(gated)
        return {'c': torch.max(gated, dim=1)[0]}

    def forward(self, g, h, e):
        h_res, e_res = h, e  # saved for the residual connections
        h = self.dropout(h)
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.edata['e'] = e
        g.edata['Ce'] = self.C(e)
        g.update_all(self.message_func, self.reduce_func, self.nodeapply)
        h = g.ndata['h']
        e = g.edata['e']
        if self.activation:
            e = self.activation(e)  # non-linear activation on edges
        if self.batch_norm:
            h = self.batchnorm_h(h)
            e = self.batchnorm_e(e)
        if self.residual:
            h = h_res + h  # residual connection
            e = e_res + e  # residual connection
        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels,
            self.residual)
bi-MP-HyeokjinK | bi-MP-HyeokjinK/layers/model_utils.py | import torch as th
from torch.autograd import Function
def batch2tensor(batch_adj, batch_feat, node_per_pool_graph):
    """
    transform a batched graph to batched adjacency tensor and node feature
    tensor: slice the block-diagonal adjacency and the stacked features
    into per-graph chunks of `node_per_pool_graph` nodes and stack them
    along a new leading batch dimension.
    """
    num_graphs = int(batch_adj.size()[0] / node_per_pool_graph)
    adj_chunks = []
    feat_chunks = []
    for gi in range(num_graphs):
        lo = gi * node_per_pool_graph
        hi = lo + node_per_pool_graph
        adj_chunks.append(batch_adj[lo:hi, lo:hi].unsqueeze(0))
        feat_chunks.append(batch_feat[lo:hi, :].unsqueeze(0))
    adj = th.cat(adj_chunks, dim=0)
    feat = th.cat(feat_chunks, dim=0)
    return feat, adj
def masked_softmax(matrix, mask, dim=-1, memory_efficient=True,
                   mask_fill_value=-1e32):
    '''
    masked_softmax for dgl batch graph
    code snippet contributed by AllenNLP (https://github.com/allenai/allennlp)

    Softmax over `matrix` along `dim`, restricted to positions where `mask`
    is non-zero. With `memory_efficient=True` masked positions are filled
    with a large negative value before the softmax; otherwise the softmax
    is renormalised after zeroing masked entries.
    '''
    if mask is None:
        return th.nn.functional.softmax(matrix, dim=dim)
    mask = mask.float()
    # broadcast the mask up to the matrix rank
    while mask.dim() < matrix.dim():
        mask = mask.unsqueeze(1)
    if not memory_efficient:
        result = th.nn.functional.softmax(matrix * mask, dim=dim)
        result = result * mask
        result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
    else:
        # BUGFIX: masked_fill requires a boolean mask in current PyTorch;
        # the original `(1 - mask).byte()` warned/raised on modern versions.
        masked_matrix = matrix.masked_fill(mask == 0, mask_fill_value)
        result = th.nn.functional.softmax(masked_matrix, dim=dim)
    return result
bi-MP-HyeokjinK | bi-MP-HyeokjinK/layers/gated_gcn_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class GatedGCNLayer(nn.Module):
    """
    Residual gated graph convolution (Bresson & Laurent, ICLR 2018).

    Node update: h_i' = A h_i + sum_j sigma(e_ij) * B h_j
                        / (sum_j sigma(e_ij) + eps)
    Edge update: e_ij' = D h_i + E h_j + C e_ij
    followed by optional batch-norm, ReLU, residual and dropout.
    """

    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        if input_dim != output_dim:
            self.residual = False  # widths differ -> no skip connection
        # five independent linear projections of the gated update
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.C = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)
        self.bn_node_e = nn.BatchNorm1d(output_dim)

    def forward(self, g, h, e):
        h_res, e_res = h, e  # saved for the residual connections
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        g.edata['e'] = e
        g.edata['Ce'] = self.C(e)
        # edge gates: e_ij = Dh_i + Eh_j + Ce_ij
        g.apply_edges(fn.u_add_v('Dh', 'Eh', 'DEh'))
        g.edata['e'] = g.edata['DEh'] + g.edata['Ce']
        g.edata['sigma'] = torch.sigmoid(g.edata['e'])
        # gated average of neighbour features
        g.update_all(fn.u_mul_e('Bh', 'sigma', 'm'), fn.sum('m', 'sum_sigma_h'))
        g.update_all(fn.copy_e('sigma', 'm'), fn.sum('m', 'sum_sigma'))
        g.ndata['h'] = g.ndata['Ah'] + g.ndata['sum_sigma_h'] / (g.ndata['sum_sigma'] + 1e-6)
        h = g.ndata['h']  # result of graph convolution
        e = g.edata['e']  # result of graph convolution
        if self.batch_norm:
            h = self.bn_node_h(h)  # batch normalization
            e = self.bn_node_e(e)  # batch normalization
        h = F.relu(h)  # non-linear activation
        e = F.relu(e)  # non-linear activation
        if self.residual:
            h = h_res + h  # residual connection
            e = e_res + e  # residual connection
        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)
        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(
            self.__class__.__name__, self.in_channels, self.out_channels)
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class GatedGCNLayerEdgeFeatOnly(nn.Module):
    """
    GatedGCN variant for edge-feature analysis: edge gates are computed
    purely from endpoint node features (e_ij = D h_i + E h_j), the input
    edge features `e` are passed through untouched, and only node features
    are updated. forward(g, h, e) -> (h, e) with e returned unchanged.
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        # residual add requires matching feature sizes
        if input_dim != output_dim:
            self.residual = False
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)
    def forward(self, g, h, e):
        h_in = h # for residual connection
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        #g.update_all(self.message_func,self.reduce_func)
        # gate from node features only: e_ij = D h_i + E h_j (input e ignored)
        g.apply_edges(fn.u_add_v('Dh', 'Eh', 'e'))
        g.edata['sigma'] = torch.sigmoid(g.edata['e'])
        # normalized gated aggregation (eps guards isolated nodes)
        g.update_all(fn.u_mul_e('Bh', 'sigma', 'm'), fn.sum('m', 'sum_sigma_h'))
        g.update_all(fn.copy_e('sigma', 'm'), fn.sum('m', 'sum_sigma'))
        g.ndata['h'] = g.ndata['Ah'] + g.ndata['sum_sigma_h'] / (g.ndata['sum_sigma'] + 1e-6)
        h = g.ndata['h'] # result of graph convolution
        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization
        h = F.relu(h) # non-linear activation
        if self.residual:
            h = h_in + h # residual connection
        h = F.dropout(h, self.dropout, training=self.training)
        return h, e
    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels)
##############################################################
class GatedGCNLayerIsotropic(nn.Module):
    """
    Isotropic (ungated) ablation of GatedGCN: h_i' = A h_i + sum_j B h_j,
    i.e. every neighbor contributes equally with no edge gate. Edge features
    `e` are passed through unchanged. forward(g, h, e) -> (h, e).
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        # residual add requires matching feature sizes
        if input_dim != output_dim:
            self.residual = False
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)
    def forward(self, g, h, e):
        h_in = h # for residual connection
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        #g.update_all(self.message_func,self.reduce_func)
        # plain (unweighted) neighborhood sum
        g.update_all(fn.copy_u('Bh', 'm'), fn.sum('m', 'sum_h'))
        g.ndata['h'] = g.ndata['Ah'] + g.ndata['sum_h']
        h = g.ndata['h'] # result of graph convolution
        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization
        h = F.relu(h) # non-linear activation
        if self.residual:
            h = h_in + h # residual connection
        h = F.dropout(h, self.dropout, training=self.training)
        return h, e
    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels)
| 6,852 | 33.964286 | 111 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/layers/gat_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GATConv
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
class GATLayer(nn.Module):
    """
    Parameters
    ----------
    in_dim :
        Number of input features.
    out_dim :
        Number of output features (per head).
    num_heads : int
        Number of heads in Multi-Head Attention.
    dropout :
        Required for dropout of attn and feat in GATConv
    batch_norm :
        boolean flag for batch_norm layer.
    residual :
        If True, use residual connection inside this layer. Default: ``False``.
    activation : callable activation function/layer or None, optional.
        If not None, applies an activation function to the updated node features.

    Using dgl builtin GATConv by default:
    https://github.com/graphdeeplearning/benchmarking-gnns/commit/206e888ecc0f8d941c54e061d5dffcc7ae2142fc

    forward(g, h) returns node features of size out_dim * num_heads
    (head outputs concatenated).
    """
    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=False, activation=F.elu):
        super().__init__()
        self.residual = residual
        self.activation = activation
        self.batch_norm = batch_norm
        # residual add requires input size == concatenated head output size
        if in_dim != (out_dim*num_heads):
            self.residual = False
        # same dropout rate used for both feature dropout and attention dropout
        self.gatconv = GATConv(in_dim, out_dim, num_heads, dropout, dropout)
        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_dim * num_heads)
    def forward(self, g, h):
        h_in = h # for residual connection
        # GATConv returns (N, num_heads, out_dim); flatten(1) concatenates heads
        h = self.gatconv(g, h).flatten(1)
        if self.batch_norm:
            h = self.batchnorm_h(h)
        if self.activation:
            h = self.activation(h)
        if self.residual:
            h = h_in + h # residual connection
        return h
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class CustomGATHeadLayer(nn.Module):
    """
    A single hand-written GAT attention head: scores each edge with
    leaky_relu(a^T [z_i || z_j]), softmax-normalizes the scores per
    destination node, and aggregates the projected neighbor features.
    Infinite activations are clamped to 1e+9 for numerical stability.
    """
    def __init__(self, in_dim, out_dim, dropout, batch_norm):
        super().__init__()
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.fc = nn.Linear(in_dim, out_dim, bias=False)
        # attention scorer over concatenated (src, dst) projections
        self.attn_fc = nn.Linear(2 * out_dim, 1, bias=False)
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
    def edge_attention(self, edges):
        # raw attention logit per edge from concatenated endpoint features
        z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
        a = self.attn_fc(z2)
        a[torch.isinf(a)] = 1e+9 # clamping
        return {'e': F.leaky_relu(a)}
    def message_func(self, edges):
        return {'z': edges.src['z'], 'e': edges.data['e']}
    def reduce_func(self, nodes):
        # softmax over each node's incoming edges, then weighted sum
        alpha = F.softmax(nodes.mailbox['e'], dim=1)
        alpha = F.dropout(alpha, self.dropout, training=self.training)
        h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
        h[torch.isinf(h)] = 1e+9 # clamping
        return {'h': h}
    def forward(self, g, h):
        z = self.fc(h)
        z[torch.isinf(z)] = 1e+9 # clamping
        g.ndata['z'] = z
        g.apply_edges(self.edge_attention)
        g.update_all(self.message_func, self.reduce_func)
        h = g.ndata['h']
        if self.batch_norm:
            h = self.batchnorm_h(h)
        h = F.elu(h)
        h = F.dropout(h, self.dropout, training=self.training)
        return h
class CustomGATLayer(nn.Module):
    """
    Multi-head GAT layer built from CustomGATHeadLayer heads.

    Param: [in_dim, out_dim, n_heads]

    Head outputs are merged by concatenation (self.merge == 'cat', giving
    out_dim * num_heads features) or by averaging over heads; a residual
    connection is applied when the input size matches the merged size.
    """
    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.residual = residual
        # residual add requires input size == concatenated head output size
        if in_dim != (out_dim*num_heads):
            self.residual = False
        self.heads = nn.ModuleList()
        for i in range(num_heads):
            self.heads.append(CustomGATHeadLayer(in_dim, out_dim, dropout, batch_norm))
        self.merge = 'cat'
    def forward(self, g, h):
        h_in = h # for residual connection
        head_outs = [attn_head(g, h) for attn_head in self.heads]
        if self.merge == 'cat':
            h = torch.cat(head_outs, dim=1)
        else:
            # BUGFIX: average over the head dimension only; without dim=0,
            # torch.mean reduces the stacked tensor to a scalar and destroys
            # the per-node features.
            h = torch.mean(torch.stack(head_outs), dim=0)
        if self.residual:
            h = h_in + h # residual connection
        return h
    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.num_heads, self.residual)
##############################################################
class CustomGATHeadLayerEdgeReprFeat(nn.Module):
    """
    GAT head that maintains an explicit edge representation: both the
    attention logit and a new edge feature are computed from the
    concatenation [z_e || z_src || z_dst]; node features are the
    attention-weighted sum of projected neighbors.
    forward(g, h, e) -> (h, e).
    """
    def __init__(self, in_dim, out_dim, dropout, batch_norm):
        super().__init__()
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.fc_h = nn.Linear(in_dim, out_dim, bias=False)
        self.fc_e = nn.Linear(in_dim, out_dim, bias=False)
        # projects [z_e || z_src || z_dst] to the updated edge feature
        self.fc_proj = nn.Linear(3* out_dim, out_dim)
        self.attn_fc = nn.Linear(3* out_dim, 1, bias=False)
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        self.batchnorm_e = nn.BatchNorm1d(out_dim)
    def edge_attention(self, edges):
        z = torch.cat([edges.data['z_e'], edges.src['z_h'], edges.dst['z_h']], dim=1)
        e_proj = self.fc_proj(z)
        attn = F.leaky_relu(self.attn_fc(z))
        return {'attn': attn, 'e_proj': e_proj}
    def message_func(self, edges):
        return {'z': edges.src['z_h'], 'attn': edges.data['attn']}
    def reduce_func(self, nodes):
        # softmax over each node's incoming edges, then weighted sum
        alpha = F.softmax(nodes.mailbox['attn'], dim=1)
        h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
        return {'h': h}
    def forward(self, g, h, e):
        z_h = self.fc_h(h)
        z_e = self.fc_e(e)
        g.ndata['z_h'] = z_h
        g.edata['z_e'] = z_e
        g.apply_edges(self.edge_attention)
        g.update_all(self.message_func, self.reduce_func)
        h = g.ndata['h']
        e = g.edata['e_proj']
        if self.batch_norm:
            h = self.batchnorm_h(h)
            e = self.batchnorm_e(e)
        h = F.elu(h)
        e = F.elu(e)
        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)
        return h, e
class CustomGATLayerEdgeReprFeat(nn.Module):
    """
    Param: [in_dim, out_dim, n_heads]

    Multi-head wrapper around CustomGATHeadLayerEdgeReprFeat: each head
    updates both node and edge features; outputs are concatenated across
    heads ('cat' is the only implemented merge). Residual connections are
    applied when in_dim == out_dim * num_heads.
    forward(g, h, e) -> (h, e).
    """
    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.residual = residual
        # residual add requires input size == concatenated head output size
        if in_dim != (out_dim*num_heads):
            self.residual = False
        self.heads = nn.ModuleList()
        for i in range(num_heads):
            self.heads.append(CustomGATHeadLayerEdgeReprFeat(in_dim, out_dim, dropout, batch_norm))
        self.merge = 'cat'
    def forward(self, g, h, e):
        h_in = h # for residual connection
        e_in = e
        head_outs_h = []
        head_outs_e = []
        for attn_head in self.heads:
            h_temp, e_temp = attn_head(g, h, e)
            head_outs_h.append(h_temp)
            head_outs_e.append(e_temp)
        if self.merge == 'cat':
            h = torch.cat(head_outs_h, dim=1)
            e = torch.cat(head_outs_e, dim=1)
        else:
            raise NotImplementedError
        if self.residual:
            h = h_in + h # residual connection
            e = e_in + e
        return h, e
    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.num_heads, self.residual)
##############################################################
class CustomGATHeadLayerIsotropic(nn.Module):
    """
    Isotropic ablation of a GAT head: no attention weights at all, just a
    linear projection followed by an unweighted sum over neighbors.
    """
    def __init__(self, in_dim, out_dim, dropout, batch_norm):
        super().__init__()
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.fc = nn.Linear(in_dim, out_dim, bias=False)
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
    def message_func(self, edges):
        return {'z': edges.src['z']}
    def reduce_func(self, nodes):
        # plain sum over incoming messages (no attention)
        h = torch.sum(nodes.mailbox['z'], dim=1)
        return {'h': h}
    def forward(self, g, h):
        z = self.fc(h)
        g.ndata['z'] = z
        g.update_all(self.message_func, self.reduce_func)
        h = g.ndata['h']
        if self.batch_norm:
            h = self.batchnorm_h(h)
        h = F.elu(h)
        h = F.dropout(h, self.dropout, training=self.training)
        return h
class CustomGATLayerIsotropic(nn.Module):
    """
    Param: [in_dim, out_dim, n_heads]

    Multi-head wrapper around CustomGATHeadLayerIsotropic (attention-free
    heads). Head outputs are concatenated ('cat') or averaged over heads;
    edge features `e` are passed through unchanged.
    forward(g, h, e) -> (h, e).
    """
    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.residual = residual
        # residual add requires input size == concatenated head output size
        if in_dim != (out_dim*num_heads):
            self.residual = False
        self.heads = nn.ModuleList()
        for i in range(num_heads):
            self.heads.append(CustomGATHeadLayerIsotropic(in_dim, out_dim, dropout, batch_norm))
        self.merge = 'cat'
    def forward(self, g, h, e):
        h_in = h # for residual connection
        head_outs = [attn_head(g, h) for attn_head in self.heads]
        if self.merge == 'cat':
            h = torch.cat(head_outs, dim=1)
        else:
            # BUGFIX: average over the head dimension only; without dim=0,
            # torch.mean reduces the stacked tensor to a scalar and destroys
            # the per-node features.
            h = torch.mean(torch.stack(head_outs), dim=0)
        if self.residual:
            h = h_in + h # residual connection
        return h, e
    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.num_heads, self.residual)
| 10,429 | 29.949555 | 107 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/layers/bi_gated_gcn_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
import numpy as np
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class biGatedGCNLayer(nn.Module):
    """
    GatedGCN layer with an additional cluster-aware ("bilateral") edge gate.

    Each node emits a soft cluster assignment Sh = softmax(s2(relu(s1(h)))).
    For every edge the assignment difference (s_i - s_j) is mapped through a
    learned metric; a Gaussian RBF of the resulting distance produces a second
    gate sigma(GD) that multiplies the usual GatedGCN gate sigma(e), damping
    messages across cluster boundaries.

    forward(g, h, e) -> (h, e, Sh)
        h  : updated node features
        e  : updated edge features
        Sh : per-node soft assignments (num_nodes, assign_dim)
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, assign_dim, sigma=1, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        self.assign_dim = assign_dim
        self.sigma = sigma  # Gaussian RBF bandwidth
        # residual add requires matching feature sizes
        if input_dim != output_dim:
            self.residual = False
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.C = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        # two-layer MLP producing the soft cluster assignment
        self.s1 = nn.Linear(input_dim, output_dim, bias=True)
        self.s2 = nn.Linear(output_dim, self.assign_dim, bias=True)
        # learned linear metric for the assignment distance
        self.metric = nn.Linear( self.assign_dim, self.assign_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)
        self.bn_node_e = nn.BatchNorm1d(output_dim)
    def forward(self, g, h, e):
        h_in = h # for residual connection
        e_in = e # for residual connection
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        g.ndata['Sh'] = F.softmax(self.s2(F.relu(self.s1(h))),dim=1) # soft assignment
        g.edata['e'] = e
        g.edata['Ce'] = self.C(e)
        g.apply_edges(fn.u_add_v('Dh', 'Eh', 'DEh'))
        g.edata['e'] = g.edata['DEh'] + g.edata['Ce']
        g.edata['sigma_e'] = torch.sigmoid(g.edata['e'])
        g.apply_edges(fn.u_sub_v('Sh', 'Sh', 'Sd')) # for cluster distance: (si - sj)
        Sd = g.edata['Sd'] # sum_edges, assign_dim
        Sd_h = self.metric(Sd) # sum_edges, assign_dim; D = sqrt( (si - sj) W W^t (si - sj)^t )
        # BUGFIX: +1e-9 inside sqrt -- sqrt has an infinite gradient at 0 and
        # Sd == 0 whenever both endpoints share an assignment (e.g. self
        # comparisons), which produced NaN gradients. Same guard as biGATLayer.
        D = torch.sqrt(torch.sum(Sd_h*Sd_h,dim=1) + 1e-9).unsqueeze(1) # sum_edges, 1
        g.edata['GD'] = torch.exp( -D / ( 2*(self.sigma**2) ) ) # sum_edges, 1  # G = GaussianRBF(D)
        g.edata['sigma_GD'] = torch.sigmoid(g.edata['GD'])
        # bilateral gate: standard edge gate * cluster-proximity gate
        g.edata['sigma'] = g.edata['sigma_e'] * g.edata['sigma_GD']
        g.update_all(fn.u_mul_e('Bh', 'sigma', 'm'), fn.sum('m', 'sum_sigma_h'))
        g.update_all(fn.copy_e('sigma_e', 'm'), fn.sum('m', 'sum_sigma_e'))
        g.update_all(fn.copy_e('sigma_GD', 'm'), fn.sum('m', 'sum_sigma_GD'))
        g.ndata['h'] = ( g.ndata['Ah'] + g.ndata['sum_sigma_h'] /
                        ((g.ndata['sum_sigma_e'] + 1e-6) * (g.ndata['sum_sigma_GD'] + 1e-6)))
        h = g.ndata['h'] # result of graph convolution
        e = g.edata['e'] # result of graph convolution
        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization
            e = self.bn_node_e(e) # batch normalization
        h = F.relu(h) # non-linear activation
        e = F.relu(e) # non-linear activation
        if self.residual:
            h = h_in + h # residual connection
            e = e_in + e # residual connection
        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)
        return h, e, g.ndata['Sh']
    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels)
class biGatedGCNLayer2(nn.Module):
    """
    GatedGCN layer with an attention-style bilateral gate: each node's soft
    cluster assignment Sh is projected to a query (Q) and key (K); a scaled
    dot product over each edge yields an affinity `att` that multiplies the
    usual GatedGCN edge gate sigma(e).

    forward(g, h, e) -> (h, e, Sh)
        h  : updated node features
        e  : updated edge features
        Sh : per-node soft assignments (num_nodes, assign_dim)
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, assign_dim, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        self.assign_dim = assign_dim
        # residual add requires matching feature sizes
        if input_dim != output_dim:
            self.residual = False
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.C = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        # two-layer MLP producing the soft cluster assignment
        self.s1 = nn.Linear(input_dim, output_dim, bias=True)
        self.s2 = nn.Linear(output_dim, self.assign_dim, bias=True)
        # query/key projections of the assignments for the edge affinity
        self.Q = nn.Linear(self.assign_dim, output_dim, bias=True)
        self.K = nn.Linear(self.assign_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)
        self.bn_node_e = nn.BatchNorm1d(output_dim)
    def scaledDP(self, edges):
        # scaled dot product between key (src) and query (dst), one scalar per edge
        return {'att': torch.sum(edges.data['Skq'],dim=1) / edges.src['Sk'].shape[1] }
    def forward(self, g, h, e):
        h_in = h # for residual connection
        e_in = e # for residual connection
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        # BUGFIX: make the softmax dimension explicit (implicit dim is
        # deprecated); dim=1 normalizes each node's assignment, matching
        # biGatedGCNLayer.
        g.ndata['Sh'] = F.softmax(self.s2(F.relu(self.s1(h))), dim=1) # soft assignment
        g.edata['e'] = e
        g.edata['Ce'] = self.C(e)
        g.apply_edges(fn.u_add_v('Dh', 'Eh', 'DEh'))
        g.edata['e'] = g.edata['DEh'] + g.edata['Ce']
        g.edata['ehat'] = torch.sigmoid(g.edata['e'])
        Sh = g.ndata['Sh']
        g.ndata['Sq'] = self.Q(Sh) # quary for dst
        g.ndata['Sk'] = self.K(Sh) # key for src
        g.apply_edges(fn.u_mul_v('Sk', 'Sq', 'Skq'))
        g.apply_edges(self.scaledDP) # scaled-dot product
        g.edata['sigma'] = g.edata['ehat'] * g.edata['att'].unsqueeze(1) # bilateral gating
        g.update_all(fn.u_mul_e('Bh', 'sigma', 'm'), fn.sum('m', 'sum_sigma_h'))
        g.update_all(fn.copy_e('sigma', 'm'), fn.sum('m', 'sum_sigma'))
        g.ndata['h'] = g.ndata['Ah'] + g.ndata['sum_sigma_h'] / (g.ndata['sum_sigma'] + 1e-6)
        h = g.ndata['h'] # result of graph convolution
        e = g.edata['e'] # result of graph convolution
        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization
            e = self.bn_node_e(e) # batch normalization
        h = F.relu(h) # non-linear activation
        e = F.relu(e) # non-linear activation
        if self.residual:
            h = h_in + h # residual connection
            e = e_in + e # residual connection
        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)
        return h, e, Sh
    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels)
class biGatedGCNLayer3(nn.Module):
    """
    GatedGCN layer where the soft cluster assignments of both endpoints are
    concatenated, projected by a learned matrix F, and added into the edge
    feature: e_ij = D h_i + E h_j + C e_ij + F [s_i || s_j]. The resulting
    edge then drives the standard GatedGCN gate.

    forward(g, h, e) -> (h, e, Sh)
        h  : updated node features
        e  : updated edge features
        Sh : per-node soft assignments (num_nodes, assign_dim)
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, assign_dim, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        self.assign_dim = assign_dim
        # residual add requires matching feature sizes
        if input_dim != output_dim:
            self.residual = False
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.C = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        # two-layer MLP producing the soft cluster assignment
        self.s1 = nn.Linear(input_dim, output_dim, bias=True)
        self.s2 = nn.Linear(output_dim, self.assign_dim, bias=True)
        self.F = nn.Parameter(torch.FloatTensor(size=(self.assign_dim * 2, output_dim)))
        # BUGFIX: torch.FloatTensor(size=...) is *uninitialized* memory;
        # without an explicit init the parameter could start with arbitrary
        # (possibly huge) values.
        nn.init.xavier_uniform_(self.F)
        self.bn_node_h = nn.BatchNorm1d(output_dim)
        self.bn_node_e = nn.BatchNorm1d(output_dim)
    def concat_message_function(self, edges):
        # [s_src || s_dst] per edge
        return {'cSh': torch.cat([edges.src['Sh'], edges.dst['Sh']],dim=1)}
    def forward(self, g, h, e):
        h_in = h # for residual connection
        e_in = e # for residual connection
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        # BUGFIX: make the softmax dimension explicit (implicit dim is
        # deprecated); dim=1 normalizes each node's assignment, matching
        # biGatedGCNLayer.
        g.ndata['Sh'] = F.softmax(self.s2(F.relu(self.s1(h))), dim=1) # soft assignment
        g.edata['e'] = e
        g.edata['Ce'] = self.C(e)
        g.apply_edges(self.concat_message_function) # concat the src&dst soft assignments
        g.edata['Se'] = g.edata['cSh'] @ self.F # update
        g.apply_edges(fn.u_add_v('Dh', 'Eh', 'DEh'))
        g.edata['e'] = g.edata['DEh'] + g.edata['Ce'] + g.edata['Se'] # edge feat considering the soft assignments
        g.edata['sigma'] = torch.sigmoid(g.edata['e'])
        g.update_all(fn.u_mul_e('Bh', 'sigma', 'm'), fn.sum('m', 'sum_sigma_h'))
        g.update_all(fn.copy_e('sigma', 'm'), fn.sum('m', 'sum_sigma'))
        g.ndata['h'] = g.ndata['Ah'] + g.ndata['sum_sigma_h'] / (g.ndata['sum_sigma'] + 1e-6)
        h = g.ndata['h'] # result of graph convolution
        e = g.edata['e'] # result of graph convolution
        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization
            e = self.bn_node_e(e) # batch normalization
        h = F.relu(h) # non-linear activation
        e = F.relu(e) # non-linear activation
        if self.residual:
            h = h_in + h # residual connection
            e = e_in + e # residual connection
        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)
        return h, e, g.ndata['Sh']
    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels)
class biGatedGCNLayer4(nn.Module):
    """
    Variant of biGatedGCNLayer (identical computation in this file): GatedGCN
    with a cluster-aware bilateral edge gate built from a Gaussian RBF over a
    learned metric distance between the endpoints' soft assignments.

    forward(g, h, e) -> (h, e, Sh)
        h  : updated node features
        e  : updated edge features
        Sh : per-node soft assignments (num_nodes, assign_dim)
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, assign_dim, sigma=1, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        self.assign_dim = assign_dim
        self.sigma = sigma  # Gaussian RBF bandwidth
        # residual add requires matching feature sizes
        if input_dim != output_dim:
            self.residual = False
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.C = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        # two-layer MLP producing the soft cluster assignment
        self.s1 = nn.Linear(input_dim, output_dim, bias=True)
        self.s2 = nn.Linear(output_dim, self.assign_dim, bias=True)
        # learned linear metric for the assignment distance
        self.metric = nn.Linear( self.assign_dim, self.assign_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)
        self.bn_node_e = nn.BatchNorm1d(output_dim)
    def forward(self, g, h, e):
        h_in = h # for residual connection
        e_in = e # for residual connection
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        g.ndata['Sh'] = F.softmax(self.s2(F.relu(self.s1(h))),dim=1) # soft assignment
        g.edata['e'] = e
        g.edata['Ce'] = self.C(e)
        g.apply_edges(fn.u_add_v('Dh', 'Eh', 'DEh'))
        g.edata['e'] = g.edata['DEh'] + g.edata['Ce']
        g.edata['sigma_e'] = torch.sigmoid(g.edata['e'])
        g.apply_edges(fn.u_sub_v('Sh', 'Sh', 'Sd')) # for cluster distance: (si - sj)
        Sd = g.edata['Sd'] # sum_edges, assign_dim
        Sd_h = self.metric(Sd) # sum_edges, assign_dim; D = sqrt( (si - sj) W W^t (si - sj)^t )
        # BUGFIX: +1e-9 inside sqrt -- sqrt has an infinite gradient at 0 and
        # Sd == 0 whenever both endpoints share an assignment, which produced
        # NaN gradients. Same guard as biGATLayer.
        D = torch.sqrt(torch.sum(Sd_h*Sd_h,dim=1) + 1e-9).unsqueeze(1) # sum_edges, 1
        g.edata['GD'] = torch.exp( -D / ( 2*(self.sigma**2) ) ) # sum_edges, 1  # G = GaussianRBF(D)
        g.edata['sigma_GD'] = torch.sigmoid(g.edata['GD'])
        # bilateral gate: standard edge gate * cluster-proximity gate
        g.edata['sigma'] = g.edata['sigma_e'] * g.edata['sigma_GD']
        g.update_all(fn.u_mul_e('Bh', 'sigma', 'm'), fn.sum('m', 'sum_sigma_h'))
        g.update_all(fn.copy_e('sigma_e', 'm'), fn.sum('m', 'sum_sigma_e'))
        g.update_all(fn.copy_e('sigma_GD', 'm'), fn.sum('m', 'sum_sigma_GD'))
        g.ndata['h'] = ( g.ndata['Ah'] + g.ndata['sum_sigma_h'] /
                        ((g.ndata['sum_sigma_e'] + 1e-6) * (g.ndata['sum_sigma_GD'] + 1e-6)))
        h = g.ndata['h'] # result of graph convolution
        e = g.edata['e'] # result of graph convolution
        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization
            e = self.bn_node_e(e) # batch normalization
        h = F.relu(h) # non-linear activation
        e = F.relu(e) # non-linear activation
        if self.residual:
            h = h_in + h # residual connection
            e = e_in + e # residual connection
        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)
        return h, e, g.ndata['Sh']
    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels)
bi-MP-HyeokjinK | bi-MP-HyeokjinK/layers/gcn_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
from dgl.nn.pytorch import GraphConv
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
# Message function: sends the source node feature h along each edge.
# Equivalent to => return {'m': edges.src['h']}
# NOTE: fn.copy_src was removed in recent DGL releases; fn.copy_u is the
# drop-in equivalent (same semantics) and this file already uses the modern
# builtin API elsewhere (fn.copy_e).
msg = fn.copy_u('h', 'm')
# Reduce function: mean of incoming messages written back to feature h.
# (The name shadows the `reduce` builtin, but GCNLayer.forward depends on it.)
reduce = fn.mean('m', 'h')
class NodeApplyModule(nn.Module):
    """Node-apply function: replaces each node feature h_v with W h_v + b."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        # one affine projection shared by all nodes
        self.linear = nn.Linear(in_dim, out_dim)

    def forward(self, node):
        """Project the batched node features and return the updated dict."""
        projected = self.linear(node.data['h'])
        return {'h': projected}
class GCNLayer(nn.Module):
    """
    Param: [in_dim, out_dim]

    One GCN layer: mean aggregation of neighbor features followed by a
    linear projection (either via the module-level msg/reduce functions and
    NodeApplyModule, or via DGL's builtin GraphConv when dgl_builtin=True),
    then optional BatchNorm, activation, residual connection and dropout.
    """
    def __init__(self, in_dim, out_dim, activation, dropout, batch_norm, residual=False, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.batch_norm = batch_norm
        self.residual = residual
        self.dgl_builtin = dgl_builtin
        # residual add requires matching feature sizes
        if in_dim != out_dim:
            self.residual = False
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        self.activation = activation
        self.dropout = nn.Dropout(dropout)
        if self.dgl_builtin == False:
            # hand-rolled path: mean-aggregate then apply a Linear per node
            self.apply_mod = NodeApplyModule(in_dim, out_dim)
        elif dgl.__version__ < "0.5":
            self.conv = GraphConv(in_dim, out_dim)
        else:
            # newer DGL raises on 0-in-degree nodes unless explicitly allowed
            self.conv = GraphConv(in_dim, out_dim, allow_zero_in_degree=True)
    def forward(self, g, feature):
        h_in = feature # to be used for residual connection
        if self.dgl_builtin == False:
            g.ndata['h'] = feature
            # mean of neighbor features (module-level msg/reduce builtins)
            g.update_all(msg, reduce)
            g.apply_nodes(func=self.apply_mod)
            h = g.ndata['h'] # result of graph convolution
        else:
            h = self.conv(g, feature)
        if self.batch_norm:
            h = self.batchnorm_h(h) # batch normalization
        if self.activation:
            h = self.activation(h)
        if self.residual:
            h = h_in + h # residual connection
        h = self.dropout(h)
        return h
    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.residual)
bi-MP-HyeokjinK | bi-MP-HyeokjinK/layers/bi_gat_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.nn.pytorch import GATConv
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
class biGATHeadLayer(nn.Module):
    """
    GAT attention head with an extra "bilateral" weight: besides the usual
    softmax attention alpha over incoming edges, each message is weighted by
    beta, a per-node normalization of the edge field 'sigma_GD'.

    NOTE: g.edata['sigma_GD'] must be precomputed before forward() is called;
    in this file it is set by the enclosing biGATLayer from the Gaussian RBF
    of the cluster-assignment distance.
    """
    def __init__(self, in_dim, out_dim, dropout, batch_norm):
        super().__init__()
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.fc = nn.Linear(in_dim, out_dim, bias=True)
        # attention scorer over concatenated (src, dst) projections
        self.attn_fc = nn.Linear(2 * out_dim, 1, bias=True)
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
    def edge_attention(self, edges):
        # raw attention logit per edge from concatenated endpoint features
        z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
        a = self.attn_fc(z2)
        a[torch.isinf(a)] = 1e+9 # clamping
        return {'e': F.leaky_relu(a)}
    def message_func(self, edges):
        return {'z': edges.src['z'], 'e': edges.data['e'] , 'sigma_GD': edges.data['sigma_GD']}
    def reduce_func(self, nodes):
        # alpha: softmax attention; beta: per-node normalized bilateral weight
        alpha = F.softmax(nodes.mailbox['e'], dim=1)
        beta_sum = torch.sum(nodes.mailbox['sigma_GD'], dim=1).unsqueeze(-1)
        beta = nodes.mailbox['sigma_GD'] / (beta_sum + 1e-6)
        alpha = F.dropout(alpha, self.dropout, training=self.training)
        h = torch.sum(alpha * beta * nodes.mailbox['z'], dim=1)
        h[torch.isinf(h)] = 1e+9 # clamping
        return {'h': h}
    def forward(self, g, h):
        z = self.fc(h)
        z[torch.isinf(z)] = 1e+9 # clamping
        g.ndata['z'] = z
        g.apply_edges(self.edge_attention)
        g.update_all(self.message_func, self.reduce_func)
        h = g.ndata['h']
        if self.batch_norm:
            h = self.batchnorm_h(h)
        h = F.elu(h)
        h = F.dropout(h, self.dropout, training=self.training)
        return h
class biGATLayer(nn.Module):
    """
    Param: [in_dim, out_dim, n_heads]

    Multi-head bilateral GAT layer: computes each node's soft cluster
    assignment Sh, derives a Gaussian-RBF edge weight g.edata['sigma_GD']
    from a learned metric distance between endpoint assignments, then runs
    the biGATHeadLayer heads (which consume 'sigma_GD' alongside standard
    attention). forward(g, h) -> (h, Sh).
    """
    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, assign_dim, sigma=1, residual=True):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.residual = residual
        self.assign_dim = assign_dim
        self.sigma = sigma  # Gaussian RBF bandwidth
        # two-layer MLP producing the soft cluster assignment
        self.s1 = nn.Linear(in_dim, out_dim, bias=True)
        self.s2 = nn.Linear(out_dim, self.assign_dim, bias=True)
        # learned linear metric for the assignment distance
        self.metric = nn.Linear( self.assign_dim, self.assign_dim, bias=True)
        # residual add requires input size == concatenated head output size
        if in_dim != (out_dim*num_heads):
            self.residual = False
        self.heads = nn.ModuleList()
        for i in range(num_heads):
            self.heads.append(biGATHeadLayer(in_dim, out_dim, dropout, batch_norm))
        self.merge = 'cat'
    def forward(self, g, h):
        h_in = h # for residual connection
        g.ndata['Sh'] = F.softmax(self.s2(F.relu(self.s1(h))), dim=1) # soft assignment
        g.apply_edges(fn.u_sub_v('Sh', 'Sh', 'Sd')) # for cluster distance: (si - sj)
        Sd = g.edata['Sd'] # sum_edges, assign_dim
        Sd_h = F.relu(self.metric(Sd)) # sum_edges, assign_dim; D = sqrt( (si - sj) W W^t (si - sj)^t )
        # eps inside sqrt avoids the infinite gradient of sqrt at 0
        D = torch.sqrt(torch.sum(Sd_h*Sd_h,dim=1)+ 1e-09).unsqueeze(1) # sum_edges, 1
        g.edata['GD'] = torch.exp( (-D / ( 2*(self.sigma**2) ))+ 1e-09) # sum_edges, 1  # G = GaussianRBF(D)
        g.edata['sigma_GD'] = torch.sigmoid(g.edata['GD'])
        head_outs = [attn_head(g, h) for attn_head in self.heads]
        if self.merge == 'cat':
            h = torch.cat(head_outs, dim=1)
        else:
            # BUGFIX: average over the head dimension only; without dim=0,
            # torch.mean reduces the stacked tensor to a scalar and destroys
            # the per-node features.
            h = torch.mean(torch.stack(head_outs), dim=0)
        if self.residual:
            h = h_in + h # residual connection
        return h, g.ndata['Sh']
    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.num_heads, self.residual)
| 4,247 | 35.307692 | 108 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/layers/bi_gcn_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
from dgl.nn.pytorch import GraphConv
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
class NodeApplyModule(nn.Module):
    """Per-node apply function: maps node feature h_v to the affine W h_v + b."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.linear = nn.Linear(in_dim, out_dim)

    def forward(self, node):
        # read the stacked node features, project them, and hand back the
        # update dict DGL expects from an apply-nodes function
        updated = self.linear(node.data['h'])
        return {'h': updated}
class biGCNLayer(nn.Module):
    """
    Param: [in_dim, out_dim]

    GCN layer with a bilateral (cluster-aware) aggregation: each node emits a
    soft cluster assignment Sh; neighbor features are averaged with weights
    sigma(GaussianRBF(metric-distance between endpoint assignments)), so
    messages across cluster boundaries are damped. The aggregate then passes
    through NodeApplyModule (linear), optional BatchNorm, activation,
    residual connection and dropout. forward(g, feature) -> (h, Sh).
    """
    def __init__(self, in_dim, out_dim, activation, dropout, batch_norm, assign_dim, sigma=1, residual=False):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.batch_norm = batch_norm
        self.residual = residual
        self.assign_dim = assign_dim
        self.sigma = sigma  # Gaussian RBF bandwidth
        # residual add requires matching feature sizes
        if in_dim != out_dim:
            self.residual = False
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        self.activation = activation
        self.dropout = nn.Dropout(dropout)
        self.apply_mod = NodeApplyModule(in_dim, out_dim)
        # two-layer MLP producing the soft cluster assignment
        self.s1 = nn.Linear(in_dim, out_dim, bias=True)
        self.s2 = nn.Linear(out_dim, self.assign_dim, bias=True)
        # learned linear metric for the assignment distance
        self.metric = nn.Linear( self.assign_dim, self.assign_dim, bias=True)
    def forward(self, g, feature):
        h_in = feature # to be used for residual connection
        g.ndata['h'] = feature
        g.ndata['Sh'] = F.softmax(self.s2(F.relu(self.s1(feature))),dim=1) # soft assignment
        g.apply_edges(fn.u_sub_v('Sh', 'Sh', 'Sd')) # for cluster distance: (si - sj)
        Sd = g.edata['Sd'] # sum_edges, assign_dim
        Sd_h = self.metric(Sd) # sum_edges, assign_dim; D = sqrt( (si - sj) W W^t (si - sj)^t )
        # BUGFIX: +1e-9 inside sqrt -- sqrt has an infinite gradient at 0 and
        # Sd == 0 whenever both endpoints share an assignment, which produced
        # NaN gradients. Same guard as biGATLayer.
        D = torch.sqrt(torch.sum(Sd_h*Sd_h,dim=1) + 1e-9).unsqueeze(1) # sum_edges, 1
        g.edata['GD'] = torch.exp( -D / ( 2*(self.sigma**2) ) ) # sum_edges, 1  # G = GaussianRBF(D)
        g.edata['sigma_GD'] = torch.sigmoid(g.edata['GD'])
        # weighted mean of neighbor features, normalized by the gate sum
        g.update_all(fn.u_mul_e('h', 'sigma_GD', 'm'), fn.mean('m', 'sum_sigma_GD_h'))
        g.update_all(fn.copy_e('sigma_GD', 'm'), fn.sum('m', 'sum_sigma_GD'))
        g.ndata['h'] = (g.ndata['sum_sigma_GD_h'] / (g.ndata['sum_sigma_GD'] + 1e-6))
        g.apply_nodes(func=self.apply_mod)
        h = g.ndata['h'] # result of graph convolution
        if self.batch_norm:
            h = self.batchnorm_h(h) # batch normalization
        if self.activation:
            h = self.activation(h)
        if self.residual:
            h = h_in + h # residual connection
        h = self.dropout(h)
        return h, g.ndata['Sh']
    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.residual)
bi-MP-HyeokjinK | bi-MP-HyeokjinK/train/train_superpixels_graph_classification.py | """
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
from train.metrics import accuracy_MNIST_CIFAR as accuracy
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, data_loader, epoch, mode):
    """Train `model` for one epoch over batched DGL graphs.

    Returns (mean loss per iteration, accuracy over all samples, optimizer).
    `mode` is forwarded to model.unsup_loss when model.sg_flag is set;
    `epoch` is accepted for interface symmetry but not used here.
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    nb_data = 0
    gpu_mem = 0  # unused; kept as-is
    for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
        batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
        batch_e = batch_graphs.edata['feat'].to(device)
        batch_labels = batch_labels.to(device)
        optimizer.zero_grad()
        if model.sg_flag:
            # Model also returns soft cluster assignments; combine the
            # supervised loss with the unsupervised clustering loss.
            batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e)
            loss = model.sup_loss(batch_scores, batch_labels) + model.unsup_loss(batch_graphs, s_scores, mode)
        else:
            batch_scores = model.forward(batch_graphs, batch_x, batch_e)
            loss = model.loss(batch_scores, batch_labels)
        loss.backward()
        # Clip gradients to max-norm 0.3 before the update.
        nn.utils.clip_grad_norm_(model.parameters(), 0.3)
        optimizer.step()
        epoch_loss += loss.detach().item()
        # accuracy() returns a correct-count here, hence the nb_data division below.
        epoch_train_acc += accuracy(batch_scores, batch_labels)
        nb_data += batch_labels.size(0)
    epoch_loss /= (iter + 1)
    epoch_train_acc /= nb_data
    return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_sparse(model, device, data_loader, epoch, mode):
    """Evaluate `model` for one epoch without gradient tracking.

    Returns (mean loss per iteration, accuracy over all samples).
    `mode` is forwarded to model.unsup_loss when model.sg_flag is set;
    `epoch` is accepted for interface symmetry but not used here.
    """
    model.eval()
    epoch_test_loss = 0
    epoch_test_acc = 0
    nb_data = 0
    with torch.no_grad():
        for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
            batch_x = batch_graphs.ndata['feat'].to(device)
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_labels = batch_labels.to(device)
            if model.sg_flag:
                # Joint supervised + unsupervised (clustering) objective.
                batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e)
                loss = model.sup_loss(batch_scores, batch_labels) + model.unsup_loss(batch_graphs, s_scores, mode)
            else:
                batch_scores = model.forward(batch_graphs, batch_x, batch_e)
                loss = model.loss(batch_scores, batch_labels)
            epoch_test_loss += loss.detach().item()
            # accuracy() returns a correct-count; normalized by nb_data below.
            epoch_test_acc += accuracy(batch_scores, batch_labels)
            nb_data += batch_labels.size(0)
    epoch_test_loss /= (iter + 1)
    epoch_test_acc /= nb_data
    return epoch_test_loss, epoch_test_acc
"""
For WL-GNNs
"""
def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
    """Train a dense-tensor (WL-GNN) model for one epoch.

    The loader yields one dense graph tensor per item, so gradients are
    accumulated and the optimizer steps once every `batch_size` iterations.
    Returns (mean loss per iteration, accuracy over all samples, optimizer).
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    nb_data = 0
    gpu_mem = 0  # unused; kept as-is
    optimizer.zero_grad()
    for iter, (x_with_node_feat, labels) in enumerate(data_loader):
        x_with_node_feat = x_with_node_feat.to(device)
        labels = labels.to(device)
        scores = model.forward(x_with_node_feat)
        loss = model.loss(scores, labels)
        loss.backward()
        # Gradient accumulation: step on iter 0 and every batch_size-th iter.
        # NOTE(review): gradients accumulated after the final step of the
        # epoch are discarded by the zero_grad at the next epoch's start —
        # confirm this is intended.
        if not (iter%batch_size):
            optimizer.step()
            optimizer.zero_grad()
        epoch_loss += loss.detach().item()
        epoch_train_acc += accuracy(scores, labels)
        nb_data += labels.size(0)
    epoch_loss /= (iter + 1)
    epoch_train_acc /= nb_data
    return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_dense(model, device, data_loader, epoch):
    """Evaluate a dense-tensor (WL-GNN) model for one epoch.

    Returns (mean loss per iteration, accuracy over all samples).
    `epoch` is accepted for interface symmetry but not used here.
    """
    model.eval()
    total_loss = 0
    total_correct = 0
    seen = 0
    with torch.no_grad():
        for step, (x_with_node_feat, labels) in enumerate(data_loader):
            x_with_node_feat = x_with_node_feat.to(device)
            labels = labels.to(device)
            scores = model.forward(x_with_node_feat)
            loss = model.loss(scores, labels)
            total_loss += loss.detach().item()
            # accuracy() yields a correct-count; normalized by `seen` below.
            total_correct += accuracy(scores, labels)
            seen += labels.size(0)
    return total_loss / (step + 1), total_correct / seen
bi-MP-HyeokjinK | bi-MP-HyeokjinK/train/train_molecules_graph_regression.py | """
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
from train.metrics import MAE
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, data_loader, epoch, mode):
    """Train `model` for one epoch on a molecule-regression loader.

    Returns (mean loss per iteration, mean MAE per iteration, optimizer).
    `mode` is forwarded to model.unsup_loss when model.sg_flag is set;
    `epoch` is accepted for interface symmetry but not used here.
    """
    model.train()
    epoch_loss = 0
    epoch_train_mae = 0
    nb_data = 0
    gpu_mem = 0  # unused; kept as-is
    for iter, (batch_graphs, batch_targets) in enumerate(data_loader):
        batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
        batch_e = batch_graphs.edata['feat'].to(device)
        batch_targets = batch_targets.to(device)
        optimizer.zero_grad()
        try:
            # If Laplacian positional encodings are present, randomly flip
            # each eigenvector's sign (standard augmentation: eigenvectors
            # are defined only up to sign) and pass them to the model.
            batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
            sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
            sign_flip[sign_flip>=0.5] = 1.0; sign_flip[sign_flip<0.5] = -1.0
            batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
            if model.sg_flag:
                batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
            else:
                batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
        # NOTE(review): bare `except:` is presumably meant to catch the
        # missing-'pos_enc' KeyError / a forward() that takes no pos_enc,
        # but it silently masks every other error — consider narrowing.
        except:
            if model.sg_flag:
                batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e)
            else:
                batch_scores = model.forward(batch_graphs, batch_x, batch_e)
        if model.sg_flag:
            # Joint supervised + unsupervised (clustering) objective.
            loss = model.sup_loss(batch_scores, batch_targets) + model.unsup_loss(batch_graphs, s_scores, mode)
        else:
            loss = model.loss(batch_scores, batch_targets)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.detach().item()
        epoch_train_mae += MAE(batch_scores, batch_targets)
        nb_data += batch_targets.size(0)
    epoch_loss /= (iter + 1)
    epoch_train_mae /= (iter + 1)
    return epoch_loss, epoch_train_mae, optimizer
def evaluate_network_sparse(model, device, data_loader, epoch, mode):
model.eval()
epoch_test_loss = 0
epoch_test_mae = 0
nb_data = 0
with torch.no_grad():
for iter, (batch_graphs, batch_targets) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_targets = batch_targets.to(device)
try:
batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
if model.sg_flag:
batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
else:
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
except:
if model.sg_flag:
batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e)
else:
batch_scores = model.forward(batch_graphs, batch_x, batch_e)
if model.sg_flag:
loss = model.sup_loss(batch_scores, batch_targets) + model.unsup_loss(batch_graphs, s_scores, mode)
else:
loss = model.loss(batch_scores, batch_targets)
epoch_test_loss += loss.detach().item()
epoch_test_mae += MAE(batch_scores, batch_targets)
nb_data += batch_targets.size(0)
epoch_test_loss /= (iter + 1)
epoch_test_mae /= (iter + 1)
return epoch_test_loss, epoch_test_mae
"""
For WL-GNNs
"""
def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
model.train()
epoch_loss = 0
epoch_train_mae = 0
nb_data = 0
gpu_mem = 0
optimizer.zero_grad()
for iter, (x_no_edge_feat, x_with_edge_feat, targets) in enumerate(data_loader):
if x_no_edge_feat is not None:
x_no_edge_feat = x_no_edge_feat.to(device)
if x_with_edge_feat is not None:
x_with_edge_feat = x_with_edge_feat.to(device)
targets = targets.to(device)
scores = model.forward(x_no_edge_feat, x_with_edge_feat)
loss = model.loss(scores, targets)
loss.backward()
if not (iter%batch_size):
optimizer.step()
optimizer.zero_grad()
epoch_loss += loss.detach().item()
epoch_train_mae += MAE(scores, targets)
nb_data += targets.size(0)
epoch_loss /= (iter + 1)
epoch_train_mae /= (iter + 1)
return epoch_loss, epoch_train_mae, optimizer
def evaluate_network_dense(model, device, data_loader, epoch):
model.eval()
epoch_test_loss = 0
epoch_test_mae = 0
nb_data = 0
with torch.no_grad():
for iter, (x_no_edge_feat, x_with_edge_feat, targets) in enumerate(data_loader):
if x_no_edge_feat is not None:
x_no_edge_feat = x_no_edge_feat.to(device)
if x_with_edge_feat is not None:
x_with_edge_feat = x_with_edge_feat.to(device)
targets = targets.to(device)
scores = model.forward(x_no_edge_feat, x_with_edge_feat)
loss = model.loss(scores, targets)
epoch_test_loss += loss.detach().item()
epoch_test_mae += MAE(scores, targets)
nb_data += targets.size(0)
epoch_test_loss /= (iter + 1)
epoch_test_mae /= (iter + 1)
return epoch_test_loss, epoch_test_mae | 5,399 | 37.297872 | 115 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/train/train_TSP_edge_classification.py | """
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
import dgl
from train.metrics import binary_f1_score
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, data_loader, epoch, mode):
model.train()
epoch_loss = 0
epoch_train_f1 = 0
nb_data = 0
gpu_mem = 0
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
optimizer.zero_grad()
if model.sg_flag:
batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e)
loss = model.sup_loss(batch_scores, batch_labels) + model.unsup_loss(batch_graphs, s_scores, mode)
else:
batch_scores = model.forward(batch_graphs, batch_x, batch_e)
loss = model.loss(batch_scores, batch_labels)
loss.backward()
optimizer.step()
epoch_loss += loss.detach().item()
epoch_train_f1 += binary_f1_score(batch_scores, batch_labels)
epoch_loss /= (iter + 1)
epoch_train_f1 /= (iter + 1)
return epoch_loss, epoch_train_f1, optimizer
def evaluate_network_sparse(model, device, data_loader, epoch, mode):
model.eval()
epoch_test_loss = 0
epoch_test_f1 = 0
nb_data = 0
with torch.no_grad():
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
if model.sg_flag:
batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e)
loss = model.sup_loss(batch_scores, batch_labels) + model.unsup_loss(batch_graphs, s_scores, mode)
else:
batch_scores = model.forward(batch_graphs, batch_x, batch_e)
loss = model.loss(batch_scores, batch_labels)
epoch_test_loss += loss.detach().item()
epoch_test_f1 += binary_f1_score(batch_scores, batch_labels)
epoch_test_loss /= (iter + 1)
epoch_test_f1 /= (iter + 1)
return epoch_test_loss, epoch_test_f1
"""
For WL-GNNs
"""
def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
model.train()
epoch_loss = 0
epoch_train_f1 = 0
nb_data = 0
gpu_mem = 0
optimizer.zero_grad()
for iter, (x_no_edge_feat, x_with_edge_feat, labels, edge_list) in enumerate(data_loader):
if x_no_edge_feat is not None:
x_no_edge_feat = x_no_edge_feat.to(device)
if x_with_edge_feat is not None:
x_with_edge_feat = x_with_edge_feat.to(device)
labels = labels.to(device)
edge_list = edge_list[0].to(device), edge_list[1].to(device)
scores = model.forward(x_no_edge_feat, x_with_edge_feat, edge_list)
loss = model.loss(scores, labels)
loss.backward()
if not (iter%batch_size):
optimizer.step()
optimizer.zero_grad()
epoch_loss += loss.detach().item()
epoch_train_f1 += binary_f1_score(scores, labels)
epoch_loss /= (iter + 1)
epoch_train_f1 /= (iter + 1)
return epoch_loss, epoch_train_f1, optimizer
def evaluate_network_dense(model, device, data_loader, epoch):
model.eval()
epoch_test_loss = 0
epoch_test_f1 = 0
nb_data = 0
with torch.no_grad():
for iter, (x_no_edge_feat, x_with_edge_feat, labels, edge_list) in enumerate(data_loader):
if x_no_edge_feat is not None:
x_no_edge_feat = x_no_edge_feat.to(device)
if x_with_edge_feat is not None:
x_with_edge_feat = x_with_edge_feat.to(device)
labels = labels.to(device)
edge_list = edge_list[0].to(device), edge_list[1].to(device)
scores = model.forward(x_no_edge_feat, x_with_edge_feat, edge_list)
loss = model.loss(scores, labels)
epoch_test_loss += loss.detach().item()
epoch_test_f1 += binary_f1_score(scores, labels)
epoch_test_loss /= (iter + 1)
epoch_test_f1 /= (iter + 1)
return epoch_test_loss, epoch_test_f1 | 4,434 | 33.115385 | 114 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/train/metrics.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
import numpy as np
def MAE(scores, targets):
MAE = F.l1_loss(scores, targets)
MAE = MAE.detach().item()
return MAE
def accuracy_TU(scores, targets):
    """Number of correct argmax predictions (a count, not a rate)."""
    preds = scores.detach().argmax(dim=1)
    return (preds == targets).float().sum().item()
def accuracy_MNIST_CIFAR(scores, targets):
    """Count of samples whose highest-scoring class matches the target."""
    return scores.detach().argmax(dim=1).eq(targets).float().sum().item()
def accuracy_CITATION_GRAPH(scores, targets):
    """Fraction of correct argmax predictions (correct-count / batch size)."""
    preds = scores.detach().argmax(dim=1)
    correct = (preds == targets).float().sum().item()
    return correct / len(targets)
def accuracy_SBM(scores, targets):
    """Class-balanced accuracy (%): mean per-class recall, averaged over ALL
    classes (classes absent from `targets` contribute 0 to the mean)."""
    S = targets.cpu().numpy()
    # Predicted class per node (softmax is monotone, so this equals argmax of scores).
    C = np.argmax( torch.nn.Softmax(dim=1)(scores).cpu().detach().numpy() , axis=1 )
    CM = confusion_matrix(S,C).astype(np.float32)
    nb_classes = CM.shape[0]
    targets = targets.cpu().detach().numpy()
    # NOTE(review): nb_non_empty_classes is computed but never used below.
    nb_non_empty_classes = 0
    pr_classes = np.zeros(nb_classes)
    for r in range(nb_classes):
        cluster = np.where(targets==r)[0]
        if cluster.shape[0] != 0:
            # Recall for class r: correctly-labelled r's / true r's.
            pr_classes[r] = CM[r,r]/ float(cluster.shape[0])
            if CM[r,r]>0:
                nb_non_empty_classes += 1
        else:
            pr_classes[r] = 0.0
    # Average over all classes (not just the non-empty ones), as a percentage.
    acc = 100.* np.sum(pr_classes)/ float(nb_classes)
    return acc
def binary_f1_score(scores, targets):
    """Computes the F1 score using scikit-learn for binary class labels.
    Returns the F1 score for the positive class, i.e. labelled '1'.
    """
    y_true = targets.cpu().numpy()
    # Predicted class = argmax over the two score columns.
    y_pred = scores.argmax(dim=1).cpu().numpy()
    return f1_score(y_true, y_pred, average='binary')
def accuracy_VOC(scores, targets):
    """Support-weighted F1 score of argmax predictions against targets.

    Returns sklearn's f1_score with average='weighted', i.e. per-class F1
    weighted by the true support of each class.
    """
    scores = scores.detach().argmax(dim=1).cpu()
    targets = targets.cpu().detach().numpy()
    # Bug fix: sklearn's signature is f1_score(y_true, y_pred, ...); the
    # ground truth must come first so 'weighted' averaging weights each
    # class by its TRUE support, not by the predicted label counts.
    acc = f1_score(targets, scores, average='weighted')
    return acc
| 1,988 | 27.826087 | 84 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/train/train_TUs_graph_classification.py | """
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
from train.metrics import accuracy_TU as accuracy
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, data_loader, epoch, mode):
model.train()
epoch_loss = 0
epoch_train_acc = 0
nb_data = 0
gpu_mem = 0
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
optimizer.zero_grad()
if model.sg_flag:
batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e)
loss = model.sup_loss(batch_scores, batch_labels) + model.unsup_loss(batch_graphs,s_scores, mode)
else:
batch_scores = model.forward(batch_graphs, batch_x, batch_e)
loss = model.loss(batch_scores, batch_labels)
loss.backward()
optimizer.step()
epoch_loss += loss.detach().item()
epoch_train_acc += accuracy(batch_scores, batch_labels)
nb_data += batch_labels.size(0)
epoch_loss /= (iter + 1)
epoch_train_acc /= nb_data
return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_sparse(model, device, data_loader, epoch, mode):
model.eval()
epoch_test_loss = 0
epoch_test_acc = 0
nb_data = 0
with torch.no_grad():
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
if model.sg_flag:
batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e)
loss = model.sup_loss(batch_scores, batch_labels) + model.unsup_loss(batch_graphs,s_scores, mode)
else:
batch_scores = model.forward(batch_graphs, batch_x, batch_e)
loss = model.loss(batch_scores, batch_labels)
epoch_test_loss += loss.detach().item()
epoch_test_acc += accuracy(batch_scores, batch_labels)
nb_data += batch_labels.size(0)
epoch_test_loss /= (iter + 1)
epoch_test_acc /= nb_data
return epoch_test_loss, epoch_test_acc
"""
For WL-GNNs
"""
def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
model.train()
epoch_loss = 0
epoch_train_acc = 0
nb_data = 0
gpu_mem = 0
optimizer.zero_grad()
for iter, (x_with_node_feat, labels) in enumerate(data_loader):
x_with_node_feat = x_with_node_feat.to(device)
labels = labels.to(device)
scores = model.forward(x_with_node_feat)
loss = model.loss(scores, labels)
loss.backward()
if not (iter%batch_size):
optimizer.step()
optimizer.zero_grad()
epoch_loss += loss.detach().item()
epoch_train_acc += accuracy(scores, labels)
nb_data += labels.size(0)
epoch_loss /= (iter + 1)
epoch_train_acc /= nb_data
return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_dense(model, device, data_loader, epoch):
model.eval()
epoch_test_loss = 0
epoch_test_acc = 0
nb_data = 0
with torch.no_grad():
for iter, (x_with_node_feat, labels) in enumerate(data_loader):
x_with_node_feat = x_with_node_feat.to(device)
labels = labels.to(device)
scores = model.forward(x_with_node_feat)
loss = model.loss(scores, labels)
epoch_test_loss += loss.detach().item()
epoch_test_acc += accuracy(scores, labels)
nb_data += labels.size(0)
epoch_test_loss /= (iter + 1)
epoch_test_acc /= nb_data
return epoch_test_loss, epoch_test_acc
def check_patience(all_losses, best_loss, best_epoch, curr_loss, curr_epoch, counter):
    """Early-stopping bookkeeping.

    If the current loss strictly improves on the best seen, record it and
    reset the patience counter; otherwise increment the counter.
    `all_losses` is accepted for interface compatibility but unused.
    Returns (best_loss, best_epoch, counter).
    """
    if curr_loss < best_loss:
        return curr_loss, curr_epoch, 0
    return best_loss, best_epoch, counter + 1
bi-MP-HyeokjinK | bi-MP-HyeokjinK/train/train_SBMs_node_classification_for_Eval.py | """
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
import dgl
from train.metrics import accuracy_SBM as accuracy
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, data_loader, epoch, mode):
model.train()
epoch_loss = 0
epoch_train_acc = 0
nb_data = 0
gpu_mem = 0
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
optimizer.zero_grad()
if model.sg_flag:
batch_scores, features, s_scores = model.forward(batch_graphs, batch_x, batch_e)
else:
batch_scores, features = model.forward(batch_graphs, batch_x, batch_e)
if model.sg_flag:
loss = model.sup_loss(batch_scores, batch_labels) + model.unsup_loss(batch_graphs, s_scores, mode)
else:
loss = model.loss(batch_scores, batch_labels)
loss.backward()
optimizer.step()
epoch_loss += loss.detach().item()
epoch_train_acc += accuracy(batch_scores, batch_labels)
epoch_loss /= (iter + 1)
epoch_train_acc /= (iter + 1)
return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_sparse(model, device, data_loader, epoch, mode):
model.eval()
epoch_test_loss = 0
epoch_test_acc = 0
epoch_features = []
nb_data = 0
with torch.no_grad():
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
if model.sg_flag:
batch_scores, features, s_scores = model.forward(batch_graphs, batch_x, batch_e)
else:
batch_scores, features = model.forward(batch_graphs, batch_x, batch_e)
if model.sg_flag:
loss = model.sup_loss(batch_scores, batch_labels) + model.unsup_loss(batch_graphs, s_scores, mode)
else:
loss = model.loss(batch_scores, batch_labels)
epoch_test_loss += loss.detach().item()
epoch_test_acc += accuracy(batch_scores, batch_labels)
epoch_features.append(features)
epoch_test_loss /= (iter + 1)
epoch_test_acc /= (iter + 1)
return epoch_test_loss, epoch_test_acc, epoch_features
"""
For WL-GNNs
"""
def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
model.train()
epoch_loss = 0
epoch_train_acc = 0
nb_data = 0
gpu_mem = 0
optimizer.zero_grad()
for iter, (x_with_node_feat, labels) in enumerate(data_loader):
x_with_node_feat = x_with_node_feat.to(device)
labels = labels.to(device)
scores = model.forward(x_with_node_feat)
loss = model.loss(scores, labels)
loss.backward()
if not (iter%batch_size):
optimizer.step()
optimizer.zero_grad()
epoch_loss += loss.detach().item()
epoch_train_acc += accuracy(scores, labels)
epoch_loss /= (iter + 1)
epoch_train_acc /= (iter + 1)
return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_dense(model, device, data_loader, epoch):
model.eval()
epoch_test_loss = 0
epoch_test_acc = 0
nb_data = 0
with torch.no_grad():
for iter, (x_with_node_feat, labels) in enumerate(data_loader):
x_with_node_feat = x_with_node_feat.to(device)
labels = labels.to(device)
scores = model.forward(x_with_node_feat)
loss = model.loss(scores, labels)
epoch_test_loss += loss.detach().item()
epoch_test_acc += accuracy(scores, labels)
epoch_test_loss /= (iter + 1)
epoch_test_acc /= (iter + 1)
return epoch_test_loss, epoch_test_acc
| 4,130 | 30.295455 | 114 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/train/train_SBMs_node_classification.py | """
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
import dgl
from train.metrics import accuracy_SBM as accuracy
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, data_loader, epoch, mode):
model.train()
epoch_loss = 0
epoch_train_acc = 0
nb_data = 0
gpu_mem = 0
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
optimizer.zero_grad()
try:
batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
sign_flip[sign_flip>=0.5] = 1.0; sign_flip[sign_flip<0.5] = -1.0
batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
if model.sg_flag:
batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
else:
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
except:
if model.sg_flag:
batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e)
else:
batch_scores = model.forward(batch_graphs, batch_x, batch_e)
if model.sg_flag:
loss = model.sup_loss(batch_scores, batch_labels) + model.unsup_loss(batch_graphs, s_scores, mode)
else:
loss = model.loss(batch_scores, batch_labels)
loss.backward()
optimizer.step()
epoch_loss += loss.detach().item()
epoch_train_acc += accuracy(batch_scores, batch_labels)
epoch_loss /= (iter + 1)
epoch_train_acc /= (iter + 1)
return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_sparse(model, device, data_loader, epoch, mode):
model.eval()
epoch_test_loss = 0
epoch_test_acc = 0
nb_data = 0
with torch.no_grad():
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
try:
batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
if model.sg_flag:
batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
else:
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
except:
if model.sg_flag:
batch_scores, s_scores = model.forward(batch_graphs, batch_x, batch_e)
else:
batch_scores = model.forward(batch_graphs, batch_x, batch_e)
if model.sg_flag:
loss = model.sup_loss(batch_scores, batch_labels) + model.unsup_loss(batch_graphs, s_scores, mode)
else:
loss = model.loss(batch_scores, batch_labels)
epoch_test_loss += loss.detach().item()
epoch_test_acc += accuracy(batch_scores, batch_labels)
epoch_test_loss /= (iter + 1)
epoch_test_acc /= (iter + 1)
return epoch_test_loss, epoch_test_acc
"""
For WL-GNNs
"""
def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
model.train()
epoch_loss = 0
epoch_train_acc = 0
nb_data = 0
gpu_mem = 0
optimizer.zero_grad()
for iter, (x_with_node_feat, labels) in enumerate(data_loader):
x_with_node_feat = x_with_node_feat.to(device)
labels = labels.to(device)
scores = model.forward(x_with_node_feat)
loss = model.loss(scores, labels)
loss.backward()
if not (iter%batch_size):
optimizer.step()
optimizer.zero_grad()
epoch_loss += loss.detach().item()
epoch_train_acc += accuracy(scores, labels)
epoch_loss /= (iter + 1)
epoch_train_acc /= (iter + 1)
return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_dense(model, device, data_loader, epoch):
model.eval()
epoch_test_loss = 0
epoch_test_acc = 0
nb_data = 0
with torch.no_grad():
for iter, (x_with_node_feat, labels) in enumerate(data_loader):
x_with_node_feat = x_with_node_feat.to(device)
labels = labels.to(device)
scores = model.forward(x_with_node_feat)
loss = model.loss(scores, labels)
epoch_test_loss += loss.detach().item()
epoch_test_acc += accuracy(scores, labels)
epoch_test_loss /= (iter + 1)
epoch_test_acc /= (iter + 1)
return epoch_test_loss, epoch_test_acc
| 4,940 | 33.3125 | 114 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/data/TSP.py | import time
import pickle
import numpy as np
import itertools
from scipy.spatial.distance import pdist, squareform
import dgl
import torch
from torch.utils.data import Dataset
class TSP(Dataset):
def __init__(self, data_dir, split="train", num_neighbors=25, max_samples=10000):
self.data_dir = data_dir
self.split = split
self.filename = f'{data_dir}/tsp50-500_{split}.txt'
self.max_samples = max_samples
self.num_neighbors = num_neighbors
self.is_test = split.lower() in ['test', 'val']
self.graph_lists = []
self.edge_labels = []
self._prepare()
self.n_samples = len(self.edge_labels)
def _prepare(self):
print('preparing all graphs for the %s set...' % self.split.upper())
file_data = open(self.filename, "r").readlines()[:self.max_samples]
for graph_idx, line in enumerate(file_data):
line = line.split(" ") # Split into list
num_nodes = int(line.index('output')//2)
# Convert node coordinates to required format
nodes_coord = []
for idx in range(0, 2 * num_nodes, 2):
nodes_coord.append([float(line[idx]), float(line[idx + 1])])
# Compute distance matrix
W_val = squareform(pdist(nodes_coord, metric='euclidean'))
# Determine k-nearest neighbors for each node
knns = np.argpartition(W_val, kth=self.num_neighbors, axis=-1)[:, self.num_neighbors::-1]
# Convert tour nodes to required format
# Don't add final connection for tour/cycle
tour_nodes = [int(node) - 1 for node in line[line.index('output') + 1:-1]][:-1]
# Compute an edge adjacency matrix representation of tour
edges_target = np.zeros((num_nodes, num_nodes))
for idx in range(len(tour_nodes) - 1):
i = tour_nodes[idx]
j = tour_nodes[idx + 1]
edges_target[i][j] = 1
edges_target[j][i] = 1
# Add final connection of tour in edge target
edges_target[j][tour_nodes[0]] = 1
edges_target[tour_nodes[0]][j] = 1
# Construct the DGL graph
g = dgl.DGLGraph()
g.add_nodes(num_nodes)
g.ndata['feat'] = torch.Tensor(nodes_coord)
edge_feats = [] # edge features i.e. euclidean distances between nodes
edge_labels = [] # edges_targets as a list
# Important!: order of edge_labels must be the same as the order of edges in DGLGraph g
# We ensure this by adding them together
for idx in range(num_nodes):
for n_idx in knns[idx]:
if n_idx != idx: # No self-connection
g.add_edge(idx, n_idx)
edge_feats.append(W_val[idx][n_idx])
edge_labels.append(int(edges_target[idx][n_idx]))
# dgl.transform.remove_self_loop(g)
# Sanity check
assert len(edge_feats) == g.number_of_edges() == len(edge_labels)
# Add edge features
g.edata['feat'] = torch.Tensor(edge_feats).unsqueeze(-1)
# # Uncomment to add dummy edge features instead (for Residual Gated ConvNet)
# edge_feat_dim = g.ndata['feat'].shape[1] # dim same as node feature dim
# g.edata['feat'] = torch.ones(g.number_of_edges(), edge_feat_dim)
self.graph_lists.append(g)
self.edge_labels.append(edge_labels)
def __len__(self):
"""Return the number of graphs in the dataset."""
return self.n_samples
def __getitem__(self, idx):
"""
Get the idx^th sample.
Parameters
---------
idx : int
The sample index.
Returns
-------
(dgl.DGLGraph, list)
DGLGraph with node feature stored in `feat` field
And a list of labels for each edge in the DGLGraph.
"""
return self.graph_lists[idx], self.edge_labels[idx]
class TSPDatasetDGL(Dataset):
def __init__(self, name):
self.name = name
self.train = TSP(data_dir='./data/TSP', split='train', num_neighbors=25, max_samples=10000)
self.val = TSP(data_dir='./data/TSP', split='val', num_neighbors=25, max_samples=1000)
self.test = TSP(data_dir='./data/TSP', split='test', num_neighbors=25, max_samples=1000)
class TSPDataset(Dataset):
def __init__(self, name):
start = time.time()
print("[I] Loading dataset %s..." % (name))
self.name = name
data_dir = 'data/TSP/'
with open(data_dir+name+'.pkl',"rb") as f:
f = pickle.load(f)
self.train = f[0]
self.test = f[1]
self.val = f[2]
print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
print("[I] Finished loading.")
print("[I] Data load time: {:.4f}s".format(time.time()-start))
# form a mini batch from a given list of samples = [(graph, label) pairs]
def collate(self, samples):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
# Edge classification labels need to be flattened to 1D lists
labels = torch.LongTensor(np.array(list(itertools.chain(*labels))))
#tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
#tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
#snorm_n = torch.cat(tab_snorm_n).sqrt()
#tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
#tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
#snorm_e = torch.cat(tab_snorm_e).sqrt()
batched_graph = dgl.batch(graphs)
return batched_graph, labels
# prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
def collate_dense_gnn(self, samples, edge_feat):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
# Edge classification labels need to be flattened to 1D lists
labels = torch.LongTensor(np.array(list(itertools.chain(*labels))))
#tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
#tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
#snorm_n = tab_snorm_n[0][0].sqrt()
#batched_graph = dgl.batch(graphs)
g = graphs[0]
adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
"""
Adapted from https://github.com/leichen2018/Ring-GNN/
Assigning node and edge feats::
we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
"""
zero_adj = torch.zeros_like(adj)
in_node_dim = g.ndata['feat'].shape[1]
in_edge_dim = g.edata['feat'].shape[1]
if edge_feat:
# use edge feats also to prepare adj
adj_with_edge_feat = torch.stack([zero_adj for j in range(in_node_dim + in_edge_dim)])
adj_with_edge_feat = torch.cat([adj.unsqueeze(0), adj_with_edge_feat], dim=0)
us, vs = g.edges()
for idx, edge_feat in enumerate(g.edata['feat']):
adj_with_edge_feat[1+in_node_dim:, us[idx], vs[idx]] = edge_feat
for node, node_feat in enumerate(g.ndata['feat']):
adj_with_edge_feat[1:1+in_node_dim, node, node] = node_feat
x_with_edge_feat = adj_with_edge_feat.unsqueeze(0)
return None, x_with_edge_feat, labels, g.edges()
else:
# use only node feats to prepare adj
adj_no_edge_feat = torch.stack([zero_adj for j in range(in_node_dim)])
adj_no_edge_feat = torch.cat([adj.unsqueeze(0), adj_no_edge_feat], dim=0)
for node, node_feat in enumerate(g.ndata['feat']):
adj_no_edge_feat[1:1+in_node_dim, node, node] = node_feat
x_no_edge_feat = adj_no_edge_feat.unsqueeze(0)
return x_no_edge_feat, None, labels, g.edges()
def _sym_normalize_adj(self, adj):
deg = torch.sum(adj, dim = 0)#.squeeze()
deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
deg_inv = torch.diag(deg_inv)
return torch.mm(deg_inv, torch.mm(adj, deg_inv))
def _add_self_loops(self):
"""
No self-loop support since TSP edge classification dataset.
"""
raise NotImplementedError
| 9,283 | 41.587156 | 127 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/data/superpixels.py | import os
import pickle
from scipy.spatial.distance import cdist
import numpy as np
import itertools
import dgl
import torch
import torch.utils.data
import time
import csv
from sklearn.model_selection import StratifiedShuffleSplit
def sigma(dists, kth=8):
    """Per-node Gaussian length scale from a pairwise-distance matrix.

    For each row, averages the (kth + 1) smallest distances (the zero
    self-distance is among them) and divides by kth.  Graphs too small for
    the partition fall back to a constant scale of 1; that value is
    irrelevant downstream since such graphs bypass the k-NN selection.
    """
    try:
        nearest = np.partition(dists, kth, axis=-1)[:, :kth + 1]
        scale = nearest.sum(axis=-1, keepdims=True) / kth
    except ValueError:
        # fewer than kth+1 columns: np.partition rejects the kth index
        scale = np.ones((dists.shape[0], 1))
    return scale + 1e-8  # epsilon keeps the scale strictly positive
def compute_adjacency_matrix_images(coord, feat, use_feat=True, kth=8):
    """Dense Gaussian-kernel adjacency for one superpixel image.

    The kernel exponent combines the spatial distance and, when ``use_feat``
    is set, the feature distance, each scaled by its own per-node sigma.
    The matrix is symmetrized and its diagonal zeroed (no self-edges).
    Note: ``kth`` is accepted but not forwarded to ``sigma`` (matching the
    historical behavior of this helper).
    """
    coord = coord.reshape(-1, 2)
    spatial_dist = cdist(coord, coord)
    exponent = (spatial_dist / sigma(spatial_dist)) ** 2
    if use_feat:
        feature_dist = cdist(feat, feat)
        exponent = exponent + (feature_dist / sigma(feature_dist)) ** 2
    A = np.exp(-exponent)

    A = 0.5 * (A + A.T)  # enforce exact symmetry
    A[np.diag_indices_from(A)] = 0
    return A
def compute_edges_list(A, kth=8+1):
    """k-nearest-neighbour edge lists from a dense similarity matrix.

    Returns ``(neighbour_indices, neighbour_values)``, one row per node.
    Graphs with more than 9 nodes keep the most-similar columns selected via
    partitioning; smaller graphs become fully connected, minus self-loops
    (except the degenerate single-node graph, which keeps its self-loop).
    """
    num_nodes = A.shape[0]
    new_kth = num_nodes - kth

    if num_nodes > 9:
        # partition places the (new_kth-1)-th value in its sorted position;
        # the slice keeps the high-similarity tail, dropping the last column
        knns = np.argpartition(A, new_kth - 1, axis=-1)[:, new_kth:-1]
        knn_values = np.partition(A, new_kth - 1, axis=-1)[:, new_kth:-1]  # NEW
        return knns, knn_values  # NEW

    # small graph: start from a fully-connected neighbour list
    knns = np.tile(np.arange(num_nodes), num_nodes).reshape(num_nodes, num_nodes)
    knn_values = A  # NEW
    if num_nodes != 1:
        # drop the self-loop entry from every row
        off_diag = knns != np.arange(num_nodes)[:, None]
        knn_values = A[off_diag].reshape(num_nodes, -1)  # NEW
        knns = knns[off_diag].reshape(num_nodes, -1)
    return knns, knn_values  # NEW
class SuperPixDGL(torch.utils.data.Dataset):
    """Superpixel graphs (MNIST/CIFAR10) as an in-memory DGL dataset.

    Loads per-image superpixel intensities and coordinates from a pickle
    file and converts every image into a k-NN DGLGraph whose node features
    are [mean pixel intensity, x, y] (stored as float16) and whose edge
    features are the scalar kernel similarities of the selected neighbours.
    """
    def __init__(self,
                 data_dir,
                 dataset,
                 split,
                 use_mean_px=True,
                 use_coord=True):
        # split: 'train' or 'test'; use_mean_px: include feature distance in
        # the adjacency kernel; use_coord is stored but not read here
        self.split = split
        self.graph_lists = []

        if dataset == 'MNIST':
            self.img_size = 28
            with open(os.path.join(data_dir, 'mnist_75sp_%s.pkl' % split), 'rb') as f:
                self.labels, self.sp_data = pickle.load(f)
            self.graph_labels = torch.LongTensor(self.labels)
        elif dataset == 'CIFAR10':
            self.img_size = 32
            with open(os.path.join(data_dir, 'cifar10_150sp_%s.pkl' % split), 'rb') as f:
                self.labels, self.sp_data = pickle.load(f)
            self.graph_labels = torch.LongTensor(self.labels)

        self.use_mean_px = use_mean_px
        self.use_coord = use_coord
        self.n_samples = len(self.labels)
        self._prepare()

    def _prepare(self):
        """Build adjacency, k-NN edge lists and node/edge features, then the DGL graphs."""
        print("preparing %d graphs for the %s set..." % (self.n_samples, self.split.upper()))
        self.Adj_matrices, self.node_features, self.edges_lists, self.edge_features = [], [], [], []
        for index, sample in enumerate(self.sp_data):
            mean_px, coord = sample[:2]

            try:
                coord = coord / self.img_size
            except AttributeError:
                # coord is not array-like here (variable-size VOC images);
                # leave coordinates unnormalized in that case
                VOC_has_variable_image_sizes = True

            if self.use_mean_px:
                A = compute_adjacency_matrix_images(coord, mean_px) # using super-pixel locations + features
            else:
                A = compute_adjacency_matrix_images(coord, mean_px, False) # using only super-pixel locations
            edges_list, edge_values_list = compute_edges_list(A) # NEW

            N_nodes = A.shape[0]

            mean_px = mean_px.reshape(N_nodes, -1)
            coord = coord.reshape(N_nodes, 2)
            # node feature = [mean pixel value(s), normalized x, normalized y]
            x = np.concatenate((mean_px, coord), axis=1)

            # flatten row-wise so values line up with the edge insertion order below
            edge_values_list = edge_values_list.reshape(-1) # NEW # TO DOUBLE-CHECK !

            self.node_features.append(x)
            self.edge_features.append(edge_values_list) # NEW
            self.Adj_matrices.append(A)
            self.edges_lists.append(edges_list)

        for index in range(len(self.sp_data)):
            g = dgl.DGLGraph()
            g.add_nodes(self.node_features[index].shape[0])
            # stored in half precision; promoted to float32 at collate time
            g.ndata['feat'] = torch.Tensor(self.node_features[index]).half()

            for src, dsts in enumerate(self.edges_lists[index]):
                # handling for 1 node where the self loop would be the only edge
                # since, VOC Superpixels has few samples (5 samples) with only 1 node
                if self.node_features[index].shape[0] == 1:
                    g.add_edges(src, dsts)
                else:
                    g.add_edges(src, dsts[dsts!=src])

            # adding edge features for Residual Gated ConvNet
            edge_feat_dim = g.ndata['feat'].shape[1] # dim same as node feature dim
            #g.edata['feat'] = torch.ones(g.number_of_edges(), edge_feat_dim).half()
            # edge features assigned in edge-id (insertion) order — assumes the
            # flattened edge_features row order matches the add_edges loop above
            g.edata['feat'] = torch.Tensor(self.edge_features[index]).unsqueeze(1).half() # NEW

            self.graph_lists.append(g)

    def __len__(self):
        """Return the number of graphs in the dataset."""
        return self.n_samples

    def __getitem__(self, idx):
        """
            Get the idx^th sample.
            Parameters
            ---------
            idx : int
                The sample index.
            Returns
            -------
            (dgl.DGLGraph, int)
                DGLGraph with node feature stored in `feat` field
                And its label.
        """
        return self.graph_lists[idx], self.graph_labels[idx]
class DGLFormDataset(torch.utils.data.Dataset):
    """Wrap parallel lists (graphs, labels, ...) as a torch Dataset.

    Every supplied list must have the same length; indexing yields the
    tuple of the index-th entry of each list.
    """
    def __init__(self, *lists):
        expected_len = len(lists[0])
        assert all(len(li) == expected_len for li in lists)
        self.lists = lists
        self.graph_lists = lists[0]
        self.graph_labels = lists[1]

    def __getitem__(self, index):
        return tuple(li[index] for li in self.lists)

    def __len__(self):
        return len(self.lists[0])
class SuperPixDatasetDGL(torch.utils.data.Dataset):
    def __init__(self, name, num_val=5000):
        """Build train/val/test superpixel graph datasets for MNIST/CIFAR10.

        The first ``num_val`` samples of the training pickle become the
        validation split.  The superpixel .pkl inputs follow
        https://github.com/bknyaz/graph_attention_pool; see SuperPixDGL for
        the per-split graph construction details.
        """
        t_data = time.time()
        self.name = name

        # NOTE: the original code toggled use_mean_px from True to False, so
        # only super-pixel locations define the adjacency here.
        use_mean_px = False
        if use_mean_px:
            print('Adj matrix defined from super-pixel locations + features')
        else:
            print('Adj matrix defined from super-pixel locations (only)')
        use_coord = True

        self.test = SuperPixDGL("./data/superpixels", dataset=self.name, split='test',
                                use_mean_px=use_mean_px,
                                use_coord=use_coord)
        self.train_ = SuperPixDGL("./data/superpixels", dataset=self.name, split='train',
                                  use_mean_px=use_mean_px,
                                  use_coord=use_coord)

        # carve the validation split off the front of the training data
        _val_graphs, _val_labels = self.train_[:num_val]
        _train_graphs, _train_labels = self.train_[num_val:]
        self.val = DGLFormDataset(_val_graphs, _val_labels)
        self.train = DGLFormDataset(_train_graphs, _train_labels)

        print("[I] Data load time: {:.4f}s".format(time.time()-t_data))
def self_loop(g):
    """
        Utility function only, to be used only when necessary as per user self_loop flag
        : Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
        This function is called inside a function in SuperPixDataset class.
    """
    # Rebuild the graph: copy nodes/features, re-add every non-self edge,
    # then add exactly one self-loop per node.
    new_g = dgl.DGLGraph()
    new_g.add_nodes(g.number_of_nodes())
    new_g.ndata['feat'] = g.ndata['feat']

    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    non_self_edges_idx = src != dst
    nodes = np.arange(g.number_of_nodes())
    # edge ids follow insertion order: original non-self edges first, self-loops last
    new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
    new_g.add_edges(nodes, nodes)

    # This new edata is not used since this function gets called only for GCN, GAT
    # However, we need this for the generic requirement of ndata and edata
    new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
    return new_g
class SuperPixDataset(torch.utils.data.Dataset):
    """Pickled superpixel dataset (train/val/test) with collate helpers.

    Expects data/superpixels/<name>.pkl to hold a (train, val, test) tuple
    whose items are (DGLGraph, label) pairs with node/edge features stored
    in half precision.
    """
    def __init__(self, name):
        """
            Loading Superpixels datasets
        """
        start = time.time()
        print("[I] Loading dataset %s..." % (name))
        self.name = name
        data_dir = 'data/superpixels/'
        with open(data_dir+name+'.pkl',"rb") as f:
            # the pickle holds (train, val, test); `f` is deliberately rebound
            f = pickle.load(f)
            self.train = f[0]
            self.val = f[1]
            self.test = f[2]
        print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = torch.cat(tab_snorm_n).sqrt()
        #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
        #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
        #snorm_e = torch.cat(tab_snorm_e).sqrt()
        # promote the stored float16 features to float32 in place
        for idx, graph in enumerate(graphs):
            graphs[idx].ndata['feat'] = graph.ndata['feat'].float()
            graphs[idx].edata['feat'] = graph.edata['feat'].float()
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels

    # prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
    def collate_dense_gnn(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = tab_snorm_n[0][0].sqrt()
        #batched_graph = dgl.batch(graphs)

        # only the first sample is used — dense models are expected to be
        # fed with batch size 1
        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
            Adapted from https://github.com/leichen2018/Ring-GNN/
            Assigning node and edge feats::
            we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
            Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
            The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
            The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        in_dim = g.ndata['feat'].shape[1]
        # use node feats to prepare adj
        adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
        adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
        # place node i's feature vector on channel slice [1:] of diagonal (i, i)
        for node, node_feat in enumerate(g.ndata['feat']):
            adj_node_feat[1:, node, node] = node_feat
        x_node_feat = adj_node_feat.unsqueeze(0)
        return x_node_feat, labels

    def _sym_normalize_adj(self, adj):
        """Return D^{-1/2} A D^{-1/2}; zero-degree nodes stay all-zero."""
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
        self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
        self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]

        # re-wrap so the splits expose the updated graph lists
        self.train = DGLFormDataset(self.train.graph_lists, self.train.graph_labels)
        self.val = DGLFormDataset(self.val.graph_lists, self.val.graph_labels)
        self.test = DGLFormDataset(self.test.graph_lists, self.test.graph_labels)
| 13,741 | 37.385475 | 127 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/data/molecules.py | import torch
import pickle
import torch.utils.data
import time
import os
import numpy as np
import csv
import dgl
from scipy import sparse as sp
import numpy as np
# *NOTE
# The dataset pickle and index files are in ./zinc_molecules/ dir
# [<split>.pickle and <split>.index; for split 'train', 'val' and 'test']
class MoleculeDGL(torch.utils.data.Dataset):
    """One ZINC split loaded from a pickle and converted to DGL graphs.

    Node features are atom-type indices; edge features are bond-type
    indices; the regression target is logP_SA_cycle_normalized.
    """
    def __init__(self, data_dir, split, num_graphs=None):
        self.data_dir = data_dir
        self.split = split
        self.num_graphs = num_graphs

        with open(data_dir + "/%s.pickle" % self.split,"rb") as f:
            self.data = pickle.load(f)

        if self.num_graphs in [10000, 1000]:
            # loading the sampled indices from file ./zinc_molecules/<split>.index
            with open(data_dir + "/%s.index" % self.split,"r") as f:
                data_idx = [list(map(int, idx)) for idx in csv.reader(f)]
                self.data = [ self.data[i] for i in data_idx[0] ]

            assert len(self.data)==num_graphs, "Sample num_graphs again; available idx: train/val/test => 10k/1k/1k"

        """
        data is a list of Molecule dict objects with following attributes

          molecule = data[idx]
        ; molecule['num_atom'] : nb of atoms, an integer (N)
        ; molecule['atom_type'] : tensor of size N, each element is an atom type, an integer between 0 and num_atom_type
        ; molecule['bond_type'] : tensor of size N x N, each element is a bond type, an integer between 0 and num_bond_type
        ; molecule['logP_SA_cycle_normalized'] : the chemical property to regress, a float variable
        """

        self.graph_lists = []
        self.graph_labels = []
        self.n_samples = len(self.data)
        self._prepare()

    def _prepare(self):
        """Convert each molecule dict into a DGLGraph with atom/bond features."""
        print("preparing %d graphs for the %s set..." % (self.num_graphs, self.split.upper()))

        for molecule in self.data:
            node_features = molecule['atom_type'].long()

            adj = molecule['bond_type']
            edge_list = (adj != 0).nonzero() # converting adj matrix to edge_list

            edge_idxs_in_adj = edge_list.split(1, dim=1)
            # edge_features rows follow edge_list order, matching the
            # add_edges insertion order below (edge ids align with edata)
            edge_features = adj[edge_idxs_in_adj].reshape(-1).long()

            # Create the DGL Graph
            g = dgl.DGLGraph()
            g.add_nodes(molecule['num_atom'])
            g.ndata['feat'] = node_features

            for src, dst in edge_list:
                g.add_edges(src.item(), dst.item())
            g.edata['feat'] = edge_features

            self.graph_lists.append(g)
            self.graph_labels.append(molecule['logP_SA_cycle_normalized'])

    def __len__(self):
        """Return the number of graphs in the dataset."""
        return self.n_samples

    def __getitem__(self, idx):
        """
            Get the idx^th sample.
            Parameters
            ---------
            idx : int
                The sample index.
            Returns
            -------
            (dgl.DGLGraph, int)
                DGLGraph with node feature stored in `feat` field
                And its label.
        """
        return self.graph_lists[idx], self.graph_labels[idx]
class MoleculeDatasetDGL(torch.utils.data.Dataset):
    """ZINC molecule train/val/test splits wrapped as DGL datasets."""
    def __init__(self, name='Zinc'):
        t0 = time.time()
        self.name = name

        # known meta-info about the zinc dataset; can be calculated as well
        self.num_atom_type = 28
        self.num_bond_type = 4

        if self.name == 'ZINC-full':
            data_dir = './data/molecules/zinc_full'
            sizes = {'train': 220011, 'val': 24445, 'test': 5000}
        else:
            data_dir = './data/molecules'
            sizes = {'train': 10000, 'val': 1000, 'test': 1000}

        self.train = MoleculeDGL(data_dir, 'train', num_graphs=sizes['train'])
        self.val = MoleculeDGL(data_dir, 'val', num_graphs=sizes['val'])
        self.test = MoleculeDGL(data_dir, 'test', num_graphs=sizes['test'])
        print("Time taken: {:.4f}s".format(time.time()-t0))
def self_loop(g):
    """
        Utility function only, to be used only when necessary as per user self_loop flag
        : Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
        This function is called inside a function in MoleculeDataset class.
    """
    # Rebuild the graph: copy nodes/features, re-add every non-self edge,
    # then add exactly one self-loop per node.
    new_g = dgl.DGLGraph()
    new_g.add_nodes(g.number_of_nodes())
    new_g.ndata['feat'] = g.ndata['feat']

    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    non_self_edges_idx = src != dst
    nodes = np.arange(g.number_of_nodes())
    # edge ids follow insertion order: original non-self edges first, self-loops last
    new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
    new_g.add_edges(nodes, nodes)

    # This new edata is not used since this function gets called only for GCN, GAT
    # However, we need this for the generic requirement of ndata and edata
    new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
    return new_g
def positional_encoding(g, pos_enc_dim):
    """
        Graph positional encoding v/ Laplacian eigenvectors
    """

    # Laplacian: L = I - D^{-1/2} A D^{-1/2}; in-degrees are clipped at 1 to
    # avoid dividing by zero for isolated nodes
    A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
    N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
    L = sp.eye(g.number_of_nodes()) - N * A * N

    # Eigenvectors with numpy
    # np.linalg.eig may return complex output; complex argsort orders by real
    # part first, and np.real drops the (numerically tiny) imaginary parts
    EigVal, EigVec = np.linalg.eig(L.toarray())
    idx = EigVal.argsort() # increasing order
    EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])
    # skip the first (trivial) eigenvector; keep the next pos_enc_dim as features
    g.ndata['pos_enc'] = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float()

    # # Eigenvectors with scipy
    # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
    # EigVec = EigVec[:, EigVal.argsort()] # increasing order
    # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()

    return g
class MoleculeDataset(torch.utils.data.Dataset):
    """Pickled ZINC dataset (train/val/test + type counts) with collate helpers.

    Expects data/molecules/<name>.pkl to hold
    (train, val, test, num_atom_type, num_bond_type).
    """
    def __init__(self, name):
        """
            Loading SBM datasets
        """
        start = time.time()
        print("[I] Loading dataset %s..." % (name))
        self.name = name
        data_dir = 'data/molecules/'
        with open(data_dir+name+'.pkl',"rb") as f:
            # `f` is deliberately rebound to the unpickled tuple
            f = pickle.load(f)
            self.train = f[0]
            self.val = f[1]
            self.test = f[2]
            self.num_atom_type = f[3]
            self.num_bond_type = f[4]
        print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        # regression targets shaped (batch, 1)
        labels = torch.tensor(np.array(labels)).unsqueeze(1)
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = torch.cat(tab_snorm_n).sqrt()
        #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
        #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
        #snorm_e = torch.cat(tab_snorm_e).sqrt()
        batched_graph = dgl.batch(graphs)

        return batched_graph, labels

    # prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
    def collate_dense_gnn(self, samples, edge_feat):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels)).unsqueeze(1)
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = tab_snorm_n[0][0].sqrt()
        #batched_graph = dgl.batch(graphs)

        # only the first sample is used — dense models are expected to be
        # fed with batch size 1
        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
            Adapted from https://github.com/leichen2018/Ring-GNN/
            Assigning node and edge feats::
            we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
            Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
            The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
            The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        if edge_feat:
            # use edge feats also to prepare adj
            # channels: [adj | one-hot atom type | one-hot bond type]
            adj_with_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type + self.num_bond_type)])
            adj_with_edge_feat = torch.cat([adj.unsqueeze(0), adj_with_edge_feat], dim=0)

            us, vs = g.edges()
            # one-hot encode each edge's bond type at its (u, v) position
            for idx, edge_label in enumerate(g.edata['feat']):
                adj_with_edge_feat[edge_label.item()+1+self.num_atom_type][us[idx]][vs[idx]] = 1

            # one-hot encode each node's atom type on the diagonal
            for node, node_label in enumerate(g.ndata['feat']):
                adj_with_edge_feat[node_label.item()+1][node][node] = 1

            x_with_edge_feat = adj_with_edge_feat.unsqueeze(0)
            return None, x_with_edge_feat, labels

        else:
            # use only node feats to prepare adj
            adj_no_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type)])
            adj_no_edge_feat = torch.cat([adj.unsqueeze(0), adj_no_edge_feat], dim=0)

            for node, node_label in enumerate(g.ndata['feat']):
                adj_no_edge_feat[node_label.item()+1][node][node] = 1

            x_no_edge_feat = adj_no_edge_feat.unsqueeze(0)
            return x_no_edge_feat, None, labels

    def _sym_normalize_adj(self, adj):
        """Return D^{-1/2} A D^{-1/2}; zero-degree nodes stay all-zero."""
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
        self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
        self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]

    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors
        self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
        self.val.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val.graph_lists]
        self.test.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test.graph_lists]
| 11,339 | 38.65035 | 127 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/data/TUs.py | import torch
import pickle
import torch.utils.data
import time
import os
import numpy as np
import csv
import dgl
from dgl.data import TUDataset
from dgl.data import LegacyTUDataset
import random
random.seed(42)
from sklearn.model_selection import StratifiedKFold, train_test_split
import csv
def get_all_split_idx(dataset):
    """
    - Split total number of graphs into 3 (train, val and test) in 80:10:10
    - Stratified split proportionate to original distribution of data with respect to classes
    - Using sklearn to perform the split and then save the indexes
    - Preparing 10 such combinations of indexes split to be used in Graph NNs
    - As with KFold, each of the 10 fold have unique test set.

    Returns a dict {'train': [...], 'val': [...], 'test': [...]} with one
    index list per fold, read back from the .index files on disk.
    """
    root_idx_dir = './data/TUs/'
    if not os.path.exists(root_idx_dir):
        os.makedirs(root_idx_dir)
    all_idx = {}

    # If there are no idx files, do the split and store the files
    if not (os.path.exists(root_idx_dir + dataset.name + '_train.index')):
        print("[!] Splitting the data into train/val/test ...")

        # Using 10-fold cross val to compare with benchmark papers
        k_splits = 10
        cross_val_fold = StratifiedKFold(n_splits=k_splits, shuffle=True)

        # Temporary index assignment: tag each graph with its position in the
        # full dataset so the original index survives the shuffling below.
        for i in range(len(dataset.graph_lists)):
            dataset[i][0].a = lambda: None
            setattr(dataset[i][0].a, 'index', i)

        # NOTE(review): `format_dataset` is used here as a module-level name;
        # confirm it is defined/imported in this module (TUsDataset only
        # exposes it as a method in this file).
        for indexes in cross_val_fold.split(dataset.graph_lists, dataset.graph_labels):
            remain_index, test_index = indexes[0], indexes[1]

            remain_set = format_dataset([dataset[index] for index in remain_index])

            # Gets final 'train' and 'val' (0.111 of 90% ~= 10% of the whole)
            train, val, _, __ = train_test_split(remain_set,
                                                 range(len(remain_set.graph_lists)),
                                                 test_size=0.111,
                                                 stratify=remain_set.graph_labels)

            train, val = format_dataset(train), format_dataset(val)
            test = format_dataset([dataset[index] for index in test_index])

            # Extracting only idxs
            idx_train = [item[0].a.index for item in train]
            idx_val = [item[0].a.index for item in val]
            idx_test = [item[0].a.index for item in test]

            # Append one row per fold; context managers guarantee the handles
            # are flushed and closed (the original code leaked open files).
            with open(root_idx_dir + dataset.name + '_train.index', 'a+') as f_train:
                csv.writer(f_train).writerow(idx_train)
            with open(root_idx_dir + dataset.name + '_val.index', 'a+') as f_val:
                csv.writer(f_val).writerow(idx_val)
            with open(root_idx_dir + dataset.name + '_test.index', 'a+') as f_test:
                csv.writer(f_test).writerow(idx_test)

        print("[!] Splitting done!")

    # reading idx from the files
    for section in ['train', 'val', 'test']:
        with open(root_idx_dir + dataset.name + '_' + section + '.index', 'r') as f:
            reader = csv.reader(f)
            all_idx[section] = [list(map(int, idx)) for idx in reader]
    return all_idx
class DGLFormDataset(torch.utils.data.Dataset):
    """Dataset adapter over parallel lists ('graphs', 'labels', ...).

    All lists must share one length; item access returns one tuple drawn
    from the same position of every list.
    """
    def __init__(self, *lists):
        first = lists[0]
        assert all(len(other) == len(first) for other in lists)
        self.lists = lists
        self.graph_lists = lists[0]
        self.graph_labels = lists[1]

    def __getitem__(self, index):
        return tuple(li[index] for li in self.lists)

    def __len__(self):
        return len(self.lists[0])
def self_loop(g):
    """
        Utility function only, to be used only when necessary as per user self_loop flag
        : Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
        This function is called inside a function in TUsDataset class.
    """
    # Rebuild the graph: copy nodes/features, re-add every non-self edge,
    # then add exactly one self-loop per node.
    new_g = dgl.DGLGraph()
    new_g.add_nodes(g.number_of_nodes())
    new_g.ndata['feat'] = g.ndata['feat']

    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    non_self_edges_idx = src != dst
    nodes = np.arange(g.number_of_nodes())
    # edge ids follow insertion order: original non-self edges first, self-loops last
    new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
    new_g.add_edges(nodes, nodes)

    # This new edata is not used since this function gets called only for GCN, GAT
    # However, we need this for the generic requirement of ndata and edata
    new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
    return new_g
class TUsDataset(torch.utils.data.Dataset):
    """TU graph-classification benchmark wrapped with 10-fold stratified splits.

    Each of self.train / self.val / self.test is a list of 10 DGLFormDataset
    objects, one per cross-validation fold.
    """
    def __init__(self, name):
        t0 = time.time()
        self.name = name

        #dataset = TUDataset(self.name, hidden_size=1)
        dataset = LegacyTUDataset(self.name, hidden_size=1)   # dgl 4.0

        # frankenstein has labels 0 and 2; so correcting them as 0 and 1
        if self.name == "FRANKENSTEIN":
            dataset.graph_labels = np.array([1 if x==2 else x for x in dataset.graph_labels])

        print("[!] Dataset: ", self.name)

        # this function splits data into train/val/test and returns the indices
        self.all_idx = get_all_split_idx(dataset)

        self.all = dataset
        self.train = [self.format_dataset([dataset[idx] for idx in self.all_idx['train'][split_num]]) for split_num in range(10)]
        self.val = [self.format_dataset([dataset[idx] for idx in self.all_idx['val'][split_num]]) for split_num in range(10)]
        self.test = [self.format_dataset([dataset[idx] for idx in self.all_idx['test'][split_num]]) for split_num in range(10)]

        print("Time taken: {:.4f}s".format(time.time()-t0))

    def format_dataset(self, dataset):
        """
            Utility function to recover data,
            INTO-> dgl/pytorch compatible format
        """
        graphs = [data[0] for data in dataset]
        labels = [data[1] for data in dataset]

        for graph in graphs:
            #graph.ndata['feat'] = torch.FloatTensor(graph.ndata['feat'])
            graph.ndata['feat'] = graph.ndata['feat'].float() # dgl 4.0
            # adding edge features for Residual Gated ConvNet, if not there
            if 'feat' not in graph.edata.keys():
                edge_feat_dim = graph.ndata['feat'].shape[1] # dim same as node feature dim
                graph.edata['feat'] = torch.ones(graph.number_of_edges(), edge_feat_dim)

        return DGLFormDataset(graphs, labels)

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = torch.cat(tab_snorm_n).sqrt()
        #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
        #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
        #snorm_e = torch.cat(tab_snorm_e).sqrt()
        batched_graph = dgl.batch(graphs)

        return batched_graph, labels

    # prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
    def collate_dense_gnn(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = tab_snorm_n[0][0].sqrt()
        #batched_graph = dgl.batch(graphs)

        # only the first sample is used — dense models are expected to be
        # fed with batch size 1
        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
            Adapted from https://github.com/leichen2018/Ring-GNN/
            Assigning node and edge feats::
            we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
            Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
            The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
            The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        in_dim = g.ndata['feat'].shape[1]
        # use node feats to prepare adj
        adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
        adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
        # place node i's feature vector on channel slice [1:] of diagonal (i, i)
        for node, node_feat in enumerate(g.ndata['feat']):
            adj_node_feat[1:, node, node] = node_feat
        x_node_feat = adj_node_feat.unsqueeze(0)
        return x_node_feat, labels

    def _sym_normalize_adj(self, adj):
        """Return D^{-1/2} A D^{-1/2}; zero-degree nodes stay all-zero."""
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        for split_num in range(10):
            self.train[split_num].graph_lists = [self_loop(g) for g in self.train[split_num].graph_lists]
            self.val[split_num].graph_lists = [self_loop(g) for g in self.val[split_num].graph_lists]
            self.test[split_num].graph_lists = [self_loop(g) for g in self.test[split_num].graph_lists]

        # re-wrap each fold so it exposes the updated graph lists
        for split_num in range(10):
            self.train[split_num] = DGLFormDataset(self.train[split_num].graph_lists, self.train[split_num].graph_labels)
            self.val[split_num] = DGLFormDataset(self.val[split_num].graph_lists, self.val[split_num].graph_labels)
            self.test[split_num] = DGLFormDataset(self.test[split_num].graph_lists, self.test[split_num].graph_labels)
| 10,421 | 40.357143 | 129 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/data/SBMs.py |
import time
import os
import pickle
import numpy as np
import dgl
import torch
from scipy import sparse as sp
import numpy as np
class load_SBMsDataSetDGL(torch.utils.data.Dataset):
    """One SBM node-classification split loaded from a pickle as DGL graphs.

    Each pickled sample exposes a dense adjacency `W`, `node_feat` and a
    per-node `node_label`; every edge gets a constant scalar feature of 1.
    """

    def __init__(self,
                 data_dir,
                 name,
                 split):

        self.split = split
        self.is_test = split.lower() in ['test', 'val']
        with open(os.path.join(data_dir, name + '_%s.pkl' % self.split), 'rb') as f:
            self.dataset = pickle.load(f)
        self.node_labels = []
        self.graph_lists = []
        self.n_samples = len(self.dataset)
        self._prepare()

    def _prepare(self):
        """Convert each pickled sample into a DGLGraph with node/edge feats."""
        print("preparing %d graphs for the %s set..." % (self.n_samples, self.split.upper()))

        for data in self.dataset:
            node_features = data.node_feat
            edge_list = (data.W != 0).nonzero()  # converting adj matrix to edge_list

            # Create the DGL Graph
            g = dgl.DGLGraph()
            g.add_nodes(node_features.size(0))
            g.ndata['feat'] = node_features.long()
            for src, dst in edge_list:
                g.add_edges(src.item(), dst.item())

            # adding edge features for Residual Gated ConvNet
            #edge_feat_dim = g.ndata['feat'].size(1) # dim same as node feature dim
            edge_feat_dim = 1 # dim same as node feature dim
            g.edata['feat'] = torch.ones(g.number_of_edges(), edge_feat_dim)

            self.graph_lists.append(g)
            self.node_labels.append(data.node_label)

    def __len__(self):
        """Return the number of graphs in the dataset."""
        return self.n_samples

    def __getitem__(self, idx):
        """
            Get the idx^th sample.
            Parameters
            ---------
            idx : int
                The sample index.
            Returns
            -------
            (dgl.DGLGraph, int)
                DGLGraph with node feature stored in `feat` field
                And its label.
        """
        return self.graph_lists[idx], self.node_labels[idx]
class SBMsDatasetDGL(torch.utils.data.Dataset):
    def __init__(self, name):
        """Load the train/test/val SBM splits for `name` from data/SBMs."""
        start = time.time()
        print("[I] Loading data ...")
        self.name = name
        data_dir = 'data/SBMs'
        # keep the original attribute-creation order: train, test, val
        for split in ('train', 'test', 'val'):
            setattr(self, split, load_SBMsDataSetDGL(data_dir, name, split=split))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))
def self_loop(g):
    """
        Utility function only, to be used only when necessary as per user self_loop flag
        : Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
        This function is called inside a function in SBMsDataset class.
    """
    # Rebuild the graph: copy nodes/features, re-add every non-self edge,
    # then add exactly one self-loop per node.
    new_g = dgl.DGLGraph()
    new_g.add_nodes(g.number_of_nodes())
    new_g.ndata['feat'] = g.ndata['feat']

    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    non_self_edges_idx = src != dst
    nodes = np.arange(g.number_of_nodes())
    # edge ids follow insertion order: original non-self edges first, self-loops last
    new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
    new_g.add_edges(nodes, nodes)

    # This new edata is not used since this function gets called only for GCN, GAT
    # However, we need this for the generic requirement of ndata and edata
    new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
    return new_g
def positional_encoding(g, pos_enc_dim):
    """
    Attach a Laplacian-eigenvector positional encoding to g.ndata['pos_enc'].

    Uses the symmetric normalized Laplacian L = I - D^{-1/2} A D^{-1/2} and
    keeps the pos_enc_dim eigenvectors after the trivial one (smallest
    eigenvalues first; loose tol to keep large-k solves fast).
    """
    adj = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
    deg_inv_sqrt = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
    lap = sp.eye(g.number_of_nodes()) - deg_inv_sqrt * adj * deg_inv_sqrt
    eig_val, eig_vec = sp.linalg.eigs(lap, k=pos_enc_dim+1, which='SR', tol=1e-2)
    eig_vec = eig_vec[:, eig_val.argsort()]  # columns ordered by increasing eigenvalue
    g.ndata['pos_enc'] = torch.from_numpy(np.real(eig_vec[:, 1:pos_enc_dim+1])).float()
    return g
class SBMsDataset(torch.utils.data.Dataset):
    """SBM node-classification dataset wrapper: loads pre-pickled splits and
    provides collate functions for sparse (DGL) and dense (RingGNN/3WLGNN) models."""
    def __init__(self, name):
        """
            Loading SBM datasets
        """
        start = time.time()
        print("[I] Loading dataset %s..." % (name))
        self.name = name
        data_dir = 'data/SBMs/'
        with open(data_dir+name+'.pkl',"rb") as f:
            # The pickle stores the three splits in (train, val, test) order.
            f = pickle.load(f)  # NOTE: rebinds the name `f` from file handle to loaded object
            self.train = f[0]
            self.val = f[1]
            self.test = f[2]
        print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))
    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.cat(labels).long()
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = torch.cat(tab_snorm_n).sqrt()
        #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
        #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
        #snorm_e = torch.cat(tab_snorm_e).sqrt()
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels
    # prepare dense tensors for GNNs which use; such as RingGNN and 3WLGNN
    def collate_dense_gnn(self, samples):
        # The input samples is a list of pairs (graph, label).
        # NOTE: only samples[0] is used below — dense models run with batch size 1.
        graphs, labels = map(list, zip(*samples))
        labels = torch.cat(labels).long()
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = tab_snorm_n[0][0].sqrt()
        #batched_graph = dgl.batch(graphs)
        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
            Adapted from https://github.com/leichen2018/Ring-GNN/
            Assigning node and edge feats::
            we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
            Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
            The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
            The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        # Number of discrete node-feature values per dataset; any other dataset
        # name would leave num_node_type unset and raise AttributeError below.
        if self.name == 'SBM_CLUSTER':
            self.num_node_type = 7
        elif self.name == 'SBM_PATTERN':
            self.num_node_type = 3
        # use node feats to prepare adj
        adj_node_feat = torch.stack([zero_adj for j in range(self.num_node_type)])
        adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
        for node, node_label in enumerate(g.ndata['feat']):
            # one-hot encode each node's feature on the diagonal of channel (label + 1)
            adj_node_feat[node_label.item()+1][node][node] = 1
        x_node_feat = adj_node_feat.unsqueeze(0)
        return x_node_feat, labels
    def _sym_normalize_adj(self, adj):
        # Symmetric normalization D^{-1/2} A D^{-1/2}; zero-degree nodes stay zero.
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))
    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
        self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
        self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]
    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors
        self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
        self.val.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val.graph_lists]
        self.test.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test.graph_lists]
| 9,012 | 34.908367 | 127 | py |
TextNormSeq2Seq | TextNormSeq2Seq-master/parameters.py | import torch
from torch.backends import cudnn
from torch import cuda
import numpy as np
import argparse
import random
import os
import logging
import lib
logger = logging.getLogger("main")
# ---------------------------------------------------------------------------
# Command-line interface.  Flags are grouped by concern (data / model /
# training / GPU / inference); the parsed namespace `opt` is post-processed
# by change_args() below before use.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='train.py')
## Data options
parser.add_argument('-traindata', default='dataset/train_data.json', help='Path to train data file')
parser.add_argument('-testdata', default='dataset/test_truth.json',help='Path to the test data file')
parser.add_argument('-valsplit', type=int, default=0,help='Number of examples for validation')
parser.add_argument('-vocab_size', type=int, default=None, help='Limit vocabulary')
parser.add_argument('-lowercase', action='store_true', default=False,help='Converting to lowercase')
parser.add_argument('-share_vocab', action='store_true',default=False,help='Shared vocabulary btw source and target')
parser.add_argument('-eos',action='store_true', default=False,help='Adding EOS token at the end of each sequence')
parser.add_argument('-bos',action='store_true', default=False,help='Adding BOS token in the beginning of each sequence')
parser.add_argument('-self_tok', action='store_true',default=False, help='Special token @self to indicate that the input is to be left alone')
parser.add_argument('-input', default='word', choices=['word', 'char', 'spelling', 'hybrid'],
                    help='character or word level representation, spelling (character model trained on pairs of words) and hybrid (word+spelling)')
parser.add_argument('-maxlen', type=int, default=None,help='Maximum source sequence length')
parser.add_argument('-correct_unique_mappings', action='store_true',default=False, help='Correct unique mappings before training')
parser.add_argument('-char_model', type=str, help='Path to the pretrained char level model')
parser.add_argument('-data_augm', action='store_true',default=False, help='Use data augmentation or not')
## Model options
parser.add_argument('-rnn_type', default='LSTM', choices=['LSTM', 'GRU'], help='Layer type [LSTM|GRU]')
parser.add_argument('-layers', type=int, default=1,help='Number of layers in the LSTM encoder/decoder')
parser.add_argument('-brnn', action='store_true', default=False,help='Use a bidirectional encoder')
parser.add_argument('-rnn_size', type=int, default=300,help='RNN cell hidden size')
parser.add_argument('-emb_size', type=int, default=100,help='Embedding size')
parser.add_argument('-attention', action='store_true', default=False,help='Use attention')
parser.add_argument('-bias', action='store_true', default=False,help='Add bias term')
parser.add_argument('-tie_decoder_embeddings', action='store_true', default=False,
                    help='Share parameters between decoder embeddings and output projection matrix. See https://arxiv.org/abs/1608.05859')
parser.add_argument('-share_embeddings', action='store_true', default=False,
                    help='Share the word embeddings between encoder and decoder. Drastically reduces number of learned parameters.')
parser.add_argument('-dropout', type=float, default=0.2,help='Dropout input of every RNN layer.')
parser.add_argument('-backward_splits', type=int, default=None,help='Backward with smaller batches to save memory.')
parser.add_argument('-teacher_forcing_ratio', type=float, default=0.6,help='Probablity of using teacher forcing (scheduled sampling)')
parser.add_argument('-noise_ratio', type=float, default=0.4,help='% extra noise to add')
## Training
parser.add_argument('-batch_size', type=int, default=32,help='Training batch size')
parser.add_argument('-start_epoch', type=int, default=1,help='Epoch to start training.')
parser.add_argument('-end_epoch', type=int, default=1,help='Number of supervised learning epochs')
parser.add_argument('-optim', default='adam', choices=['sgd', 'adam', 'adagrad', 'adadelta'],help='Optimization method.')
parser.add_argument('-lr', type=float, default=0.01,help='Initial learning rate')
parser.add_argument('-max_grad_norm', type=float, default=5,help='Clip gradients by max global gradient norm. See https://arxiv.org/abs/1211.5063')
parser.add_argument('-learning_rate_decay', type=float, default=0.05,help='Multiply learning with this value after -start_decay_after epochs')
parser.add_argument('-start_decay_after', type=int, default=15,help='Decay learning rate AFTER this epoch')
## GPU
# -gpu -1 means CPU; change_args() derives opt.cuda from this value.
parser.add_argument('-gpu', type=int, default=-1,help='GPU id. Support single GPU only')
parser.add_argument('-log_interval', type=int, default=1,help='Print stats after that many training steps')
parser.add_argument('-save_interval', type=int, default=-1,help='Save model and evaluate after that many training steps')
parser.add_argument('-seed', type=int, default=3435,help='Random seed')
parser.add_argument('-logfolder', action='store_true', default=False, help='Log output to file')
parser.add_argument('-save_dir',default='saving', help='Directory to save model checkpoints')
parser.add_argument('-load_from', type=str, help='Path to a model checkpoint')
## Inference
parser.add_argument('-eval', action='store_true',help='Evaluatation only mode')
parser.add_argument('-interactive', action='store_true',help='Interactive mode')
parser.add_argument('-max_train_decode_len', type=int, default=50,help='Max decoding length during training')
opt = parser.parse_args()
def change_args(opt):
    """Finalize parsed CLI options.

    Seeds every RNG, configures cuDNN for determinism, sets up logging and
    the checkpoint directory, and derives dependent flags (opt.cuda,
    opt.self_tok, opt.share_vocab).  Mutates and returns `opt`.
    """
    # Deterministic cuDNN setup.  The previous code assigned these flags
    # through two aliases of the same module (torch.backends.cudnn and the
    # imported `cudnn`), setting `deterministic` twice; one assignment per
    # flag via the `cudnn` alias yields the identical final state.
    cudnn.enabled = False
    cudnn.benchmark = False
    cudnn.deterministic = True
    # Seed every RNG we use so runs are reproducible.
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)      # no-op on CPU-only builds
    torch.cuda.manual_seed_all(opt.seed)
    np.random.seed(opt.seed)
    if opt.save_dir and not os.path.exists(opt.save_dir):
        os.makedirs(opt.save_dir)
    logging.basicConfig(filename=os.path.join(opt.save_dir, 'output.log') if opt.logfolder else None, level=logging.INFO)
    if opt.self_tok:
        opt.self_tok = lib.constants.SELF
    opt.cuda = (opt.gpu != -1)  # -1 means CPU
    if torch.cuda.is_available() and not opt.cuda:
        logger.warning("WARNING: You have a CUDA device, so you should probably run with -gpu 1")
    if opt.cuda:
        cuda.set_device(opt.gpu)
    # Shared embeddings require a shared vocabulary.
    if opt.share_embeddings and not opt.share_vocab:
        logger.warning('src/tgt vocab should be the same if you use share_embeddings! Changing share_vocab to True.')
        opt.share_vocab = True
    return opt
| 6,371 | 67.516129 | 147 | py |
TextNormSeq2Seq | TextNormSeq2Seq-master/lib/metric/loss.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
def weighted_xent_loss(logits, targets, mask, normalize=True):
    """Masked cross-entropy over a (time, batch) target grid.

    Returns (loss, num_corrects): the summed NLL over unmasked positions
    (mean per unmasked token when `normalize`) and the count of argmax
    predictions matching the targets (masked when `normalize`).
    """
    vocab = logits.size(-1)
    flat_logits = logits.contiguous().view(-1, vocab)
    flat_targets = targets.contiguous().view(-1,)
    log_probs = F.log_softmax(flat_logits, dim=-1)
    # Per-token negative log-likelihood, zeroed out at padded positions.
    token_nll = -log_probs.gather(1, flat_targets.unsqueeze(1)).squeeze(1)
    token_nll = token_nll.view(*targets.size()) * mask.float()
    loss = token_nll.sum()
    if normalize:
        loss = loss / mask.float().sum()
    # Accuracy bookkeeping on the flattened predictions.
    predictions = log_probs.max(1)[1]
    hits = predictions.eq(flat_targets)
    if normalize:
        num_corrects = int(hits.masked_select(mask.contiguous().view(-1)).float().data.sum())
    else:
        num_corrects = int(hits.float().data.sum())
    return loss, num_corrects
def sequence_mask(sequence_length, max_len=None):
    """Boolean mask of shape (batch, max_len): True at positions < length.

    `max_len` defaults to the longest length in the batch.
    """
    if max_len is None:
        max_len = sequence_length.data.max()
    batch = sequence_length.size(0)
    # Column indices 0..max_len-1 repeated per row.
    positions = torch.arange(0, max_len).long().unsqueeze(0).expand(batch, max_len)
    positions = Variable(positions)
    if sequence_length.is_cuda:
        positions = positions.cuda()
    lengths = sequence_length.unsqueeze(1).expand_as(positions)
    return positions < lengths
| 1,354 | 41.34375 | 113 | py |
TextNormSeq2Seq | TextNormSeq2Seq-master/lib/metric/utils.py | import torch.nn.functional as F
from torch.autograd import Variable
import lib
import functools
import torch
import logging
logger = logging.getLogger("model")
def clean_sentence(sent, remove_unk=False, remove_eos=True, remove_bos=True):
    """Trim a decoded token list.

    Truncates everything after the first EOS (keeping the EOS itself), then
    optionally drops UNK tokens, a trailing EOS and a leading BOS.
    Returns the cleaned token list.
    """
    if lib.constants.EOS_WORD in sent:
        sent = sent[:sent.index(lib.constants.EOS_WORD) + 1]
    if remove_unk:
        # Fix: filter() returns an iterator in Python 3, which would break the
        # len()/slicing below with a TypeError — build a real list instead.
        sent = [tok for tok in sent if tok != lib.constants.UNK_WORD]
    if remove_eos:
        if len(sent) > 0 and sent[-1] == lib.constants.EOS_WORD:
            sent = sent[:-1]
    if remove_bos:
        if len(sent) > 0 and sent[0] == lib.constants.BOS_WORD:
            sent = sent[1:]
    return sent
def handle_tags(input_words, pred_words):
    """Copy URL / hashtag / mention placeholders back from the input.

    Wherever a prediction emitted one of the special tag tokens, the aligned
    input token is restored (prediction lists are edited in place).
    """
    assert len(input_words) == len(pred_words)
    specials = (lib.constants.URL, lib.constants.HASH, lib.constants.MENTION)
    out = []
    for inp, pred in zip(input_words, pred_words):
        if any(tok in pred for tok in specials):
            for i in range(min(len(inp), len(pred))):
                if pred[i] in specials:
                    pred[i] = inp[i]
        out.append(pred)
    return out
def handle_numbers(input_words, pred_words):
    """Revert any prediction containing a digit to the aligned input token.

    Prediction lists are edited in place; positions past the shorter of the
    two aligned sentences are left untouched.
    """
    assert len(input_words) == len(pred_words)
    out = []
    for inp, pred in zip(input_words, pred_words):
        for i in range(min(len(inp), len(pred))):
            if any(ch.isdigit() for ch in pred[i]):
                pred[i] = inp[i]
        out.append(pred)
    return out
def handle_unk(input, input_words, pred_words, unk_model,unkowns_file=None):
    """Resolve <unk> source positions.

    With a secondary character-level `unk_model`, each unknown input word is
    re-translated in isolation and the prediction is accepted only when the
    model is confident (> 50.0) and the word is alphabetic; otherwise the raw
    input word is kept.  Without a model, falls back to copy_unks().
    Optionally logs (word, translation, confidence) rows to `unkowns_file`.
    """
    if(unk_model):
        assert len(input) == len(pred_words)
        ret = []
        for input_tokens, input_words_tokens, pred_tokens in zip(input, input_words, pred_words):
            if lib.constants.UNK_WORD in input_tokens:
                sent_length = min(len(input_tokens),len(pred_tokens))
                for i in range(sent_length):
                    if(input_tokens[i]==lib.constants.UNK_WORD):
                        # Encode the raw word with the char model's vocabulary.
                        unk_src = unk_model.encoder.vocab.to_indices(input_words_tokens[i],
                            eosWord=unk_model.opt.eos,bosWord=unk_model.opt.bos).view(1, -1)
                        #Repeat as many times as the batch size, awful but works
                        unk_src = torch.cat([unk_src]*unk_model.opt.batch_size)
                        unk_src = Variable(unk_src)
                        if input_words_tokens[i] == '' or input_words_tokens[i] == ' ':
                            continue
                        src_lens = Variable(torch.LongTensor([len(p) for p in unk_src]))
                        if unk_model.opt.cuda: unk_src = unk_src.cuda()
                        if unk_model.opt.cuda: src_lens = src_lens.cuda()
                        unk_src = unk_src.t()
                        # translate() expects a batch dict; tgt is unused here so src is reused.
                        batch = {}
                        batch['src'] = unk_src, src_lens
                        batch['tgt'] = unk_src, src_lens
                        probs, translation = unk_model.translate(batch)
                        confidence = probs.transpose()[0].max()
                        translation = translation.t().tolist()
                        trsl2wrds = lib.metric.to_words(translation, unk_model.encoder.vocab)
                        if unkowns_file: unkowns_file.writerow([input_words_tokens[i], ''.join(trsl2wrds[0]), confidence])
                        # Accept the char-model output only when confident and the word is alphabetic.
                        pred_tokens[i] = ''.join(trsl2wrds[0]) if confidence > 50.0 and input_words_tokens[i].isalpha() else input_words_tokens[i]
                        if input_words_tokens[i]!=pred_tokens[i]: logger.info('secondary model confidence:{}, unk_word:{}, prediction:{}'.format(confidence, input_words_tokens[i], pred_tokens[i]))
            ret.append(pred_tokens)
    else:
        ret = copy_unks(input, input_words, pred_words)
    return ret
def copy_unks(input, input_words, pred_words):
    """Replace <unk> positions (in source ids or predictions) with the raw input word.

    Prediction lists are edited in place and also collected into the return list.
    """
    assert len(input) == len(pred_words)
    unk = lib.constants.UNK_WORD
    out = []
    for src_tokens, raw_words, pred in zip(input, input_words, pred_words):
        if unk in src_tokens or unk in pred:
            for i in range(min(len(src_tokens), len(pred))):
                if src_tokens[i] == unk or pred[i] == unk:
                    pred[i] = raw_words[i]
        out.append(pred)
    return out
def clean_self_toks(inputs, preds, token):
    """Replace every occurrence of `token` in a prediction with the aligned input token.

    Prediction lists are edited in place; positions past the shorter aligned
    sentence are left as-is.
    """
    cleaned = []
    for inp, pred in zip(inputs, preds):
        if token in pred:
            for i in range(min(len(inp), len(pred))):
                if pred[i] == token:
                    pred[i] = inp[i]
        cleaned.append(pred)
    return cleaned
def to_words(sents, dict):
    """Map id sequences back to token lists via `dict`, then strip EOS/BOS.

    (`dict` is the project vocabulary object exposing itos(); the parameter
    name shadows the builtin but is kept for caller compatibility.)
    """
    converted = []
    for ids in sents:
        tokens = clean_sentence([dict.itos(t) for t in ids], remove_unk=False)
        converted.append(tokens)
    return converted
def char_to_words(sents):
    """Join character tokens of each sentence and split on the '#' word separator."""
    return [''.join(chars).split('#') for chars in sents]
def compute_single(pair, metric_fn=None):
    """Score one (input, pred, gold) triple with metric_fn; empty predictions score 0."""
    input, pred, gold = pair
    if not pred:
        return 0.
    return metric_fn(input, pred, gold)['f1']
def compute_batch(inputs, preds, golds, metric_fn):
    """Score each aligned (input, pred, gold) triple with metric_fn; returns a list."""
    scorer = functools.partial(compute_single, metric_fn=metric_fn)
    return [scorer(triple) for triple in zip(inputs, preds, golds)]
def compute_numcorrects(dec_logits, targets, pad_masks=None):
    """Count positions where the argmax over the last dim equals the target.

    When `pad_masks` is given, only unmasked positions are counted.
    """
    predictions = F.log_softmax(dec_logits, dim=-1).max(-1)[1]
    hits = predictions.eq(targets)
    if pad_masks is not None:
        hits = hits.masked_select(pad_masks)
    return int(hits.float().data.sum())
| 6,223 | 40.771812 | 196 | py |
TextNormSeq2Seq | TextNormSeq2Seq-master/lib/train/optim.py | from torch.nn.utils import clip_grad_norm_
import torch.optim as optim
import logging
logger = logging.getLogger("optim")
class Optim(object):
    """Thin wrapper around torch optimizers adding gradient clipping and
    validation-loss-driven learning-rate decay."""

    def _makeOptimizer(self):
        """Instantiate the underlying torch optimizer from self.method."""
        factories = {
            'sgd': optim.SGD,
            'adagrad': optim.Adagrad,
            'adadelta': optim.Adadelta,
            'adam': optim.Adam,
        }
        if self.method not in factories:
            raise RuntimeError("Invalid optim method: " + self.method)
        self.optimizer = factories[self.method](self.params, lr=self.lr)

    def __init__(self, params, method, lr, max_grad_norm, lr_decay=1, start_decay_after=None):
        self.params = list(params)  # careful: params may be a generator
        self.last_loss = None
        self.lr = lr
        self.max_grad_norm = max_grad_norm
        self.method = method
        self.lr_decay = lr_decay
        self.start_decay_after = start_decay_after
        self._makeOptimizer()

    def step(self):
        """Clip gradients to max_grad_norm, then take an optimizer step."""
        clip_grad_norm_(self.params, self.max_grad_norm)
        self.optimizer.step()

    def set_lr(self, lr):
        """Set the learning rate on both this wrapper and the torch optimizer."""
        self.lr = lr
        self.optimizer.param_groups[0]["lr"] = lr

    def state_dict(self):
        return self.optimizer.state_dict()

    def load_state_dict(self, d):
        return self.optimizer.load_state_dict(d)

    def update_lr(self, loss, epoch):
        """Decay the LR once the epoch passes start_decay_after and the loss worsens."""
        if self.start_decay_after is not None and epoch >= self.start_decay_after:
            if self.last_loss is not None and loss > self.last_loss:
                logging.info("Decaying learning rate from {} to {}".format(self.lr, self.lr * self.lr_decay))
                self.set_lr(self.lr * self.lr_decay)
        self.last_loss = loss
| 1,825 | 34.803922 | 109 | py |
TextNormSeq2Seq | TextNormSeq2Seq-master/lib/train/trainer.py | import os
import time
import torch
import logging
import lib
logger = logging.getLogger("train")
class Trainer(object):
    """Supervised training loop: runs epochs, logs statistics, decays the
    learning rate on validation loss and periodically checkpoints the model."""
    def __init__(self, model, evaluator, train_data, eval_data, optim, opt, test_eval=None):
        self.model = model
        self.evaluator = evaluator
        self.train_data = train_data
        self.eval_data = eval_data
        self.optim = optim
        self.opt = opt
        self.test_eval = test_eval
        # Masked cross-entropy shared with the evaluator.
        self.criterion = lib.metric.weighted_xent_loss
    def train(self, start_epoch, end_epoch):
        """Run epochs [start_epoch, end_epoch], validating and checkpointing."""
        # save_interval == -1 means "save only after the final epoch".
        if(self.opt.save_interval==-1): self.opt.save_interval=end_epoch+1
        for epoch in range(start_epoch, end_epoch + 1):
            logger.info('\n* TextNorm epoch *')
            logger.info('Model optim lr: %g' % self.optim.lr)
            total_loss, total_accuracy = self.train_epoch(epoch)
            logger.info('Train loss: %.2f' % total_loss)
            logger.info('Train total_accuracy: %.2f' % total_accuracy)
            # Validation loss drives the LR decay schedule (valid_f1 is unused here).
            valid_loss, valid_f1 = self.evaluator.eval(self.eval_data)
            self.optim.update_lr(valid_loss, epoch)
            if epoch % self.opt.save_interval == 0 or epoch==end_epoch:
                checkpoint = {
                    'model_state_dict': self.model.state_dict(),
                    'optim_state_dict': self.optim.state_dict(),
                    'opt': self.opt,
                    'epoch': epoch,
                }
                model_name = os.path.join(self.opt.save_dir, "model_%d" % epoch)
                model_name += "_"+self.opt.input+".pt"
                torch.save(checkpoint, model_name)
                logger.info('Save model as %s' % model_name)
    def train_epoch(self, epoch):
        """One pass over the training data; returns (mean batch loss, accuracy %)."""
        self.model.train()
        epoch_time = time.time()
        train_data = lib.data.Dataset(self.train_data, self.opt)
        num_batches = train_data.num_batches
        train_iter = train_data.batches()
        total_loss, total_corrects, total_tgts = 0, 0, 0
        for i, batch in enumerate(train_iter):
            self.model.train()
            tgt, tgt_lens = batch['tgt']
            src, src_lens = batch['src']
            outputs = self.model(batch)
            self.model.zero_grad()
            # Mask out padded target positions; (time, batch) -> (batch, time) transpose undone.
            pad_masks = lib.metric.sequence_mask(sequence_length=tgt_lens, max_len=tgt.size(0)).transpose(0,1)
            # model.backward computes the loss and runs loss.backward().
            loss, num_corrects = self.model.backward(outputs, tgt, pad_masks, criterion=self.criterion)
            num_words = (tgt.data.ne(lib.constants.PAD).sum() + src.data.ne(lib.constants.PAD).sum()).item()
            num_tgts = tgt_lens.data.sum().item()
            total_loss += loss
            total_corrects += num_corrects
            total_tgts += num_tgts
            self.optim.step()
            if (i + 1) % self.opt.log_interval == 0:
                words_pers = int(num_words / (time.time() - epoch_time))
                accuracy = 100 * (num_corrects/float(num_tgts))
                logger.info('Epoch %3d, %6d/%d batches loss:%f, num_words:%d, accuracy:%f' %
                    (epoch, i + 1, num_batches, loss, words_pers, accuracy))
        return total_loss/float(num_batches), 100*(total_corrects/float(total_tgts)) | 3167 | 44.913043 | 110 | py |
TextNormSeq2Seq | TextNormSeq2Seq-master/lib/data/Dataset.py | from torch.autograd import Variable
import torch
import lib
class Dataset(object):
    """Mini-batch iterator over parallel data columns.

    `data` maps column names to lists of equal length; the 'src' and 'tgt'
    columns are padded into (time, batch) LongTensor batches, every other
    column is passed through as-is.  Each batch is sorted by descending
    source length (required by pack_padded_sequence downstream).
    """
    def __init__(self, data, opt):
        self.DATA_KEYS = data.keys()
        self.TENSOR_KEYS = ['src', 'tgt']
        for key in self.DATA_KEYS:
            setattr(self, key, data[key])
        self.opt = opt
        self.size = len(self.src)
        # Ceiling division: the last batch may be smaller.
        self.num_batches = (self.size + self.opt.batch_size - 1) // self.opt.batch_size

    def __len__(self):
        return self.num_batches

    def _to_tensor(self, data, return_lens):
        """Pad a list of 1-D tensors into a (max_len, batch) tensor (+ lengths)."""
        lens = [seq.size(0) for seq in data]
        padded = data[0].new(len(data), max(lens)).fill_(lib.constants.PAD)
        for row, seq in enumerate(data):
            padded[row].narrow(0, 0, seq.size(0)).copy_(seq)
        padded = padded.t_().contiguous()  # time-major layout
        if self.opt.cuda:
            padded = padded.cuda()
        var = Variable(padded)
        lens = Variable(torch.LongTensor(lens))
        if self.opt.cuda:
            lens = lens.cuda()
        return (var, lens) if return_lens else var

    def batches(self):
        """Yield batch dicts, each sorted by descending source length."""
        src_col = list(self.DATA_KEYS).index('src')
        for b in range(self.num_batches):
            lo = b * self.opt.batch_size
            hi = (b + 1) * self.opt.batch_size
            columns = [getattr(self, key)[lo:hi] for key in self.DATA_KEYS]
            # Transpose to rows, sort by source length, transpose back.
            rows = sorted(zip(*columns), key=lambda row: -row[src_col].size(0))
            sorted_columns = list(zip(*rows))
            batch = {}
            for key, column in zip(self.DATA_KEYS, sorted_columns):
                batch[key] = self._to_tensor(column, return_lens=True) if key in self.TENSOR_KEYS else column
            batch['size'] = len(batch['pos'])
            yield batch
| 1,906 | 38.729167 | 93 | py |
TextNormSeq2Seq | TextNormSeq2Seq-master/lib/data/Dict.py | from collections import Counter
from .constants import *
import torch
class Dict(object):
    """Token <-> index vocabulary.

    Usage: accumulate tokens with add_words(), then call makeVocabulary()
    and makeLabelToIdx() to freeze the mapping.  Special tokens (PAD/UNK and
    optionally BOS/EOS) come from the package constants module.
    """
    def __init__(self, vocab_size, bosWord=None, eosWord=None):
        self.vocab = []            # raw token stream until makeVocabulary() runs
        self.vocab_counts = None   # per-token frequencies (set in makeVocabulary)
        self.vocab_size = vocab_size
        self.bosWord = bosWord
        self.eosWord = eosWord
        self.unkown_words = []     # tokens that missed lookup in stoi()
    @property
    def size(self):
        return len(self.label_to_idx)
    def __len__(self):
        return len(self.label_to_idx)
    def add_words(self, sequence):
        """Accumulate tokens from an iterable; call before makeVocabulary()."""
        for word in sequence:
            self.vocab.append(word)
    def makeVocabulary(self, vocab_size=None):
        """Freeze the token stream into a (possibly pruned) vocabulary plus special tokens."""
        self.vocab = Counter(self.vocab)
        self.vocab_counts = Counter(self.vocab)
        self.vocab = self.prune(vocab_size)
        self.vocab.append(PAD_WORD)
        self.vocab.append(UNK_WORD)
        if(self.bosWord): self.vocab.append(BOS_WORD)
        if(self.eosWord): self.vocab.append(EOS_WORD)
    def makeLabelToIdx(self):
        """Build the label<->index tables; special tokens keep their fixed ids."""
        self.label_to_idx = {PAD_WORD:PAD, UNK_WORD:UNK}
        self.idx_to_label = {PAD:PAD_WORD, UNK:UNK_WORD}
        if(self.bosWord):
            self.bosWord = BOS_WORD
            self.label_to_idx[BOS_WORD]=BOS
            self.idx_to_label[BOS]=BOS_WORD
        if(self.eosWord):
            self.eosWord = EOS_WORD
            self.label_to_idx[EOS_WORD]=EOS
            self.idx_to_label[EOS]=EOS_WORD
        for item in self.vocab:
            if(item not in self.label_to_idx):
                self.label_to_idx[item] = len(self.label_to_idx)
                self.idx_to_label[len(self.idx_to_label)] = item
                #TODO: bug when EOS is used and BOS is not used!
                # (sequential ids can collide with EOS's fixed id; the assert catches it)
                assert item == self.idx_to_label[self.label_to_idx[item]]
    # Return a new list with the `size` most frequent entries.
    def prune(self, vocab_size=None):
        """Return tokens ordered by descending frequency, keeping at most vocab_size.

        Fix: Counter.most_common() returns (token, count) pairs, and the
        previous code passed that *list* to sorted(key=list.get), which raised
        AttributeError whenever vocab_size actually pruned anything.  We now
        return just the tokens, which most_common() already orders.
        """
        if(vocab_size is None): vocab_size = -1
        if vocab_size >= len(self.vocab) or (vocab_size == -1):
            return sorted(self.vocab, key=self.vocab.get, reverse=True)
        kept = self.vocab.most_common(vocab_size)
        # Only keep the `size` most frequent entries.
        self.vocab_counts = self.vocab_counts.most_common(vocab_size)
        return [token for token, _ in kept]
    def stoi(self, label, default=None):
        """Token -> id; unknown tokens are recorded and mapped to `default`."""
        try:
            return self.label_to_idx[label]
        except KeyError:
            self.unkown_words.append(label)
            return default
    def itos(self, idx, default=None):
        """Id -> token, or `default` when the id is out of vocabulary."""
        try:
            return self.idx_to_label[idx]
        except KeyError:
            return default
    def to_indices(self, labels, bosWord=False, eosWord=False):
        """Encode a token sequence as a LongTensor, optionally wrapped in BOS/EOS."""
        vec = []
        if bosWord:
            vec += [self.stoi(BOS_WORD)]
        unk = self.stoi(UNK_WORD)
        vec += [self.stoi(label, default=unk) for label in labels]
        if eosWord:
            vec += [self.stoi(EOS_WORD)]
        return torch.LongTensor(vec)
    def to_labels(self, idx, stop):
        """Decode ids to tokens, stopping after the `stop` id (inclusive)."""
        labels = []
        for i in idx:
            labels += [self.itos(i)]
            if i == stop:
                break
        return labels
| 3,135 | 32.72043 | 73 | py |
TextNormSeq2Seq | TextNormSeq2Seq-master/lib/model/model_factory.py | import lib
import torch
import logging
logger = logging.getLogger("model")
def build_model(vocabs, opt):
    """Assemble the encoder/decoder Seq2Seq model and its optimizer from `opt`."""
    src_vocab, tgt_vocab = vocabs
    enc = lib.model.EncoderRNN(opt, src_vocab)
    dec = lib.model.LuongAttnDecoderRNN(opt, tgt_vocab)
    model = lib.model.Seq2Seq(enc, dec, opt)
    return model, create_optim(model, opt)
def create_optim(model, opt):
    """Wrap the model's trainable parameters in the project Optim scheduler."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return lib.train.Optim(trainable, opt.optim, opt.lr, opt.max_grad_norm,
                           lr_decay=opt.learning_rate_decay, start_decay_after=opt.start_decay_after)
def create_model(vocabs, opt, is_char_model=False):
    """Build a Seq2Seq model + optimizer, restoring a checkpoint when configured.

    When `is_char_model` is set the secondary char model's checkpoint path is
    used; restoring also advances opt.start_epoch and adopts the saved batch size.
    """
    if opt.load_from is not None or (is_char_model and opt.char_model != None):
        load_loc = opt.char_model if is_char_model else opt.load_from
        logger.info('Loading model from checkpoint at {}'.format(load_loc))
        # Remap saved storages onto the configured device while deserializing.
        location = (lambda storage, loc: storage.cuda(opt.gpu)) if opt.cuda else (lambda storage, loc: storage)
        checkpoint = torch.load(load_loc, map_location=location)
        checkpoint['opt'].cuda = opt.cuda
        model, optim = build_model(vocabs, checkpoint['opt'])
        model.load_state_dict(checkpoint['model_state_dict'])
        optim.load_state_dict(checkpoint['optim_state_dict'])
        opt.start_epoch = checkpoint['epoch'] + 1
        opt.batch_size = checkpoint['opt'].batch_size
    else:
        logger.info('Building Model')
        model, optim = build_model(vocabs, opt)
    if opt.cuda: model.cuda() # GPU.
    nParams = sum(p.nelement() for p in model.parameters())
    logger.info('* number of parameters: %d' % nParams)
    return model, optim
| 1,834 | 38.042553 | 101 | py |
TextNormSeq2Seq | TextNormSeq2Seq-master/lib/model/model.py | import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
import torch
import lib
import random
class EncoderRNN(nn.Module):
    """RNN (LSTM/GRU) encoder over padded, length-sorted source batches."""
    def __init__(self, opt, vocab):
        super(EncoderRNN, self).__init__()
        self.vocab = vocab
        self.opt = opt
        self.vocab_size = len(self.vocab)
        self.num_directions = 2 if self.opt.brnn else 1
        self.embedding = nn.Embedding(self.vocab_size, opt.emb_size, padding_idx=lib.constants.PAD)
        # Split the hidden size across directions so outputs stay rnn_size wide.
        rnn_cls = getattr(nn, self.opt.rnn_type)
        self.rnn = rnn_cls(input_size=self.opt.emb_size,
                           hidden_size=opt.rnn_size // self.num_directions,
                           num_layers=self.opt.layers,
                           dropout=self.opt.dropout,
                           bidirectional=self.opt.brnn)

    def forward(self, src, src_lens, hidden=None):
        """Encode a (time, batch) id tensor; returns (outputs, final hidden)."""
        embedded = self.embedding(src)
        packed = nn.utils.rnn.pack_padded_sequence(embedded, src_lens)
        packed_out, self.hidden = self.rnn(packed, hidden)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_out)
        if self.opt.brnn:
            self.hidden = self._cat_directions(self.hidden)
        return outputs, self.hidden

    def _cat_directions(self, hidden):
        """Concatenate forward/backward hidden states along the feature dim."""
        def merge(h):
            return torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
        if isinstance(hidden, tuple):  # LSTM: (h, c)
            return tuple(merge(h) for h in hidden)
        return merge(hidden)           # GRU
class LuongAttnDecoderRNN(nn.Module):
    """Single-step RNN decoder with optional Luong-style global attention and
    optional weight tying between output projection and input embedding."""
    def __init__(self, opt, vocab):
        super(LuongAttnDecoderRNN, self).__init__()
        self.opt = opt
        self.vocab = vocab
        self.vocab_size = len(self.vocab)
        self.tanh = nn.Tanh()
        self.embedding = nn.Embedding(self.vocab_size, opt.emb_size, padding_idx=lib.constants.PAD)
        self.rnn = getattr(nn, self.opt.rnn_type)(
            input_size=self.opt.emb_size,
            hidden_size=self.opt.rnn_size,
            num_layers=self.opt.layers,
            dropout=self.opt.dropout)
        if self.opt.attention:
            # W_a: bilinear ("general") attention score; W_c: mixes context + hidden.
            self.W_a = nn.Linear(self.opt.rnn_size, self.opt.rnn_size, bias=opt.bias)
            self.W_c = nn.Linear(self.opt.rnn_size + self.opt.rnn_size, self.opt.rnn_size, bias=opt.bias)
        if self.opt.tie_decoder_embeddings and self.vocab_size!=1:
            # Project down to emb_size so W_s can share weights with the embedding.
            self.W_proj = nn.Linear(self.opt.rnn_size, self.opt.emb_size, bias=opt.bias)
            self.W_s = nn.Linear(self.opt.emb_size, self.vocab_size, bias=opt.bias)
            self.W_s.weight = self.embedding.weight
        else:
            self.W_s = nn.Linear(self.opt.rnn_size, self.vocab_size, bias=opt.bias)
    def forward(self, src, src_lens, encoder_outputs, decoder_hidden):
        """One decoding step: `src` holds the previous output token per batch element.

        Returns (vocab logits, new hidden state, attention weights or None).
        """
        emb = self.embedding(src.unsqueeze(0))
        decoder_output, self.decoder_hidden = self.rnn(emb, decoder_hidden)
        decoder_output = decoder_output.transpose(0,1)
        if self.opt.attention:
            attention_scores = torch.bmm(decoder_output, self.W_a(encoder_outputs).transpose(0,1).transpose(1,2))
            attention_mask = lib.metric.sequence_mask(src_lens).unsqueeze(1)
            # Mask padded source positions to -inf before the softmax.
            # NOTE(review): `1 - mask` on a bool tensor fails on newer PyTorch
            # (use `~mask` there); presumably written for torch <= 1.1 — confirm.
            attention_scores.data.masked_fill_(1 - attention_mask.data, -float('inf'))
            attention_weights = F.softmax(attention_scores.squeeze(1), dim=1).unsqueeze(1)
            context_vector = torch.bmm(attention_weights, encoder_outputs.transpose(0,1))
            concat_input = torch.cat([context_vector, decoder_output], -1)
            concat_output = self.tanh(self.W_c(concat_input))
            attention_weights = attention_weights.squeeze(1)
        else:
            attention_weights = None
            concat_output = decoder_output
        if self.opt.tie_decoder_embeddings and self.vocab_size!=1:
            output = self.W_s(self.W_proj(concat_output))
        else:
            output = self.W_s(concat_output)
        output = output.squeeze(1)
        del src_lens  # drops the local reference only; no effect on the caller
        return output, self.decoder_hidden, attention_weights
class Seq2Seq(nn.Module):
    """Encoder/decoder wrapper: teacher-forced forward(), loss backward(),
    and greedy step-by-step decoding via translate()."""
    def __init__(self, encoder, decoder, opt):
        super(Seq2Seq, self).__init__()
        self.torch = torch.cuda if opt.cuda else torch
        self.encoder = encoder
        self.decoder = decoder
        self.opt = opt
        if opt.share_embeddings:
            self.encoder.embedding.weight = self.decoder.embedding.weight
    def forward(self, batch, eval=False):
        """Decode max_tgt_len steps with scheduled sampling; returns step logits."""
        tgt, tgt_lens = batch['tgt']
        src, src_lens = batch['src']
        batch_size = src.size(1)
        assert(batch_size == tgt.size(1))
        # Every sequence starts from the BOS token.
        input_seq = Variable(torch.LongTensor([lib.constants.BOS] * batch_size))
        decoder_outputs = Variable(torch.zeros(self.opt.max_train_decode_len, batch_size, self.decoder.vocab_size))
        if self.opt.cuda: input_seq, decoder_outputs = input_seq.cuda(), decoder_outputs.cuda()
        max_tgt_len = tgt.size()[0]
        encoder_outputs, encoder_hidden = self.encoder(src, src_lens.data.tolist())
        decoder_hidden = encoder_hidden
        # Scheduled sampling: feed gold tokens with prob teacher_forcing_ratio,
        # otherwise feed the model's own argmax; never teacher-force during eval.
        use_teacher_forcing = False if eval else random.random() < self.opt.teacher_forcing_ratio
        for t in range(max_tgt_len):
            decoder_output, decoder_hidden, attention_weights = self.decoder(input_seq, src_lens, encoder_outputs, decoder_hidden)
            decoder_outputs[t] = decoder_output
            if use_teacher_forcing:
                input_seq = tgt[t]
            else:
                topv, topi = decoder_output.topk(1)
                input_seq = topi.squeeze()
        return decoder_outputs
    def backward(self, outputs, tgt_seqs, mask, criterion, eval=False, normalize=True):
        """Apply `criterion` over the first max_tgt_len steps; backprop unless eval."""
        max_tgt_len = tgt_seqs.size()[0]
        logits = outputs[:max_tgt_len]
        loss, num_corrects = criterion(logits, tgt_seqs, mask, normalize=normalize)
        if(not eval): loss.backward()
        return loss.item(), num_corrects
    def translate(self, batch):
        """Greedy decode: returns (top-1 scores, token ids), both (time, batch).

        Stops early once every sequence in the batch has just emitted EOS.
        """
        tgt, tgt_lens = batch['tgt']
        src, src_lens = batch['src']
        batch_size = src.size(1)
        assert (batch_size == tgt.size(1))
        input_seq = Variable(torch.LongTensor([lib.constants.BOS] * batch_size))
        decoder_outputs = Variable(torch.zeros(self.opt.max_train_decode_len, batch_size, self.decoder.vocab_size))
        if self.opt.cuda: input_seq, decoder_outputs = input_seq.cuda(), decoder_outputs.cuda()
        encoder_outputs, encoder_hidden = self.encoder(src, src_lens.data.tolist())
        decoder_hidden = encoder_hidden
        if self.opt.attention: all_attention_weights = torch.zeros(self.opt.max_train_decode_len, src.size(1), len(src))
        end_of_batch_pred = np.array([lib.constants.EOS] * len(src_lens))
        # Pre-filled with 2; presumably a fixed special-token id — confirm against lib.constants.
        preds = np.ones((self.opt.max_train_decode_len, len(src_lens))) * 2
        probs = np.ones((self.opt.max_train_decode_len, len(src_lens))) * 2
        for t in range(self.opt.max_train_decode_len):
            decoder_output, decoder_hidden, attention_weights = self.decoder(input_seq, src_lens, encoder_outputs, decoder_hidden)
            if self.opt.attention:
                all_attention_weights[t] = attention_weights.cpu().data
            prob, token_ids = decoder_output.data.topk(1)
            token_ids = token_ids.squeeze()
            prob = prob.squeeze()
            preds[t,:] = token_ids
            probs[t,:] = prob
            input_seq = Variable(token_ids)
            # Early exit when every sequence's latest token is EOS.
            if np.sum(np.equal(token_ids.cpu().numpy(),end_of_batch_pred)) == len(src):
                break
        preds = torch.LongTensor(preds)
        return probs, preds | 7541 | 46.433962 | 130 | py |
"""
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
Script used to generate alignments of the paragraphs with sentences from the summary using a paraphrase biencoder model - https://huggingface.co/sentence-transformers/paraphrase-distilroberta-base-v1.
The summary sentences that match with the same paragraph are then aggregated together.
It is recommended to run this script on a GPU machine.
"""
#!/usr/bin/env python
# coding: utf-8
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter("ignore", ResourceWarning)
import argparse
import json
import os
from os.path import basename
import sys
import pprint
import spacy
import numpy as np
import warnings
from tqdm import tqdm
from matplotlib import pyplot as plt
from matching.games import HospitalResident
from sentence_transformers import SentenceTransformer, util
# change recursion limit
# (presumably needed for deep recursion in downstream libraries — TODO confirm)
sys.setrecursionlimit(5000)
# Bi-encoder used for all paragraph/summary similarity computations:
# https://huggingface.co/sentence-transformers/paraphrase-distilroberta-base-v1
model_bi_encoder_paraphrase = SentenceTransformer('paraphrase-distilroberta-base-v1')
model_bi_encoder_paraphrase.max_seq_length = 512  # truncate inputs to the encoder's max context
pp = pprint.PrettyPrinter(indent=2)  # pretty-printer for debug dumps
# NOTE(review): opened at import time and never closed — confirm this is intended.
error_logs_file = open("error_logs.jsonl","a")
warnings.filterwarnings("ignore", category=ResourceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
def merge_text_paragraphs(paragraphs, min_sent=3, max_sent=12):
    """Merge short chapter paragraphs into chunks so summary sentences can be aligned.

    Paragraphs longer than `min_sent` sentences are emitted unchanged; shorter
    ones are buffered and joined until adding the next would exceed `max_sent`
    buffered sentences.

    Args:
        paragraphs: list of paragraph strings in document order.
        min_sent: paragraphs with more sentences than this pass through as-is.
        max_sent: max sentence count accumulated in one merged chunk.
    Returns:
        list of merged paragraph strings.

    Fix: removed the unused `new_list_len` variable in the final flush — it ran
    an extra (expensive) spacy parse whose result was discarded.
    """
    # NOTE: the spacy pipeline is re-loaded on every call; cache it at module
    # level if this becomes a bottleneck.
    spacy_nlp = spacy.load("en_core_web_lg")
    new_paragraphs = []
    temp_paragraphs = []
    temp_paragraphs_cnt = 0
    for paragraph in paragraphs:
        paragraph_len = len(list(spacy_nlp(paragraph).sents))
        if paragraph_len > min_sent:
            # Long paragraph: flush any buffered short ones first, then keep it.
            if temp_paragraphs:
                assert len(temp_paragraphs) <= max_sent
                new_paragraphs.append(" ".join(temp_paragraphs))
                temp_paragraphs = []
                temp_paragraphs_cnt = 0
            new_paragraphs.append(paragraph)
        elif temp_paragraphs_cnt + paragraph_len > max_sent:
            # Buffer full: flush it and start a new buffer with this paragraph.
            assert len(temp_paragraphs) <= max_sent
            new_paragraphs.append(" ".join(temp_paragraphs))
            temp_paragraphs = [paragraph]
            temp_paragraphs_cnt = paragraph_len
        else:
            temp_paragraphs.append(paragraph)
            temp_paragraphs_cnt += paragraph_len
    if temp_paragraphs:
        # Flush the trailing buffer.
        assert len(temp_paragraphs) <= max_sent
        new_paragraphs.append(" ".join(temp_paragraphs))
    return new_paragraphs
def align_data_greedy_matching(similarity_matrix):
    """Greedy alignment: each summary sentence takes its highest-scoring paragraph.

    Args:
        similarity_matrix: (num_summary_sentences, num_paragraphs) score array.
    Returns:
        list where entry i is the paragraph index aligned to summary sentence i.
    """
    # Row-wise argmax gives the best paragraph per summary sentence.
    return np.argmax(similarity_matrix, axis=1).tolist()
def align_data_stable_matching(similarity_matrix, text_capacity):
    """Stable (hospital-resident) alignment of summary sentences to paragraphs.

    Text paragraphs act as hospitals (each can take up to `text_capacity`
    residents); summary sentences act as residents. Preferences on both sides
    are ordered by descending similarity.

    Args:
        similarity_matrix: (num_summary_sentences, num_paragraphs) score array.
        text_capacity: max summary sentences assignable to one paragraph.
    Returns:
        list where entry i is the paragraph index matched to summary sentence i
        (-1 if unmatched).
    """
    summ_cnt, text_cnt = similarity_matrix.shape
    summ_ids = ["%dS" % ix for ix in range(summ_cnt)]
    text_ids = ["%dT" % ix for ix in range(text_cnt)]
    ids_summ = {name: ix for ix, name in enumerate(summ_ids)}
    ids_text = {name: ix for ix, name in enumerate(text_ids)}
    # Preference lists: most-similar first (ascending argsort, reversed).
    summ_prefs = {
        summ_ids[s]: [text_ids[t] for t in np.argsort(similarity_matrix[s, :])[::-1]]
        for s in range(summ_cnt)
    }
    text_prefs = {
        text_ids[t]: [summ_ids[s] for s in np.argsort(similarity_matrix[:, t])[::-1]]
        for t in range(text_cnt)
    }
    capacity = {name: text_capacity for name in text_ids}
    # Solve the hospital-resident matching, optimal for the paragraph side.
    game = HospitalResident.create_from_dictionaries(summ_prefs, text_prefs, capacity)
    matching = game.solve(optimal="hospital")
    alignments = [-1] * summ_cnt
    for hospital, residents in matching.items():
        for resident in residents:
            alignments[ids_summ[resident.name]] = ids_text[hospital.name]
    return alignments
def compute_similarities_bi_encoder(paragraphs, summaries):
    """Pairwise cosine similarities between summaries and paragraphs.

    Uses the module-level paraphrase-distilroberta-base-v1 bi-encoder
    (https://www.sbert.net/docs/usage/semantic_textual_similarity.html).

    Returns:
        np.ndarray of shape (len(summaries), len(paragraphs)).
    """
    para_emb = model_bi_encoder_paraphrase.encode(paragraphs, convert_to_tensor=True)
    summ_emb = model_bi_encoder_paraphrase.encode(summaries, convert_to_tensor=True)
    return util.pytorch_cos_sim(summ_emb, para_emb).cpu().numpy()
def gather_data(alignments_bi_encoder_paraphrase, paragraphs, summaries, similarity_matrix_bi_encoder_paraphrase, title):
    """Build one alignment record per summary sentence.

    Args:
        alignments_bi_encoder_paraphrase: paragraph index per summary sentence.
        paragraphs / summaries: the aligned text units.
        similarity_matrix_bi_encoder_paraphrase: (summ, para) score matrix.
        title: base title; the aligned paragraph index is appended per record.
    Returns:
        list of dicts with summary sentence, paragraph, score, and title.
    """
    records = []
    for summ_ix, para_ix in enumerate(alignments_bi_encoder_paraphrase):
        records.append({
            "summary_sentence": summaries[summ_ix],
            "paragraph_alignment": paragraphs[para_ix],
            "alignment_score": str(similarity_matrix_bi_encoder_paraphrase[summ_ix][para_ix]),
            # title carries the id of the paragraph this sentence aligned with
            "title": title + "-" + str(para_ix)
        })
    return records
def visualize_alignments(similarity_matrix, alignments, title, output_dir=None):
    """Plot the similarity matrix next to its binarized alignment matrix.

    Saves `<title>.png` into `output_dir` when given; always closes the figure.
    """
    # One-hot alignment matrix: row i marks the paragraph chosen for sentence i.
    alignment_matrix = np.zeros_like(similarity_matrix)
    for row, col in enumerate(alignments):
        alignment_matrix[row][col] = 1
    plt.figure(figsize=(20,10))
    fig, (ax_sim, ax_align) = plt.subplots(2, sharey=True, figsize=(20, 10))
    fig.suptitle(title)
    ax_sim.imshow(similarity_matrix, cmap='gray', interpolation='nearest')
    ax_sim.set_title("Similarity matrix")
    ax_align.imshow(alignment_matrix, cmap='gray', interpolation='nearest')
    ax_align.set_title("Alignment matrix")
    if output_dir:
        plt.savefig(os.path.join(output_dir, title + ".png"), dpi=100)
    plt.close()
# Combine sentences from the summary that align with the same paragraph
def aggregate_paragraph_summary_alignments(examples):
    """Group per-sentence alignment records by paragraph title.

    Args:
        examples: dicts with keys 'title', 'paragraph_alignment',
                  'summary_sentence', 'alignment_score'.
    Returns:
        one dict per paragraph: its text, the list of aligned summary
        sentences, their scores, and the paragraph title.
    """
    paragraph_dict = {}
    for ex in examples:
        # Aggregate on the paragraph title (unique per paragraph).
        key = ex['title']
        entry = paragraph_dict.get(key)
        if entry is None:
            paragraph_dict[key] = [ex['paragraph_alignment'], [ex['summary_sentence']], [ex['alignment_score']]]
        else:
            # Same title must always map to the same paragraph text.
            assert entry[0] == ex['paragraph_alignment']
            entry[1].append(ex['summary_sentence'])
            entry[2].append(ex['alignment_score'])
    return [
        {
            'text': vals[0],
            'summary': vals[1],
            'alignment_scores': vals[2],
            'title': para_title
        }
        for para_title, vals in paragraph_dict.items()
    ]
def main(args):
    """Align every example's summary sentences to its chapter paragraphs and
    write aggregated alignments as JSONL (one `.stable` and/or `.greedy` file).

    Fix: the output file handles were opened and never closed, so buffered
    lines could be lost on early exit; they are now closed in a finally block.
    """
    # load data
    with open(args.data_path) as fd:
        data = [json.loads(line) for line in fd]
    # Create alignment files (only for the requested modes).
    f_stable_alignments = open(basename(args.data_path) + ".stable", "w") if args.stable_alignment else None
    f_greedy_alignments = open(basename(args.data_path) + ".greedy", "w") if args.greedy_alignment else None
    try:
        # align each example
        for ix, example in enumerate(tqdm(data)):
            if example['summary'] == []:
                continue
            chap_path = example["chapter_path"]
            print ("chap path: ", chap_path)
            # Drop empty sentences, then merge the chapter into paragraphs.
            summaries = [sent for sent in example["summary"] if sent]
            paragraphs_before_merge = [sent for sent in example["text"] if sent]
            paragraphs = merge_text_paragraphs(paragraphs_before_merge, args.merging_min_sents, args.merging_max_sents)
            # Bi-encoder similarity matrix (summaries x paragraphs).
            similarity_matrix_bi_encoder_paraphrase = compute_similarities_bi_encoder(paragraphs, summaries)
            # For all experimental results, stable alignment is the default.
            if args.stable_alignment:
                stable_alignments_bi_encoder_paraphrase = align_data_stable_matching(similarity_matrix_bi_encoder_paraphrase, args.alignment_capacity)
                # Title uniquely distinguishes paragraphs (source, book, chapter info).
                title = "%s.%s-stable" % (example["book_id"].lower().replace(" ", "_"), example["source"].lower())
                stable_examples = gather_data(stable_alignments_bi_encoder_paraphrase, paragraphs, summaries, similarity_matrix_bi_encoder_paraphrase, title)
                # visualize_alignments(similarity_matrix_bi_encoder_paraphrase, stable_alignments_bi_encoder_paraphrase, title, args.output_dir)
                stable_examples_aggregated = aggregate_paragraph_summary_alignments(stable_examples)
                for stable_example in stable_examples_aggregated:
                    f_stable_alignments.write(json.dumps(stable_example) + "\n")
            if args.greedy_alignment:
                title = "%s.%s-greedy" % (example["book_id"].lower().replace(" ", "_"), example["source"].lower())
                greedy_alignments = align_data_greedy_matching(similarity_matrix_bi_encoder_paraphrase)
                greedy_examples = gather_data(greedy_alignments, paragraphs, summaries, similarity_matrix_bi_encoder_paraphrase, title)
                greedy_examples_aggregated = aggregate_paragraph_summary_alignments(greedy_examples)
                for greedy_example in greedy_examples_aggregated:
                    f_greedy_alignments.write(json.dumps(greedy_example) + "\n")
    finally:
        # Ensure buffered output reaches disk even on exceptions.
        if f_stable_alignments is not None:
            f_stable_alignments.close()
        if f_greedy_alignments is not None:
            f_greedy_alignments.close()
if __name__ == "__main__":
    # CLI entry point: parse flags, require at least one alignment mode, run main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, help='path to gathered data file')
    parser.add_argument('--similarity_fn', type=str, default='weighted', choices=['weighted', 'original'], help='function used for similarity evaluation')
    parser.add_argument('--stable_alignment', action='store_true', help='function used for aligning')
    parser.add_argument('--greedy_alignment', action='store_true', help='function used for aligning')
    parser.add_argument('--merging_min_sents', type=int, default=4, help='')
    parser.add_argument('--merging_max_sents', type=int, default=12, help='')
    parser.add_argument('--alignment_capacity', type=int, default=10, help='')
    parser.add_argument('--save_figs', action='store_true', help='function used for aligning')
    args = parser.parse_args()
    if not (args.stable_alignment or args.greedy_alignment):
        raise RuntimeError("At least one alignment option must be chosen: `stable_alignment`, `greedy_alignment`.")
    if args.save_figs:
        # Figures are written next to the input data file.
        args.output_dir = os.path.join(os.path.dirname(args.data_path), "saved_figs")
        os.makedirs(args.output_dir, exist_ok=True)
    else:
        args.output_dir = None
    main(args)
| 12,003 | 37.474359 | 200 | py |
dependencies = ['torch', 'torchaudio', 'einops', 'opt_einsum', 'fastprogress', 'omegaconf']
import torch
from pathlib import Path
from sashimi.model import Sashimi, SashimiAR
from omegaconf import OmegaConf
def sashimi_ar_sc09(pretrained=True, progress=True, device='cuda'):
    """ SaShiMi autoregressive model trained on SC09 dataset. """
    # Fetch the released checkpoint; its config yaml is stored inside it.
    ckpt = torch.hub.load_state_dict_from_url(
        'https://github.com/RF5/simple-sashimi/releases/download/v1.0/ckpt_01100000.pt',
        map_location=device, progress=progress
    )
    cfg = OmegaConf.create(ckpt['cfg_yaml'])
    model = SashimiAR(cfg.model_cfg).to(device)
    if pretrained:
        model.load_state_dict(ckpt['model_state_dict'])
    n_params = sum(p.numel() for p in model.parameters())
    print(f"[MODEL] Sashimi loaded with {n_params:,d} parameters.")
    # Returned in eval mode, ready for inference.
    return model.eval()
| 893 | 34.76 | 105 | py |
import argparse
import logging
import os, gc
import math
import random
import time
from dataclasses import dataclass, field
from typing import Tuple, Union
import numpy as np
import pandas as pd
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from fastprogress import master_bar, progress_bar
from omegaconf import MISSING, OmegaConf, open_dict
from omegaconf.dictconfig import DictConfig
from omegaconf.listconfig import ListConfig
from torch.cuda.amp.grad_scaler import GradScaler
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from sashimi.config import AutoregressiveConfig
from sashimi.dataset import ARCollate, ARDataset
from sashimi.model import SashimiAR
@dataclass
class DistributedConfig:
    # torch.distributed process-group settings, consumed by train()/main().
    dist_backend: str = 'nccl'                    # backend passed to init_process_group
    dist_url: str = "tcp://localhost:54321"       # rendezvous init_method URL
    n_nodes: int = 1                              # number of machines
    n_gpus_per_node: int = 1                      # processes (GPUs) spawned per machine
@dataclass
class TrainConfig:
    # Top-level training configuration (overridable from the CLI via OmegaConf).
    # Distributed settings
    distributed: DistributedConfig = DistributedConfig()
    # Model settings
    model_cfg: AutoregressiveConfig = AutoregressiveConfig()
    device: str = 'cuda'
    seed: int = 1775                  # seeds torch / numpy / random in main()
    batch_size: int = 8               # global batch size (split across GPUs)
    num_workers: int = 8              # dataloader worker processes
    fp16: bool = False                # enable AMP autocast + GradScaler
    max_steps: int = 1_100_000 # 1.1M steps for SC09
    # Intervals below are measured in optimizer steps.
    summary_interval: int = 25        # tensorboard scalar logging cadence
    checkpoint_interval: int = 2500   # checkpoint save cadence
    stdout_interval: int = 100        # console logging cadence
    validation_interval: int = 1000   # validation run cadence
    # Learning settings
    start_lr: float = 4e-3 # unspecified for sashimi in paper. Using their config.
    # plateau lr schedule settings
    plateau_mode: str = "min"         # ReduceLROnPlateau watches validation loss
    plateau_factor: float = 0.2
    plateau_patience: int = 20
    plateau_min_lr: float = 0.0
    grad_clip: float = 0 # disabled
    # Data settings
    checkpoint_path: str = MISSING    # output dir for checkpoints + tb logs
    train_csv: str = MISSING          # csv with a `path` column of training wavs
    valid_csv: str = MISSING          # csv with a `path` column of validation wavs
    resume_checkpoint: str = ''       # checkpoint file to resume from ('' = fresh)
    sample_rate: int = 16000
    seq_len: int = 16000              # training crop length in samples
def flatten_cfg(cfg: Union[DictConfig, ListConfig]) -> dict:
    """
    Recursively flattens a config into a flat dictionary compatible with
    tensorboard's `add_hparams` function.
    """
    # Treat a list config as a dict keyed "[0]", "[1]", ... so one code path handles both.
    if type(cfg) == ListConfig:
        cfg = DictConfig({f"[{i}]": v for i, v in enumerate(cfg)})
    flat = {}
    for key in cfg:
        val = getattr(cfg, key)
        if type(val) in (int, str, bool, float):
            flat[key] = val
        elif type(val) in [DictConfig, ListConfig]:
            # Dicts join child keys with '.', lists concatenate the "[i]" suffix directly.
            sep = '.' if type(val) == DictConfig else ''
            for child_key, child_val in flatten_cfg(val).items():
                flat[f"{key}{sep}{child_key}"] = child_val
        else: raise AssertionError
    return flat
def train(rank, cfg: TrainConfig):
    """Training worker for one process/GPU.

    Builds the SashimiAR model, optimizer (separate LR group for params tagged
    `_optim`, i.e. S4 internals), dataloaders, AMP scaler and plateau scheduler,
    optionally resumes from a checkpoint, then runs the step-based training loop.
    Rank 0 additionally handles logging, checkpointing and validation.
    """
    if cfg.distributed.n_gpus_per_node > 1:
        init_process_group(backend=cfg.distributed.dist_backend, init_method=cfg.distributed.dist_url,
                           world_size=cfg.distributed.n_nodes*cfg.distributed.n_gpus_per_node, rank=rank)
    device = torch.device(f'cuda:{rank:d}')
    model = SashimiAR(cfg.model_cfg).to(device)
    loss_fn = torch.nn.CrossEntropyLoss().to(device)
    # Split parameters: those tagged with `_optim` get their own LR (cfg.model_cfg.lr).
    base_params = []
    special_params = []
    for nm, p in model.named_parameters():
        if hasattr(p, '_optim'): special_params.append(p)
        else: base_params.append(p)
    logging.info(f"Initialized rank {rank}")
    if rank == 0:
        logging.getLogger().setLevel(logging.INFO)
        logging.info(f"Model initialized as:\n {model}")
        os.makedirs(cfg.checkpoint_path, exist_ok=True)
        logging.info(f"checkpoints directory : {cfg.checkpoint_path}")
        logging.info(f"Model has {sum([p.numel() for p in model.parameters()]):,d} parameters.")
    # Optionally resume model weights / step counter / epoch from a checkpoint.
    steps = 0
    if cfg.resume_checkpoint != '' and os.path.isfile(cfg.resume_checkpoint):
        state_dict = torch.load(cfg.resume_checkpoint, map_location=device)
        model.load_state_dict(state_dict['model_state_dict'])
        steps = state_dict['steps'] + 1
        last_epoch = state_dict['epoch']
        print(f"Checkpoint loaded from {cfg.resume_checkpoint}. Resuming training from {steps} steps at epoch {last_epoch}")
    else:
        state_dict = None
        last_epoch = -1
    if cfg.distributed.n_gpus_per_node*cfg.distributed.n_nodes > 1:
        if rank == 0: logging.info("Multi-gpu detected")
        model = DDP(model, device_ids=[rank]).to(device)
    # Two param groups: default LR for base params, cfg.model_cfg.lr for S4 internals.
    optim = torch.optim.AdamW([
        {'params': base_params},
        {'params': special_params, 'lr': cfg.model_cfg.lr}
    ], cfg.start_lr, weight_decay=0)
    if state_dict is not None: optim.load_state_dict(state_dict['optim_state_dict'])
    train_df, valid_df = pd.read_csv(cfg.train_csv), pd.read_csv(cfg.valid_csv)
    trainset = ARDataset(train_df.path.tolist())
    train_sampler = DistributedSampler(trainset) if cfg.distributed.n_gpus_per_node*cfg.distributed.n_nodes > 1 else None
    train_loader = DataLoader(trainset, num_workers=cfg.num_workers,
                              shuffle=False if cfg.distributed.n_gpus_per_node*cfg.distributed.n_nodes > 1 else True,
                              sampler=train_sampler,
                              batch_size=cfg.batch_size,
                              pin_memory=False,
                              drop_last=True,
                              collate_fn=ARCollate(cfg.model_cfg.mu_levels, cfg.seq_len))
    # Validation loader and tensorboard writer exist only on rank 0.
    if rank == 0:
        validset = ARDataset(valid_df.path.tolist())
        validation_loader = DataLoader(validset, num_workers=cfg.num_workers, shuffle=False,
                                       sampler=None,
                                       batch_size=cfg.batch_size,
                                       pin_memory=False,
                                       drop_last=True,
                                       collate_fn=ARCollate(cfg.model_cfg.mu_levels, cfg.seq_len))
        sw = SummaryWriter(os.path.join(cfg.checkpoint_path, 'logs'))
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optim, mode=cfg.plateau_mode, factor=cfg.plateau_factor,
                                                           patience=cfg.plateau_patience, min_lr=cfg.plateau_min_lr)
    if state_dict is not None:
        scheduler.load_state_dict(state_dict['scheduler_state_dict'])
    if cfg.fp16:
        scaler = GradScaler()
        if state_dict is not None and 'scaler_state_dict' in state_dict:
            scaler.load_state_dict(state_dict['scaler_state_dict'])
    model.train()
    max_epochs = math.ceil(cfg.max_steps/len(train_loader))
    if rank == 0:
        mb = master_bar(range(max(0, last_epoch), max_epochs))
        sw.add_text('config', '```\n' + OmegaConf.to_yaml(cfg) + '\n```', global_step=steps)
        smooth_loss = None  # EMA of the training loss, for smoother tb curves
    else: mb = range(max(0, last_epoch), max_epochs)
    for epoch in mb:
        if rank == 0:
            start = time.time()
            mb.write("Epoch: {}".format(epoch+1))
        if cfg.distributed.n_gpus_per_node*cfg.distributed.n_nodes > 1:
            train_sampler.set_epoch(epoch)  # reshuffle shards per epoch
        if rank == 0: pb = progress_bar(enumerate(train_loader), total=len(train_loader), parent=mb)
        else: pb = enumerate(train_loader)
        if steps > cfg.max_steps: break
        for i, batch in pb:
            if rank == 0: start_b = time.time()
            x, y, lens = batch
            x = x.to(device, non_blocking=True)
            y = y.to(device, non_blocking=True) # (bs, seq_len)
            lens = lens.to(device, non_blocking=True)
            optim.zero_grad()
            with torch.cuda.amp.autocast(enabled=cfg.fp16):
                logits = model(x, lens) # (bs, seq_len, mu_levels)
                # print(y.shape, logits.shape)
                logits = logits.view(-1, cfg.model_cfg.mu_levels) # reshape for CE loss (N, C)
                y_ = y.view(-1)
                loss = loss_fn(logits, y_)
            # Backward + step; grad norm is always computed (1e8 cap == effectively no clip).
            if cfg.fp16:
                scaler.scale(loss).backward()
                scaler.unscale_(optim)
                if cfg.grad_clip > 0:
                    gnorm = torch.nn.utils.clip_grad.clip_grad_norm_(model.parameters(), cfg.grad_clip)
                else:
                    gnorm = torch.nn.utils.clip_grad.clip_grad_norm_(model.parameters(), 1e8)
                scaler.step(optim)
                scaler.update()
            else:
                loss.backward()
                if cfg.grad_clip > 0:
                    gnorm = torch.nn.utils.clip_grad.clip_grad_norm_(model.parameters(), cfg.grad_clip)
                else:
                    gnorm = torch.nn.utils.clip_grad.clip_grad_norm_(model.parameters(), 1e8)
                optim.step()
            if rank == 0:
                # Exponential moving average of the loss (alpha = 0.1).
                if smooth_loss is None: smooth_loss = float(loss.item())
                else: smooth_loss = smooth_loss + 0.1*(float(loss.item()) - smooth_loss)
                # STDOUT logging
                if steps % cfg.stdout_interval == 0:
                    mb.write('steps : {:,d}, loss : {:4.3f}, sec/batch : {:4.3f}, peak mem: {:5.2f}GB'. \
                        format(steps, loss.item(), time.time() - start_b, torch.cuda.max_memory_allocated()/1e9))
                    mb.child.comment = 'steps : {:,d}, loss : {:4.3f}, sec/batch : {:4.3f}'. \
                        format(steps, loss.item(), time.time() - start_b)
                # checkpointing
                if steps % cfg.checkpoint_interval == 0 and steps != 0:
                    checkpoint_path = f"{cfg.checkpoint_path}/ckpt_{steps:08d}.pt"
                    torch.save({
                        'model_state_dict': (model.module if cfg.distributed.n_gpus_per_node*cfg.distributed.n_nodes > 1 else model).state_dict(),
                        'optim_state_dict': optim.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict(),
                        'scaler_state_dict': (scaler.state_dict() if cfg.fp16 else None),
                        'steps': steps,
                        'epoch': epoch,
                        'cfg_yaml': OmegaConf.to_yaml(cfg)
                    }, checkpoint_path)
                    logging.info(f"Saved checkpoint to {checkpoint_path}")
                # Tensorboard summary logging
                if steps % cfg.summary_interval == 0:
                    sw.add_scalar("training/loss_smooth", smooth_loss, steps)
                    sw.add_scalar("training/loss_raw", loss.item(), steps)
                    sw.add_scalar("opt/lr", float(optim.param_groups[0]['lr']), steps)
                    sw.add_scalar('opt/grad_norm', float(gnorm), steps)
                # Validation
                if steps % cfg.validation_interval == 0 and steps != 0:
                    model.eval()
                    loss_fn.eval()
                    torch.cuda.empty_cache()
                    val_err_tot = 0
                    flat_logits = []
                    flat_lbls = []
                    with torch.no_grad():
                        for j, batch in progress_bar(enumerate(validation_loader), total=len(validation_loader), parent=mb):
                            x, y, lens = batch
                            y = y.to(device)
                            lens = lens.to(device)
                            logits = model(x.to(device), lens)
                            logits = logits.view(-1, cfg.model_cfg.mu_levels)
                            y_ = y.view(-1)
                            val_err_tot += loss_fn(logits, y_)
                            flat_logits.append(logits.cpu()) # (bs*seq_len, mu_levels)
                            flat_lbls.append(y_.cpu()) # bs*seq_len
                        # Mean validation loss and top-1 accuracy over all batches.
                        val_err = val_err_tot / (j+1)
                        flat_logits = torch.cat(flat_logits, dim=0)
                        flat_lbls = torch.cat(flat_lbls, dim=0)
                        preds = flat_logits.argmax(dim=-1)
                        acc = (preds == flat_lbls).sum()/len(flat_lbls)
                        sw.add_scalar('validation/acc', float(acc), steps)
                        sw.add_scalar("validation/loss", val_err, steps)
                        mb.write(f"validation run complete at {steps:,d} steps. validation loss: {val_err:5.4f}")
                        # trust we must do this to stop memory leaks
                        del flat_logits
                        del flat_lbls
                        gc.collect()
                    # Plateau scheduler reacts to the validation loss.
                    scheduler.step(val_err)
                    model.train()
                    loss_fn.train()
                sw.add_scalar("memory/max_allocated_gb", torch.cuda.max_memory_allocated()/1e9, steps)
                sw.add_scalar("memory/max_reserved_gb", torch.cuda.max_memory_reserved()/1e9, steps)
                torch.cuda.reset_peak_memory_stats()
                torch.cuda.reset_accumulated_memory_stats()
                torch.cuda.empty_cache()
                gc.collect() # why not twice to be sure
            steps += 1
            if steps > cfg.max_steps:
                print("FINISHED TRAINING")
                break
        if rank == 0:
            print('Time taken for epoch {} is {} sec\n'.format(epoch + 1, int(time.time() - start)))
        # NOTE(review): this runs every epoch on ALL ranks — `sw` and `val_err`
        # only exist on rank 0 (and `val_err` only after a validation run).
        # Looks like it should be inside the rank-0 branch / after the loop — confirm.
        sw.add_hparams(flatten_cfg(cfg), metric_dict={'validation/loss': val_err}, run_name=f'run-{cfg.checkpoint_path}')
    print("Training completed!")
def main():
    """Entry point: parse CLI config overrides, seed all RNGs, validate the GPU
    topology, and launch one training process per GPU (or a single process).

    Fixes: removed the unused binding of `parser.parse_known_args()` (kept the
    call so `--help` still prints the default config) and replaced deprecated
    `logging.warn` with `logging.warning`.
    """
    print('Initializing Training Process..')
    logging.getLogger().setLevel(logging.INFO)
    # argparse is only used to surface the default config in `--help`;
    # actual overrides come from OmegaConf's CLI parsing below.
    parser = argparse.ArgumentParser(usage='\n' + '-'*10 + ' Default config ' + '-'*10 + '\n' +
                                     str(OmegaConf.to_yaml(OmegaConf.structured(TrainConfig))))
    parser.parse_known_args()
    override_cfg = OmegaConf.from_cli()
    base_cfg = OmegaConf.structured(TrainConfig)
    cfg: TrainConfig = OmegaConf.merge(base_cfg, override_cfg)
    logging.info(f"Running with config:\n {OmegaConf.to_yaml(cfg)}")
    torch.backends.cudnn.benchmark = True
    # Seed every RNG source for reproducibility.
    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)
    random.seed(cfg.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(cfg.seed)
        if cfg.distributed.n_gpus_per_node > torch.cuda.device_count():
            raise AssertionError((f" Specified n_gpus_per_node ({cfg.distributed.n_gpus_per_node})"
                                  f" must be less than or equal to cuda device count ({torch.cuda.device_count()}) "))
    # Structured configs are closed; open_dict allows adding the derived field.
    with open_dict(cfg):
        cfg.batch_size_per_gpu = int(cfg.batch_size / cfg.distributed.n_gpus_per_node)
        if cfg.batch_size % cfg.distributed.n_gpus_per_node != 0:
            logging.warning(("Batch size does not evenly divide among GPUs in a node. "
                             "Likely unbalanced loads will occur."))
    logging.info(f'Batch size per GPU : {cfg.batch_size_per_gpu}')
    if cfg.distributed.n_gpus_per_node*cfg.distributed.n_nodes > 1:
        # One worker process per GPU; each calls train(rank, cfg).
        mp.spawn(train, nprocs=cfg.distributed.n_gpus_per_node, args=(cfg,))
    else:
        train(0, cfg)
# Standard script entry guard.
if __name__ == '__main__':
    main()
| 15,155 | 42.179487 | 155 | py |
"""
SaShiMi backbone.
Use this backbone in your own models. You'll also need to copy over the
standalone S4 layer, which can be found at
`state-spaces/src/models/sequence/ss/standalone/s4.py`.
It's Raw! Audio Generation with State-Space Models
Karan Goel, Albert Gu, Chris Donahue, Christopher Re.
Adapted from https://github.com/HazyResearch/state-spaces/blob/diffwave/sashimi/sashimi.py
"""
import warnings
import math
import numpy as np
import torch
from torch import Tensor
import torch.nn as nn
import torchaudio
import torch.nn.functional as F
from einops import rearrange
from fastprogress.fastprogress import progress_bar
from sashimi.s4 import S4, LinearActivation
from sashimi.config import AutoregressiveConfig, DiffusionConfig
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def calc_diffusion_step_embedding(diffusion_steps, diffusion_step_embed_dim_in):
    """
    Sinusoidal embedding of discrete diffusion steps.

    The step t is mapped to [sin(t*w_0), ..., sin(t*w_{h-1}), cos(t*w_0), ...,
    cos(t*w_{h-1})] where h = diffusion_step_embed_dim_in // 2 and the
    frequencies w_k decay geometrically from 1 down to 1/10000.
    Taken from https://github.com/philsyn/DiffWave-Vocoder

    Parameters:
        diffusion_steps (torch.long tensor, shape=(batchsize, 1)):
            diffusion steps for batch data
        diffusion_step_embed_dim_in (int, default=128):
            dimensionality of the embedding space for discrete diffusion steps

    Returns:
        the embedding vectors (torch.tensor, shape=(batchsize, diffusion_step_embed_dim_in))
    """
    assert diffusion_step_embed_dim_in % 2 == 0
    half_dim = diffusion_step_embed_dim_in // 2
    # Geometric frequency ladder: exp(-k * ln(10000)/(h-1)) for k = 0..h-1.
    scale = np.log(10000) / (half_dim - 1)
    freqs = torch.exp(torch.arange(half_dim) * -scale).to(diffusion_steps.device)
    args = diffusion_steps * freqs
    return torch.cat((torch.sin(args), torch.cos(args)), 1)
class Conv(nn.Module):
    """
    Weight-normalized dilated 1-D convolution, length-preserving ("same" padding).
    from https://github.com/ksw0306/FloWaveNet/blob/master/modules.py

    NOTE: kaiming init is applied AFTER weight_norm (order kept from the
    original); the pre-forward hook recomputes `weight` from (g, v), so the
    effective initial weights come from weight_norm's decomposition.
    """
    def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1):
        super(Conv, self).__init__()
        # Padding chosen so the output length equals the input length.
        self.padding = dilation * (kernel_size - 1) // 2
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, dilation=dilation, padding=self.padding)
        self.conv = nn.utils.weight_norm(self.conv)
        nn.init.kaiming_normal_(self.conv.weight)
    def forward(self, x):
        return self.conv(x)
class ZeroConv1d(nn.Module):
    """
    1x1 convolution whose weight and bias start at zero, so the layer
    initially outputs zeros.
    From https://github.com/ksw0306/FloWaveNet/blob/master/modules.py but the scale parameter is removed
    """
    def __init__(self, in_channel, out_channel):
        super(ZeroConv1d, self).__init__()
        self.conv = nn.Conv1d(in_channel, out_channel, kernel_size=1, padding=0)
        # Zero init: the block contributes nothing until trained.
        self.conv.weight.data.zero_()
        self.conv.bias.data.zero_()
    def forward(self, x):
        return self.conv(x)
class DownPool(nn.Module):
    """Downsampling layer: folds `pool` consecutive time steps into the channel
    dimension and projects H*pool -> H*expand, shrinking length by `pool`.

    Also supports single-step streaming inference via `step`, buffering inputs
    until `pool` of them have arrived.
    """
    def __init__(self, d_input, expand, pool):
        super().__init__()
        self.d_output = d_input * expand
        self.pool = pool
        self.linear = LinearActivation(
            d_input * pool,
            self.d_output,
            transposed=True,
            weight_norm=True,
        )
    def forward(self, x, **kwargs):
        # (..., H, L) -> (..., H*pool, L/pool): stack `pool` adjacent steps into channels.
        x = rearrange(x, '... h (l s) -> ... (h s) l', s=self.pool)
        x = self.linear(x)
        return x, None
    def step(self, x, state, **kwargs):
        """
        x: (..., H)
        """
        # Streaming: buffer inputs in `state`; emit one pooled output every
        # `pool` steps, otherwise emit None.
        if x is None: return None, state
        state.append(x)
        if len(state) == self.pool:
            # Concatenate the buffered steps and project, like forward() on one frame.
            x = rearrange(torch.stack(state, dim=-1), '... h s -> ... (h s)')
            x = x.unsqueeze(-1)
            x = self.linear(x)
            x = x.squeeze(-1)
            return x, []
        else:
            return None, state
    def default_state(self, *args, **kwargs):
        # Streaming state is simply the list of buffered step inputs.
        return []
class UpPool(nn.Module):
    """Upsampling layer: projects H -> (H/expand)*pool and unfolds the channel
    dimension into `pool` time steps, growing length by `pool`. With
    `causal=True` the output is shifted right by one step so position t only
    depends on inputs < t.

    `step` streams one output per call, refilling its queue of `pool` outputs
    whenever a new input arrives.
    """
    def __init__(self, d_input, expand, pool, causal=True):
        super().__init__()
        self.d_output = d_input // expand
        self.pool = pool
        self.causal = causal
        self.linear = LinearActivation(
            d_input,
            self.d_output * pool,
            transposed=True,
            weight_norm=True,
        )
    def forward(self, x, **kwargs):
        x = self.linear(x)
        if self.causal:
            # Shift to ensure causality
            x = F.pad(x[..., :-1], (1, 0))
        # (..., H*pool, L) -> (..., H, L*pool): unfold channels back into time.
        x = rearrange(x, '... (h s) l -> ... h (l s)', s=self.pool)
        return x, None
    def step(self, x, state, **kwargs):
        """
        x: (..., H)
        """
        # Pop the next queued output; if the queue empties, a new input must be
        # provided to refill it (and x must be None otherwise).
        assert len(state) > 0
        y, state = state[0], state[1:]
        if len(state) == 0:
            assert x is not None
            x = x.unsqueeze(-1)
            x = self.linear(x)
            x = x.squeeze(-1)
            x = rearrange(x, '... (h s) -> ... h s', s=self.pool)
            state = list(torch.unbind(x, dim=-1))
        else: assert x is None
        return y, state
    def default_state(self, *batch_shape, device=None):
        # Start with `pool` zero outputs — this realizes the causal shift in streaming mode.
        state = torch.zeros(batch_shape + (self.d_output, self.pool), device=device) # (batch, h, s)
        state = list(torch.unbind(state, dim=-1)) # List of (..., H)
        return state
class FFBlock(nn.Module):
    """Position-wise feed-forward block (inverted bottleneck with GELU),
    operating on transposed (B, D, L) inputs. Follows the (output, state)
    protocol of the S4 blocks; it is stateless, so state is always passed through.
    """
    def __init__(self, d_model, expand=2, dropout=0.0):
        """
        Args:
            d_model: dimension of input
            expand: expansion factor for inverted bottleneck
            dropout: dropout rate
        """
        super().__init__()
        up_proj = LinearActivation(
            d_model,
            d_model * expand,
            transposed=True,
            activation='gelu',
            activate=True,
        )
        down_proj = LinearActivation(
            d_model * expand,
            d_model,
            transposed=True,
            activation=None,
            activate=False,
        )
        drop = nn.Dropout2d(dropout) if dropout > 0.0 else nn.Identity()
        self.ff = nn.Sequential(up_proj, drop, down_proj)
    def forward(self, x, **kwargs):
        # Stateless layer: None mirrors the (output, state) interface.
        return self.ff(x), None
    def default_state(self, *args, **kwargs):
        return None
    def step(self, x, state, **kwargs):
        # x: (B, D) — add/remove a length-1 time axis around the convolutional FF.
        return self.ff(x.unsqueeze(-1)).squeeze(-1), state
class ResidualBlock(nn.Module):
    """Pre-norm residual wrapper: y = x + dropout(layer(norm(x))).

    Wraps any inner layer following the (output, state) protocol (S4, FFBlock).
    Note dropout is applied only in `forward`, not in the streaming `step`.
    """
    def __init__(
        self,
        d_model,
        layer,
        dropout=0.0,
    ):
        """
        Args:
            d_model: dimension of the model
            layer: inner module returning (output, state)
            dropout: dropout rate
        """
        super().__init__()
        self.layer = layer
        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout2d(dropout) if dropout > 0.0 else nn.Identity()
    def forward(self, x, **kwargs):
        """
        Input x is shape (B, d_input, L)
        """
        # Pre-norm over the channel dim (LayerNorm wants channels last).
        normed = self.norm(x.transpose(-1, -2)).transpose(-1, -2)
        # Inner layer; state is ignored during (parallel) training.
        out, _ = self.layer(normed)
        # Dropout on the layer output, then the residual connection.
        return x + self.dropout(out), None
    def default_state(self, *args, **kwargs):
        return self.layer.default_state(*args, **kwargs)
    def step(self, x, state, **kwargs):
        # Single-step streaming variant of forward (no dropout).
        normed = self.norm(x)
        out, state = self.layer.step(normed, state, **kwargs)
        return x + out, state
class DiffWaveS4Block(nn.Module):
    """
    Modified DiffWave block that uses S4.
    Residual block: pre-norm, diffusion-step conditioning, bidirectional S4,
    optional mel-spectrogram (local) conditioning, then a residual add.
    Taken from https://github.com/philsyn/DiffWave-Vocoder
    """
    def __init__(self,
                 d_model,
                 diffusion_step_embed_dim_out=512,
                 unconditional=False,
                 mel_upsample=[16, 16],  # NOTE(review): mutable default — shared across instances; confirm harmless
                 l_max=16000,
                 postact=None,
                 tie_state=False
                 ):
        super().__init__()
        self.d_model = d_model
        # the layer-specific fc for diffusion step embedding
        self.fc_t = nn.Linear(diffusion_step_embed_dim_out, self.d_model)
        self.layer = S4(
            d_model,
            bidirectional=True,
            hurwitz=True, # use the Hurwitz parameterization for stability
            tie_state=tie_state, # tie SSM parameters across d_state in the S4 layer
            trainable={
                'dt': True,
                'A': True,
                'P': True,
                'B': True,
            }, # train all internal S4 parameters
            l_max=l_max,
            postact=postact
        )
        self.norm = nn.LayerNorm(d_model)
        self.unconditional = unconditional
        if not self.unconditional:
            # add mel spectrogram upsampler and conditioner conv1x1 layer
            # (two transposed convs, each upsampling time by its stride s)
            self.upsample_conv2d = torch.nn.ModuleList()
            for s in mel_upsample:
                conv_trans2d = torch.nn.ConvTranspose2d(1, 1, (3, 2 * s), padding=(1, s // 2), stride=(1, s))
                conv_trans2d = torch.nn.utils.weight_norm(conv_trans2d)
                torch.nn.init.kaiming_normal_(conv_trans2d.weight)
                self.upsample_conv2d.append(conv_trans2d)
            self.mel_conv = Conv(80, self.d_model, kernel_size=1)  # 80 is mel bands
    def forward(self, x, diffusion_step_embed, mel_spec=None):
        """x: (B, d_model, L); diffusion_step_embed: (B, diffusion_step_embed_dim_out);
        mel_spec: (B, 80, frames) or None. Returns (y, None) with y shaped like x."""
        y = x
        B, C, L = x.shape
        assert C == self.d_model
        # Pre-norm over channels (LayerNorm expects channels last).
        y = self.norm(y.transpose(-1, -2)).transpose(-1, -2)
        # add in diffusion step embedding (broadcast over time)
        part_t = self.fc_t(diffusion_step_embed)
        y = y + part_t.unsqueeze(-1)
        # S4 layer
        y, _ = self.layer(y)
        # add mel spectrogram as (local) conditioner
        if mel_spec is not None:
            assert not self.unconditional
            # Upsample spectrogram to size of audio
            mel_spec = torch.unsqueeze(mel_spec, dim=1)
            mel_spec = F.leaky_relu(self.upsample_conv2d[0](mel_spec), 0.4)
            mel_spec = F.leaky_relu(self.upsample_conv2d[1](mel_spec), 0.4)
            mel_spec = torch.squeeze(mel_spec, dim=1)
            assert(mel_spec.size(2) >= L)
            if mel_spec.size(2) > L:
                # Trim any upsampling overshoot to the audio length.
                mel_spec = mel_spec[:, :, :L]
            mel_spec = self.mel_conv(mel_spec)
            y = y + mel_spec
        # Residual
        y = x + y
        return y, None
class Sashimi(nn.Module):
    def __init__(
        self,
        d_model=64,
        n_layers=8,
        pool=[4, 4],
        expand=2,
        ff=2,
        bidirectional=False,
        glu=True,
        unet=False,
        diffwave=False,
        dropout=0.0,
        **kwargs,
    ):
        """
        SaShiMi model backbone.
        Args:
            d_model: dimension of the model. We generally use 64 for all our experiments.
            n_layers: number of (Residual (S4) --> Residual (FF)) blocks at each pooling level.
                We use 8 layers for our experiments, although we found that increasing layers even further generally
                improves performance at the expense of training / inference speed.
            pool: pooling factor at each level. Pooling shrinks the sequence length at lower levels.
                We experimented with a pooling factor of 4 with 1 to 4 tiers of pooling and found 2 tiers to be best.
                It's possible that a different combination of pooling factors and number of tiers may perform better.
            expand: expansion factor when pooling. Features are expanded (i.e. the model becomes wider) at lower levels of the architecture.
                We generally found 2 to perform best (among 2, 4).
            ff: expansion factor for the FF inverted bottleneck. We generally found 2 to perform best (among 2, 4).
            bidirectional: use bidirectional S4 layers. Bidirectional layers are suitable for use with non-causal models
                such as diffusion models like DiffWave.
            glu: use gated linear unit in the S4 layers. Adds parameters and generally improves performance.
            unet: use a unet-like architecture, adding (Residual (S4) --> Residual (FF)) layers before downpooling.
                All else fixed, this slows down inference (and slightly slows training), but generally improves performance.
                We use this variant when dropping in SaShiMi into diffusion models, and this should generally be preferred
                for non-autoregressive models.
            diffwave: switch to DiffWave model with SaShiMi backbone. We use this variant for our diffusion
                models. Note that S4 is bidirectional by default in this variant, and we recommend switching
                on the `unet` argument as well. Additional kwargs for
                - `diffusion_step_embed_dim_in` (default 128)
                - `diffusion_step_embed_dim_mid` (default 512)
                - `diffusion_step_embed_dim_out` (default 512)
                - `unconditional` (default False)
                - `mel_upsample` (default [16, 16])
                can be passed in to control the SaShiMi diffusion model.
            dropout: dropout rate. Default to 0.0, since we haven't found settings where SaShiMi overfits.
        """
        super().__init__()
        # H tracks the current width; it grows by `expand` at each down-pool.
        self.d_model = H = d_model
        self.unet = unet
        self.diffwave = diffwave
        # Bidirectional S4 layers are always used in DiffWave
        bidirectional = bidirectional or diffwave
        if self.diffwave and not self.unet:
            warnings.warn("DiffWave is not recommended without UNet. Consider using UNet instead.")
        def s4_block(dim, kwargs):
            # One residual S4 block at width `dim`.
            layer = S4(
                d_model=dim,
                d_state=64,
                bidirectional=bidirectional,
                postact='glu' if glu else None,
                dropout=dropout,
                transposed=True,
                hurwitz=True, # use the Hurwitz parameterization for stability
                tie_state=True, # tie SSM parameters across d_state in the S4 layer
                # trainable={
                #     'dt': True,
                #     'A': True,
                #     'P': True,
                #     'B': True,
                # }, # train all internal S4 parameters
                trainable=kwargs['trainable'],
                lr=kwargs['lr'],
                l_max=kwargs['l_max'],
            )
            return ResidualBlock(
                d_model=dim,
                layer=layer,
                dropout=dropout,
            )
        def ff_block(dim):
            # One residual feed-forward block at width `dim`.
            layer = FFBlock(
                d_model=dim,
                expand=ff,
                dropout=dropout,
            )
            return ResidualBlock(
                d_model=dim,
                layer=layer,
                dropout=dropout,
            )
        if diffwave:
            # Setup for DiffWave SaShiMi model
            # Borrows code from https://github.com/philsyn/DiffWave-Vocoder
            self.diffusion_step_embed_dim_in = kwargs.get('diffusion_step_embed_dim_in', 128)
            self.diffusion_step_embed_dim_mid = kwargs.get('diffusion_step_embed_dim_mid', 512)
            self.diffusion_step_embed_dim_out = kwargs.get('diffusion_step_embed_dim_out', 512)
            in_channels = 1
            out_channels = 1
            # Initial conv1x1 with relu
            self.init_conv = nn.Sequential(Conv(in_channels, d_model, kernel_size=1), nn.ReLU())
            # the shared two fc layers for diffusion step embedding
            self.fc_t1 = nn.Linear(self.diffusion_step_embed_dim_in, self.diffusion_step_embed_dim_mid)
            self.fc_t2 = nn.Linear(self.diffusion_step_embed_dim_mid, self.diffusion_step_embed_dim_out)
            # Final conv1x1 -> relu -> zeroconv1x1
            self.final_conv = nn.Sequential(
                Conv(d_model, d_model, kernel_size=1),
                nn.ReLU(),
                ZeroConv1d(d_model, out_channels),
            )
            # Shadow the plain s4_block above: DiffWave blocks consume the
            # diffusion step embedding and optional mel conditioning.
            def s4_block(dim, kwargs):
                return DiffWaveS4Block(
                    d_model=dim,
                    diffusion_step_embed_dim_out=self.diffusion_step_embed_dim_out,
                    unconditional=kwargs.get('unconditional', True),
                    mel_upsample=kwargs.get('mel_upsample', [16, 16]),
                    l_max=kwargs['l_max'],
                    postact='glu' if glu else None,
                    tie_state=kwargs['tie_state']
                )
        # Down blocks
        d_layers = []
        for p in pool:
            if unet:
                # Add blocks in the down layers
                for _ in range(n_layers):
                    d_layers.append(s4_block(H, kwargs))
                    if ff > 0: d_layers.append(ff_block(H))
            # Add sequence downsampling and feature expanding
            d_layers.append(DownPool(H, expand, p))
            H *= expand
        # Center block
        c_layers = []
        for _ in range(n_layers):
            c_layers.append(s4_block(H, kwargs))
            if ff > 0: c_layers.append(ff_block(H))
        # Up blocks
        u_layers = []
        for p in pool[::-1]:
            block = []
            H //= expand
            block.append(UpPool(H * expand, expand, p, causal=not bidirectional))
            for _ in range(n_layers):
                block.append(s4_block(H, kwargs))
                if ff > 0: block.append(ff_block(H))
            u_layers.append(nn.ModuleList(block))
        self.d_layers = nn.ModuleList(d_layers)
        self.c_layers = nn.ModuleList(c_layers)
        self.u_layers = nn.ModuleList(u_layers)
        self.norm = nn.LayerNorm(H)
        # Up-pooling must have restored the original width.
        assert H == d_model
    def forward(self, x, state=None, mel_spec=None):
        """
        input: (batch, length, d_input)
        output: (batch, length, d_output)

        In diffwave mode, x is a tuple (audio, diffusion_steps).
        """
        if self.diffwave:
            audio, diffusion_steps = x
            x = audio
            # BLD -> BDL
            x = x.transpose(1, 2)
            x = self.init_conv(x)
            diffusion_step_embed = calc_diffusion_step_embedding(
                diffusion_steps,
                self.diffusion_step_embed_dim_in,
            )
            diffusion_step_embed = swish(self.fc_t1(diffusion_step_embed))
            diffusion_step_embed = swish(self.fc_t2(diffusion_step_embed))
            # Additional kwargs to pass onto the DiffWaveS4Block
            layer_kwargs = dict(diffusion_step_embed=diffusion_step_embed, mel_spec=mel_spec)
        else:
            # BLD -> BDL
            x = x.transpose(1, 2)
            # No additional kwargs to pass onto the S4 & FF blocks
            layer_kwargs = dict()
        # Down blocks
        # `outputs` is a stack of activations consumed as skip connections on the way up.
        outputs = []
        outputs.append(x)
        for layer in self.d_layers:
            x, _ = layer(x, **layer_kwargs)
            outputs.append(x)
        # Center block
        for layer in self.c_layers:
            x, _ = layer(x, **layer_kwargs)
        x = x + outputs.pop() # add a skip connection to the last output of the down block
        # Up blocks
        for block in self.u_layers:
            if self.unet:
                for layer in block:
                    x, _ = layer(x, **layer_kwargs)
                    x = x + outputs.pop() # skip connection
            else:
                for layer in block:
                    x, _ = layer(x, **layer_kwargs)
                    if isinstance(layer, UpPool):
                        # Before modeling layer in the block
                        x = x + outputs.pop()
                        outputs.append(x)
                x = x + outputs.pop() # add a skip connection from the input of the modeling part of this up block
        # feature projection
        x = x.transpose(1, 2) # (batch, length, expand)
        x = self.norm(x)
        if self.diffwave:
            x = self.final_conv(x.transpose(1, 2)).transpose(1, 2)
        return x, None # required to return a state
    def default_state(self, *args, **kwargs):
        # One state slot per layer, in the same order step() will consume them.
        layers = list(self.d_layers) + list(self.c_layers) + [layer for block in self.u_layers for layer in block]
        return [layer.default_state(*args, **kwargs) for layer in layers]
    def step(self, x, state, **kwargs):
        """
        input: (batch, d_input)
        output: (batch, d_output)

        Single-step recurrent forward. A DownPool step may return x=None while
        it accumulates inputs; in that case the deeper layers are skipped and
        their states are carried through unchanged.
        """
        # States will be popped in reverse order for convenience
        state = state[::-1]
        # Down blocks
        outputs = [] # Store all layers for SaShiMi
        next_state = []
        for layer in self.d_layers:
            outputs.append(x)
            x, _next_state = layer.step(x, state=state.pop(), **kwargs)
            next_state.append(_next_state)
            if x is None: break
        # Center block
        if x is None:
            # Skip computations since we've downsized
            skipped = len(self.d_layers) - len(outputs)
            for _ in range(skipped + len(self.c_layers)):
                next_state.append(state.pop())
            if self.unet:
                # In UNet mode each down tier contributes 3 layers per skipped
                # pool stage on the up path.
                # NOTE(review): the `skipped//3` indexing assumes a fixed
                # layers-per-tier layout — confirm against the constructor.
                for i in range(skipped):
                    next_state.append(state.pop())
                u_layers = list(self.u_layers)[skipped//3:]
            else:
                for i in range(skipped):
                    for _ in range(len(self.u_layers[i])):
                        next_state.append(state.pop())
                u_layers = list(self.u_layers)[skipped:]
        else:
            outputs.append(x)
            for layer in self.c_layers:
                x, _next_state = layer.step(x, state=state.pop(), **kwargs)
                next_state.append(_next_state)
            x = x + outputs.pop()
            u_layers = self.u_layers
        for block in u_layers:
            if self.unet:
                for layer in block:
                    x, _next_state = layer.step(x, state=state.pop(), **kwargs)
                    next_state.append(_next_state)
                    x = x + outputs.pop()
            else:
                for layer in block:
                    x, _next_state = layer.step(x, state=state.pop(), **kwargs)
                    next_state.append(_next_state)
                    if isinstance(layer, UpPool):
                        # Before modeling layer in the block
                        x = x + outputs.pop()
                        outputs.append(x)
                x = x + outputs.pop()
        # feature projection
        x = self.norm(x)
        return x, next_state
    def setup_rnn(self, mode='dense'):
        """
        Convert the SaShiMi model to a RNN for autoregressive generation.
        Args:
            mode: S4 recurrence mode. Using `diagonal` can speed up generation by 10-20%.
                `linear` should be faster theoretically but is slow in practice since it
                dispatches more operations (could benefit from fused operations).
                Note that `diagonal` could potentially be unstable if the diagonalization is numerically unstable
                (although we haven't encountered this case in practice), while `dense` should always be stable.
        """
        assert mode in ['dense', 'diagonal', 'linear']
        # Every submodule exposing setup_step (the S4 kernels) is switched.
        for module in self.modules():
            if hasattr(module, 'setup_step'): module.setup_step(mode)
class SashimiAR(nn.Module):
    """Autoregressive wrapper: SaShiMi backbone + mu-law token embedding and
    a linear head over mu-law levels. Supports sampling and exact NLL."""
    supports_nll: bool = True
    def __init__(self, cfg: AutoregressiveConfig) -> None:
        super().__init__()
        self.sashimi = Sashimi(**cfg)
        self.mu_embedder = nn.Embedding(cfg.mu_levels, cfg.d_model)
        self.head = nn.Linear(cfg.d_model, cfg.mu_levels)
        self.cfg = cfg
    def forward(self, x: Tensor, lengths: Tensor) -> Tensor:
        """ Accepts mu-law encoded batch `x` (bs, seq_len), int64, returns float logits of same shape """
        # NOTE(review): `lengths` is accepted but unused here — confirm whether
        # masking was intended upstream.
        x = self.mu_embedder(x) # (bs, seq_len, dim)
        y, _ = self.sashimi(x)
        logits = self.head(y) # (bs, seq_len, mu_levels)
        return logits
    @torch.inference_mode()
    def unconditional_generate(self, N: int, nucleus_p=1.0, progress=True, mb=None) -> Tensor:
        """ Generate `N` audio samples, returning a tensor of shape (N, 16000) """
        # NOTE(review): progress=True requires `progress_bar` (fastprogress) to be
        # importable in this module's namespace; it is only imported under
        # __main__ in this file — verify before calling from other modules.
        mu_tfm = torchaudio.transforms.MuLawEncoding(self.cfg.mu_levels)
        mu_detfm = torchaudio.transforms.MuLawDecoding(self.cfg.mu_levels)
        bs = N
        device = next(self.parameters()).device
        # Seed the recurrence with the mu-law encoding of silence.
        x = torch.zeros(bs, 1)
        x = mu_tfm(x).to(device)
        x = self.mu_embedder(x)
        x = x.squeeze(1)
        self.sashimi.setup_rnn() # setup S4 layers in recurrent mode
        # alternately, use sashimi.setup_rnn('diagonal') for a speedup
        # Run recurrence
        ys = []
        state = self.sashimi.default_state(*x.shape[:1], device=device)
        if progress: pb = progress_bar(range(16000), parent=mb)
        else: pb = range(16000)
        for i in pb:
            y_, state = self.sashimi.step(x, state)
            logits = self.head(y_)
            ## Special sampling methods come in here.
            # nucleus sampling
            # apply softmax to convert to probabilities
            probs = F.softmax(logits, dim=-1).detach()
            sorted_probs, sorted_inds = torch.sort(probs, dim=-1, descending=True)
            cs = torch.cumsum(sorted_probs, dim=-1)
            sorted_indices_to_remove = cs > nucleus_p
            # Shift the indices to the right to keep also the first token above the threshold
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0
            for b in range(bs):
                indices_to_remove = sorted_inds[b, sorted_indices_to_remove[b]]
                probs[b, indices_to_remove] = 0.0
            # Renormalize:
            probs = probs / probs.sum(dim=-1)[:, None]
            # Sample next sample:
            ix = torch.multinomial(probs, num_samples=1).squeeze(1)
            next_input = ix
            if progress: pb.comment = f"sampled token: {int(next_input[0])}"
            # Feed the sampled token back in as the next input.
            x = self.mu_embedder(next_input)
            ys.append(next_input.cpu())
        ys = torch.stack(ys, dim=1) # (bs, 16000) mu-law tokens
        audio = mu_detfm(ys)
        return audio
    @torch.inference_mode()
    def nll(self, wavs: Tensor, progress=True, mb=None) -> Tensor:
        """ Gets likelihood for `wavs` (N, 16000), returning per-sample neg log likelihoods (N,) """
        mu_tfm = torchaudio.transforms.MuLawEncoding(self.cfg.mu_levels)
        device = wavs.device
        bs = wavs.shape[0]
        # Left-pad with one zero so position i predicts sample i.
        x = F.pad(wavs, (1,0)) # (bs, 16001)
        x = mu_tfm(x).to(device)
        # keep ground-truth labels
        gt_x = x.clone()
        x = x[:, 0] # (bs,) -- token index of the injected leading zero
        x = self.mu_embedder(x)
        x = x.squeeze(1)
        logprobs = torch.empty_like(wavs).double().fill_(torch.nan) # base e logprobs
        self.sashimi.setup_rnn() # setup S4 layers in recurrent mode
        # alternately, use sashimi.setup_rnn('diagonal') for a speedup
        # Run recurrence
        state = self.sashimi.default_state(*x.shape[:1], device=device)
        if progress: pb = progress_bar(range(16000), parent=mb)
        else: pb = range(16000)
        for i in pb:
            y_, state = self.sashimi.step(x, state)
            logits = self.head(y_)
            # float64 softmax for a numerically stable log-likelihood sum.
            probs = F.softmax(logits.double(), dim=-1)
            next_input = gt_x[:, i+1]
            logprobs[:, i] = torch.log(torch.gather(probs, -1, next_input[..., None])).squeeze(-1)
            if progress: pb.comment = f"sampled token: {int(next_input[0])} | logprob: {sum(logprobs[0, :i])}"
            # Teacher-forced: the ground-truth token is the next input.
            x = self.mu_embedder(next_input)
        nll = -logprobs
        mean_nll = nll.mean(dim=-1)
        return mean_nll # per-sample mean NLL, shape (N,)
class SashimiDiffWave(nn.Module):
    """Thin adapter exposing a SaShiMi diffusion backbone through the
    DiffWave training interface (2-D waveforms in, 2-D noise estimates out)."""
    def __init__(self, cfg: DiffusionConfig) -> None:
        super().__init__()
        self.sashimi = Sashimi(**cfg)
        print(f"Initialized DiffWave Sashimi with {sum([p.numel() for p in self.parameters()]):,d} parameters.")
    def forward(self, x:Tensor, diffusion_steps: Tensor, spectrogram=None) -> Tensor:
        """ Accepts raw waveforms in range [-1, 1] `x` of shape (bs, seq_len) and
        `diffusion_steps` of shape (bs,). `spectrogram` is unused and should not be given.
        Only done for compatibility with diffwave training code.
        """
        # The backbone expects trailing singleton feature/step axes.
        wav = x.unsqueeze(-1)                   # (bs, seq_len, 1)
        steps = diffusion_steps.unsqueeze(-1)   # (bs, 1)
        out, _ = self.sashimi((wav, steps))     # (bs, seq_len, 1)
        return out.squeeze(-1)
if __name__ == '__main__':
    # Smoke test for both operating modes. Requires a CUDA device.
    from fastprogress.fastprogress import progress_bar
    print("Testing autoregressive method")
    # Example: SaShiMi for autoregressive modeling
    model = Sashimi(n_layers=2).cuda()
    # Print parameter count
    print(f"Total parameters = {sum(p.numel() for p in model.parameters()):,d}")
    model.eval()
    with torch.no_grad():
        # Forward in convolutional mode: used for training SaShiMi
        x = torch.randn(3, 10240, 64).cuda()
        y, _ = model(x)
        # Setup the SaShiMi RNN
        model.setup_rnn('diagonal')
        # Forward in recurrent mode: used for autoregressive generation at inference time
        ys = []
        state = model.default_state(*x.shape[:1], device='cuda')
        for i in progress_bar(range(10240)):
            y_, state = model.step(x[:, i], state)
            ys.append(y_.detach().cpu())
        ys = torch.stack(ys, dim=1)
        # Convolutional and recurrent outputs should have matching shapes.
        print(y.shape, ys.shape)
    print("Testing diffusion method")
    # Example: SaShiMi for diffusion modeling
    model = Sashimi(n_layers=2, diffwave=True, unet=True).cuda()
    # Print parameter count
    print(sum(p.numel() for p in model.parameters()))
    model.eval()
    with torch.no_grad():
        # Forward (only) in convolutional mode
        x = torch.randn(3, 10240, 1).cuda() # (bs, seq_len, 1)
        steps = torch.randint(0, 4, (3, 1)).cuda()
        y, _ = model((x, steps))
        print(y.shape)
| 30,791 | 34.638889 | 140 | py |
simple-sashimi | simple-sashimi-master/sashimi/dataset.py | from typing import List, Tuple
import torch
from torch import Tensor
import torch.nn.functional as F
import torchaudio
from torch.utils.data import Dataset, DataLoader
from pathlib import Path
import logging
import random
from glob import glob
from torch.utils.data.distributed import DistributedSampler
import os
import numpy as np
import pandas as pd
class ARDataset(Dataset):
    """Dataset of raw audio utterances, loaded lazily from a list of paths."""
    def __init__(self, paths):
        super().__init__()
        self.filenames = paths
        logging.info(f"Dataset: found {len(self.filenames):,d} utterance files.")
    def __len__(self):
        return len(self.filenames)
    def __getitem__(self, idx: int) -> Tensor:
        # Load one utterance, drop the channel axis, clamp to the valid range.
        wav, _ = torchaudio.load(self.filenames[idx])
        return wav.squeeze(0).clamp(-1.0, 1.0)
class ARCollate():
    """Collate raw waveforms into (input, target, length) batches for
    next-sample prediction.

    Each clip is mu-law quantized and either zero-padded (short clips) or
    randomly cropped (long clips) to `seq_len + 1` samples, so that inputs
    x = s[:-1] and targets y = s[1:] both have length `seq_len`.
    """
    def __init__(self, mu_quant_bins=256, seq_len=16000) -> None:
        self.seq_len = seq_len
        self.mu_tfm = torchaudio.transforms.MuLawEncoding(mu_quant_bins)
    def __call__(self, xs: List[Tensor]) -> Tuple[Tensor]:
        # One extra sample so the shift-by-one split below works.
        wavs = torch.zeros(len(xs), self.seq_len + 1, dtype=torch.float)
        lengths = []
        for i in range(len(xs)):
            l = xs[i].shape[0]
            if l < self.seq_len + 1:  # need to add one for next-token prediction
                # Short clip: keep everything, leave the tail zero-padded.
                lengths.append(l)
                wavs[i, :l] = xs[i][:l]
            else:
                # Long clip: random crop of seq_len + 1 contiguous samples.
                # BUG FIX: the crop must be seq_len + 1 long — a seq_len crop
                # cannot be assigned into the (seq_len + 1)-wide row.
                start = random.randint(0, l - self.seq_len - 1)
                wavs[i] = xs[i][start:start + self.seq_len + 1]
                lengths.append(self.seq_len)
        signal = self.mu_tfm(wavs)
        lengths = torch.tensor(lengths).long()
        # Shift by one: position t of x predicts position t of y.
        x = signal[:, :-1]
        y = signal[:, 1:]
        return x, y, lengths
# -------------------------------
# Diffusion based code,
# adapted from https://github.com/lmnt-com/diffwave
class UnconditionalDataset(Dataset):
    """Audio dataset for unconditional training; file paths come from the
    `path` column of one or more CSV manifests."""
    def __init__(self, df_paths):
        super().__init__()
        self.filenames = []
        for manifest in df_paths:
            self.filenames += pd.read_csv(manifest).path.tolist()
        print(f"Dataset initialied with {len(self.filenames):,d} utterances.")
    def __len__(self):
        return len(self.filenames)
    def __getitem__(self, idx):
        audio_filename = self.filenames[idx]
        spec_filename = f'{audio_filename}.spec.npy'  # retained from upstream; unused here
        wav, _ = torchaudio.load(audio_filename)
        return {
            'audio': wav.squeeze(0).clamp(-1.0, 1.0),
            'spectrogram': None
        }
class DiffusionCollator:
    """Collates records of {'audio', 'spectrogram'} into fixed-length batches
    for diffusion training (unconditional mode only)."""
    def __init__(self, params):
        # params must expose .unconditional (bool) and .audio_len (int).
        self.params = params
    def collate(self, minibatch):
        for record in minibatch:
            if self.params.unconditional:
                # Filter out records that aren't long enough.
                if len(record['audio']) < self.params.audio_len:
                    # NOTE(review): this inner check is always True inside the
                    # outer unconditional branch, so the `else` (record drop)
                    # is dead code here — retained from the conditional variant.
                    if self.params.unconditional:
                        n_pad = self.params.audio_len - len(record['audio'])
                        record['audio'] = F.pad(record['audio'], (0, n_pad), mode='constant', value=0)
                    else:
                        del record['spectrogram']
                        del record['audio']
                        continue
                # Random crop of exactly audio_len samples.
                start = random.randint(0, record['audio'].shape[-1] - self.params.audio_len)
                end = start + self.params.audio_len
                record['audio'] = record['audio'][start:end]
                # Defensive re-pad (no-op after the padding above); also
                # converts the tensor to a numpy array for np.stack below.
                record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant')
            else: raise NotImplementedError("Only unconditional sashimi available.")
        audio = np.stack([record['audio'] for record in minibatch if 'audio' in record])
        if self.params.unconditional:
            return {
                'audio': torch.from_numpy(audio),
                'spectrogram': None,
            }
        spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record])
        return {
            'audio': torch.from_numpy(audio),
            'spectrogram': torch.from_numpy(spectrogram),
        }
def from_path(data_dirs, params, is_distributed=False):
    """Build a DataLoader over the unconditional dataset described by the
    CSV manifests in `data_dirs`; uses a DistributedSampler when requested."""
    dataset = UnconditionalDataset(data_dirs)
    # Shuffling is delegated to the sampler in the distributed case.
    sampler = DistributedSampler(dataset) if is_distributed else None
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=params.batch_size,
        collate_fn=DiffusionCollator(params).collate,
        shuffle=not is_distributed,
        num_workers=os.cpu_count(),
        sampler=sampler,
        pin_memory=True,
        drop_last=True,
    )
simple-sashimi | simple-sashimi-master/sashimi/s4.py | """
Standalone version of Structured (Sequence) State Space (S4) model.
Adapted from https://github.com/HazyResearch/state-spaces/blob/diffwave/src/models/sequence/ss/standalone/s4.py
"""
import logging
import math
from functools import partial, wraps
from typing import Any, Callable, Optional, Union
import numpy as np
from scipy import special as ss
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
import opt_einsum as oe
contract = oe.contract
contract_expression = oe.contract_expression
log = logging.getLogger(__name__)
def get_logger(name=__name__, level=logging.INFO) -> logging.Logger:
    """Initializes multi-GPU-friendly python logger."""
    lg = logging.getLogger(name)
    lg.setLevel(level)
    return lg
log = get_logger(__name__)
""" Cauchy kernel """
try: # Try CUDA extension
from extensions.cauchy.cauchy import cauchy_mult
has_cauchy_extension = True
except:
log.warn(
"CUDA extension for cauchy multiplication not found. Install by going to extensions/cauchy/ and running `python setup.py install`. This should speed up end-to-end training by 10-50%"
)
has_cauchy_extension = False
try: # Try pykeops
import pykeops
from pykeops.torch import Genred
has_pykeops = True
def cauchy_conj(v, z, w):
""" Pykeops version """
expr_num = 'z * ComplexReal(v) - Real2Complex(Sum(v * w))'
expr_denom = 'ComplexMult(z-w, z-Conj(w))'
cauchy_mult = Genred(
f'ComplexDivide({expr_num}, {expr_denom})',
# expr_num,
# expr_denom,
[
'v = Vj(2)',
'z = Vi(2)',
'w = Vj(2)',
],
reduction_op='Sum',
axis=1,
dtype='float32' if v.dtype == torch.cfloat else 'float64',
)
v, z, w = _broadcast_dims(v, z, w)
v = _c2r(v)
z = _c2r(z)
w = _c2r(w)
r = 2*cauchy_mult(v, z, w, backend='GPU')
return _r2c(r)
except ImportError:
has_pykeops = False
if not has_cauchy_extension:
log.error(
"Falling back on slow Cauchy kernel. Install at least one of pykeops or the CUDA extension for efficiency."
)
def cauchy_slow(v, z, w):
    """Naive O(N*L) Cauchy kernel: out[l] = sum_n v[n] / (z[l] - w[n]).
    v, w: (..., N)
    z: (..., L)
    returns: (..., L)
    """
    terms = v.unsqueeze(-1) / (z.unsqueeze(-2) - w.unsqueeze(-1))  # (..., N, L)
    return terms.sum(dim=-2)
def _broadcast_dims(*tensors):
max_dim = max([len(tensor.shape) for tensor in tensors])
tensors = [tensor.view((1,)*(max_dim-len(tensor.shape))+tensor.shape) for tensor in tensors]
return tensors
# Shorthand for complex <-> real-pair tensor views and conjugate handling.
_c2r = torch.view_as_real      # (..., N) complex -> (..., N, 2) real
_r2c = torch.view_as_complex   # inverse of _c2r
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)  # append conjugate pairs
# resolve_conj() materializes the lazy conjugate bit (available since torch 1.10).
if tuple(map(int, torch.__version__.split('.')[:2])) >= (1, 10):
    _resolve_conj = lambda x: x.conj().resolve_conj()
else:
    _resolve_conj = lambda x: x.conj()
""" simple nn.Module components """
def Activation(activation=None, dim=-1):
    """Map an activation name to the corresponding nn.Module instance.
    `dim` is only used by 'glu' (the axis it halves)."""
    if activation in (None, 'id', 'identity', 'linear'):
        return nn.Identity()
    if activation == 'glu':
        return nn.GLU(dim=dim)
    simple = {
        'tanh': nn.Tanh,
        'relu': nn.ReLU,
        'gelu': nn.GELU,
        'swish': nn.SiLU,
        'silu': nn.SiLU,
        'sigmoid': nn.Sigmoid,
    }
    if activation in simple:
        return simple[activation]()
    raise NotImplementedError("hidden activation '{}' is not implemented".format(activation))
def get_initializer(name, activation=None):
    """Return a weight-init callable for `name`, with the Kaiming gain
    matched to `activation` where applicable."""
    if activation in (None, 'id', 'identity', 'linear', 'modrelu'):
        nonlinearity = 'linear'
    elif activation in ('relu', 'tanh', 'sigmoid'):
        nonlinearity = activation
    elif activation in ('gelu', 'swish', 'silu'):
        # No dedicated gain for these; ReLU's is a close approximation.
        nonlinearity = 'relu'
    else:
        raise NotImplementedError(f"get_initializer: activation {activation} not supported")
    if name == 'uniform':
        return partial(torch.nn.init.kaiming_uniform_, nonlinearity=nonlinearity)
    if name == 'normal':
        return partial(torch.nn.init.kaiming_normal_, nonlinearity=nonlinearity)
    if name == 'xavier':
        return torch.nn.init.xavier_normal_
    if name == 'zero':
        return partial(torch.nn.init.constant_, val=0)
    if name == 'one':
        return partial(torch.nn.init.constant_, val=1)
    raise NotImplementedError(f"get_initializer: initializer type {name} not supported")
class TransposedLinear(nn.Module):
    """ Linear module on the second-to-last dimension """
    def __init__(self, d_input, d_output, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(d_output, d_input))
        # Same default init as nn.Linear.
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if bias:
            self.bias = nn.Parameter(torch.empty(d_output, 1))
            bound = 1 / math.sqrt(d_input)
            nn.init.uniform_(self.bias, -bound, bound)
        else:
            # Scalar zero broadcasts harmlessly in forward().
            self.bias = 0.0
    def forward(self, x):
        # Contract over the feature axis u (second-to-last of x).
        return torch.einsum('...ul,vu->...vl', x, self.weight) + self.bias
def LinearActivation(
    d_input, d_output, bias=True,
    zero_bias_init=False,
    transposed=False,
    initializer=None,
    activation=None,
    activate=False, # Apply activation as part of this module
    weight_norm=False,
    **kwargs,
    ):
    """ Returns a linear nn.Module with control over axes order, initialization, and activation """
    # GLU halves its input along the feature axis, so emit twice the features.
    if activation == 'glu':
        d_output *= 2
    cls = TransposedLinear if transposed else nn.Linear
    linear = cls(d_input, d_output, bias=bias, **kwargs)
    # Optional custom weight initialization, gain-matched to the activation.
    if initializer is not None:
        get_initializer(initializer, activation)(linear.weight)
    if bias and zero_bias_init:
        nn.init.zeros_(linear.bias)
    if weight_norm:
        linear = nn.utils.weight_norm(linear)
    # Optionally fuse the activation into the returned module.
    if activate and activation is not None:
        act = Activation(activation, dim=-2 if transposed else -1)
        linear = nn.Sequential(linear, act)
    return linear
""" Misc functional utilities """
def krylov(L, A, b, c=None, return_power=False):
"""
Compute the Krylov matrix (b, Ab, A^2b, ...) using the squaring trick.
If return_power=True, return A^{L-1} as well
"""
# TODO There is an edge case if L=1 where output doesn't get broadcasted, which might be an issue if caller is expecting broadcasting semantics... can deal with it if it arises
x = b.unsqueeze(-1) # (..., N, 1)
A_ = A
AL = None
if return_power:
AL = torch.eye(A.shape[-1], dtype=A.dtype, device=A.device)
_L = L-1
done = L == 1
# loop invariant: _L represents how many indices left to compute
while not done:
if return_power:
if _L % 2 == 1: AL = A_ @ AL
_L //= 2
# Save memory on last iteration
l = x.shape[-1]
if L - l <= l:
done = True
_x = x[..., :L-l]
else: _x = x
_x = A_ @ _x
x = torch.cat([x, _x], dim=-1) # there might be a more efficient way of ordering axes
if not done: A_ = A_ @ A_
assert x.shape[-1] == L
if c is not None:
x = torch.einsum('...nl, ...n -> ...l', x, c)
x = x.contiguous() # WOW!!
if return_power:
return x, AL
else:
return x
def power(L, A, v=None):
    """ Compute A^L and the scan sum_i A^i v_i
    A: (..., N, N)
    v: (..., N, L)
    Returns A^L when v is None, else (A^L, sum_i A^i v[..., i]).
    """
    I = torch.eye(A.shape[-1]).to(A) # , dtype=A.dtype, device=A.device)
    # Binary exponentiation: I accumulates A^L; powers caches A^(2^k).
    powers = [A]
    l = 1
    while True:
        if L % 2 == 1: I = powers[-1] @ I
        L //= 2
        if L == 0: break
        l *= 2
        powers.append(powers[-1] @ powers[-1])
    if v is None: return I
    # Invariants:
    # powers[-1] := A^l
    # l := largest po2 at most L
    # Note that an alternative divide and conquer to compute the reduction is possible and can be embedded into the above loop without caching intermediate powers of A
    # We do this reverse divide-and-conquer for efficiency reasons:
    # 1) it involves fewer padding steps for non-po2 L
    # 2) it involves more contiguous arrays
    # Take care of edge case for non-po2 arrays
    # Note that this initial step is a no-op for the case of power of 2 (l == L)
    k = v.size(-1) - l
    v_ = powers.pop() @ v[..., l:]
    v = v[..., :l]
    v[..., :k] = v[..., :k] + v_
    # Handle reduction for power of 2
    while v.size(-1) > 1:
        # Fold pairs: combine each even column with A^(2^k) times its odd neighbor.
        v = rearrange(v, '... (z l) -> ... z l', z=2)
        v = v[..., 0, :] + powers.pop() @ v[..., 1, :]
    return I, v.squeeze(-1)
""" HiPPO utilities """
def embed_c2r(A):
    """Embed a matrix into its real 2x-blown-up form: each entry a becomes
    the 2x2 block [[a, 0], [0, a]], doubling both trailing dimensions."""
    m, n = A.shape[-2], A.shape[-1]
    out = np.zeros(A.shape[:-2] + (2 * m, 2 * n), dtype=A.dtype)
    out[..., 0::2, 0::2] = A
    out[..., 1::2, 1::2] = A
    return out
def transition(measure, N, **measure_args):
    """ A, B transition matrices for different measures
    measure: the type of measure
        legt - Legendre (translated)
        legs - Legendre (scaled)
        glagt - generalized Laguerre (translated)
        lagt, tlagt - previous versions of (tilted) Laguerre with slightly different normalization
    Returns (A, B) as numpy arrays of shape (N, N) and (N, 1).
    """
    # Laguerre (translated)
    if measure == 'lagt':
        b = measure_args.get('beta', 1.0)
        A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
        B = b * np.ones((N, 1))
    # Generalized Laguerre
    # alpha 0, beta small is most stable (limits to the 'lagt' measure)
    # alpha 0, beta 1 has transition matrix A = [lower triangular 1]
    elif measure == 'glagt':
        alpha = measure_args.get('alpha', 0.0)
        beta = measure_args.get('beta', 0.01)
        A = -np.eye(N) * (1 + beta) / 2 - np.tril(np.ones((N, N)), -1)
        B = ss.binom(alpha + np.arange(N), np.arange(N))[:, None]
        # Diagonal similarity transform via gamma-function weights.
        L = np.exp(.5 * (ss.gammaln(np.arange(N)+alpha+1) - ss.gammaln(np.arange(N)+1)))
        A = (1./L[:, None]) * A * L[None, :]
        B = (1./L[:, None]) * B * np.exp(-.5 * ss.gammaln(1-alpha)) * beta**((1-alpha)/2)
    # Legendre (translated)
    elif measure == 'legt':
        Q = np.arange(N, dtype=np.float64)
        R = (2*Q + 1) ** .5
        j, i = np.meshgrid(Q, Q)
        A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :]
        B = R[:, None]
        A = -A
    # Legendre (scaled)
    elif measure == 'legs':
        q = np.arange(N, dtype=np.float64)
        col, row = np.meshgrid(q, q)
        r = 2 * q + 1
        M = -(np.where(row >= col, r, 0) - np.diag(q))
        T = np.sqrt(np.diag(2 * q + 1))
        A = T @ M @ np.linalg.inv(T)
        B = np.diag(T)[:, None]
        B = B.copy() # Otherwise "UserWarning: given NumPY array is not writeable..." after torch.as_tensor(B)
    elif measure == 'fourier':
        # Skew-symmetric frequency matrix embedded into real form, with a
        # rank-one correction.
        freqs = np.arange(N//2)
        d = np.stack([freqs, np.zeros(N//2)], axis=-1).reshape(-1)[:-1]
        A = 2*np.pi*(np.diag(d, 1) - np.diag(d, -1))
        A = A - embed_c2r(np.ones((N//2, N//2)))
        B = embed_c2r(np.ones((N//2, 1)))[..., :1]
    elif measure == 'random':
        A = np.random.randn(N, N) / N
        B = np.random.randn(N, 1)
    elif measure == 'diagonal':
        A = -np.diag(np.exp(np.random.randn(N)))
        B = np.random.randn(N, 1)
    else:
        raise NotImplementedError
    return A, B
def rank_correction(measure, N, rank=1, dtype=torch.float):
    """ Return low-rank matrix L such that A + L is normal """
    if measure == 'legs':
        assert rank >= 1
        P = torch.sqrt(.5 + torch.arange(N, dtype=dtype)).unsqueeze(0)  # (1 N)
    elif measure == 'legt':
        assert rank >= 2
        base = torch.sqrt(1 + 2 * torch.arange(N, dtype=dtype))  # (N)
        # Split into two vectors supported on odd / even indices respectively.
        zero_even, zero_odd = base.clone(), base.clone()
        zero_even[0::2] = 0.
        zero_odd[1::2] = 0.
        P = torch.stack([zero_even, zero_odd], dim=0)  # (2 N)
    elif measure == 'lagt':
        assert rank >= 1
        P = .5**.5 * torch.ones(1, N, dtype=dtype)
    elif measure == 'fourier':
        base = torch.ones(N, dtype=dtype)  # (N)
        zero_even, zero_odd = base.clone(), base.clone()
        zero_even[0::2] = 0.
        zero_odd[1::2] = 0.
        P = torch.stack([zero_even, zero_odd], dim=0)  # (2 N)
    else:
        raise NotImplementedError
    d = P.size(0)
    if rank > d:
        # Pad with zero rows up to the requested rank.
        P = torch.cat([P, torch.zeros(rank - d, N, dtype=dtype)], dim=0)  # (rank N)
    return P
def nplr(measure, N, rank=1, dtype=torch.float):
    """ Return w, P, B, V such that
    (w - p q^*, B) is unitarily equivalent to the original HiPPO A, B by the matrix V
    i.e. A = V[w - p q^*]V^*, B = V B

    measure: HiPPO measure name, or 'random' for a random DPLR system
    N: true state size; returned tensors have last dim N//2 (conjugate symmetry)
    rank: rank of the low-rank correction
    dtype: real input dtype (torch.float or torch.double)
    """
    # BUGFIX: was `assert dtype == torch.float or torch.cfloat`, which is always
    # true (`or torch.cfloat` is just a truthy constant). Callers pass real
    # dtypes only; double is used when higher precision is requested.
    assert dtype in (torch.float, torch.double)
    if measure == 'random':
        dtype = torch.cfloat if dtype == torch.float else torch.cdouble
        # Random stable diagonal (negative real part) plus random low-rank terms
        w = -torch.exp(torch.randn(N//2)) + 1j*torch.randn(N//2)
        P = torch.randn(rank, N//2, dtype=dtype)
        B = torch.randn(N//2, dtype=dtype)
        V = torch.eye(N, dtype=dtype)[..., :N//2] # Only used in testing
        return w, P, B, V
    A, B = transition(measure, N)
    A = torch.as_tensor(A, dtype=dtype) # (N, N)
    B = torch.as_tensor(B, dtype=dtype)[:, 0] # (N,)
    P = rank_correction(measure, N, rank=rank, dtype=dtype)
    # A + P P^T is normal (see rank_correction), hence unitarily diagonalizable
    AP = A + torch.sum(P.unsqueeze(-2)*P.unsqueeze(-1), dim=-3)
    w, V = torch.linalg.eig(AP) # (..., N) (..., N, N)
    # V w V^{-1} = A
    # Only keep one of the conjugate pairs
    w = w[..., 0::2].contiguous()
    V = V[..., 0::2].contiguous()
    V_inv = V.conj().transpose(-1, -2)
    B = contract('ij, j -> i', V_inv, B.to(V)) # V^* B
    P = contract('ij, ...j -> ...i', V_inv, P.to(V)) # V^* P
    return w, P, B, V
def bilinear(dt, A, B=None):
    """
    Bilinear (Tustin) discretization of a continuous-time state-space pair.
    dt: (...) timescales
    A: (... N N)
    B: (... N)
    Returns (dA, dB) where dA = (I - dt/2 A)^{-1} (I + dt/2 A) and
    dB = (I - dt/2 A)^{-1} dt B; dB is None when B is None.
    """
    N = A.shape[-1]
    I = torch.eye(N).to(A)
    # Generalized: dt[..., None, None] (was dt[:, None, None]) supports the
    # documented arbitrary leading shape of dt, including a 0-d scalar;
    # behavior is unchanged for the previous 1-D dt case.
    A_backwards = I - dt[..., None, None] / 2 * A
    A_forwards = I + dt[..., None, None] / 2 * A
    if B is None:
        dB = None
    else:
        dB = dt[..., None] * torch.linalg.solve(
            A_backwards, B.unsqueeze(-1)
        ).squeeze(-1) # (... N)
    dA = torch.linalg.solve(A_backwards, A_forwards) # (... N N)
    return dA, dB
class SSKernelNPLR(nn.Module):
    """Stores a representation of and computes the SSKernel function K_L(A^dt, B^dt, C) corresponding to a discretized state space, where A is Normal + Low Rank (NPLR)
    The class name stands for 'State-Space SSKernel for Normal Plus Low-Rank'.
    The parameters of this function are as follows.
    A: (... N N) the state matrix
    B: (... N) input matrix
    C: (... N) output matrix
    dt: (...) timescales / discretization step size
    p, q: (... P N) low-rank correction to A, such that Ap=A+pq^T is a normal matrix
    The forward pass of this Module returns:
    (... L) that represents FFT SSKernel_L(A^dt, B^dt, C)
    """
    @torch.no_grad()
    def _setup_C(self, double_length=False):
        """ Construct C~ from C
        double_length: current C is for length L, convert it to length 2L
        """
        C = _r2c(self.C)
        self._setup_state()
        # dA^L, the L-step state transition (power() presumably computes a matrix power)
        dA_L = power(self.L, self.dA)
        # Multiply C by I - dA_L
        C_ = _conj(C)
        prod = contract("h m n, c h n -> c h m", dA_L.transpose(-1, -2), C_)
        if double_length: prod = -prod # Multiply by I + dA_L instead
        C_ = C_ - prod
        C_ = C_[..., :self.N] # Take conjugate pairs again
        # Update the stored (real-viewed) parameter in place
        self.C.copy_(_c2r(C_))
        if double_length:
            self.L *= 2
            # Re-cache the FFT nodes for the new length
            self._omega(self.L, dtype=C.dtype, device=C.device, cache=True)
    def _omega(self, L, dtype, device, cache=True):
        """ Calculate (and cache) the FFT nodes and their bilinear-transformed version
        This should be called everytime the internal length self.L changes """
        omega = torch.tensor(
            np.exp(-2j * np.pi / (L)), dtype=dtype, device=device
        ) # \omega_{2L}
        omega = omega ** torch.arange(0, L // 2 + 1, device=device)
        # z: frequencies mapped through the bilinear transform, where the resolvent is evaluated
        z = 2 * (1 - omega) / (1 + omega)
        if cache:
            # Stored as real-viewed buffers so they move with the module / state_dict
            self.register_buffer("omega", _c2r(omega))
            self.register_buffer("z", _c2r(z))
        return omega, z
    def __init__(
        self,
        L, w, P, B, C, log_dt,
        hurwitz=False,
        trainable=None,
        lr=None,
        tie_state=False,
        length_correction=True,
        verbose=False,
    ):
        """
        L: Maximum length; this module computes an SSM kernel of length L
        w: (N)
        p: (r, N) low-rank correction to A
        q: (r, N)
        A represented by diag(w) - pq^*
        B: (N)
        dt: (H) timescale per feature
        C: (H, C, N) system is 1-D to c-D (channels)
        hurwitz: tie pq and ensure w has negative real part
        trainable: toggle which of the parameters is trainable
        lr: add hook to set lr of hippo parameters specially (everything besides C)
        tie_state: tie all state parameters across the H hidden features
        length_correction: multiply C by (I - dA^L) - can be turned off when L is large for slight speedup at initialization (only relevant when N large as well)
        Note: tensor shape N here denotes half the true state size, because of conjugate symmetry
        """
        super().__init__()
        self.hurwitz = hurwitz
        self.tie_state = tie_state
        self.verbose = verbose
        # Rank of low-rank correction
        self.rank = P.shape[-2]
        assert w.size(-1) == P.size(-1) == B.size(-1) == C.size(-1)
        self.H = log_dt.size(-1)
        self.N = w.size(-1)
        # Broadcast everything to correct shapes
        C = C.expand(torch.broadcast_shapes(C.shape, (1, self.H, self.N))) # (H, C, N)
        H = 1 if self.tie_state else self.H
        B = repeat(B, 'n -> 1 h n', h=H)
        P = repeat(P, 'r n -> r h n', h=H)
        w = repeat(w, 'n -> h n', h=H)
        # Cache Fourier nodes every time we set up a desired length
        self.L = L
        if self.L is not None:
            self._omega(self.L, dtype=C.dtype, device=C.device, cache=True)
        # Register parameters
        # C is a regular parameter, not state
        # self.C = nn.Parameter(_c2r(C.conj().resolve_conj()))
        self.C = nn.Parameter(_c2r(_resolve_conj(C)))
        # `trainable` may be None/False (train nothing extra), True (train all),
        # or a dict toggling individual parameter groups
        train = False
        if trainable is None: trainable = {}
        if trainable == False: trainable = {}
        if trainable == True: trainable, train = {}, True
        self.register("log_dt", log_dt, trainable.get('dt', train), lr, 0.0)
        self.register("B", _c2r(B), trainable.get('B', train), lr, 0.0)
        self.register("P", _c2r(P), trainable.get('P', train), lr, 0.0)
        if self.hurwitz:
            # Parameterize w = -exp(log_w_real) + i*w_imag so the real part stays negative
            log_w_real = torch.log(-w.real + 1e-3) # Some of the HiPPO methods have real part 0
            w_imag = w.imag
            self.register("log_w_real", log_w_real, trainable.get('A', 0), lr, 0.0)
            self.register("w_imag", w_imag, trainable.get('A', train), lr, 0.0)
            self.Q = None
        else:
            self.register("w", _c2r(w), trainable.get('A', train), lr, 0.0)
            # self.register("Q", _c2r(P.clone().conj().resolve_conj()), trainable.get('P', train), lr, 0.0)
            Q = _resolve_conj(P.clone())
            self.register("Q", _c2r(Q), trainable.get('P', train), lr, 0.0)
        if length_correction:
            self._setup_C()
    def _w(self):
        # Get the internal w (diagonal) parameter
        if self.hurwitz:
            w_real = -torch.exp(self.log_w_real)
            w_imag = self.w_imag
            w = w_real + 1j * w_imag
        else:
            w = _r2c(self.w) # (..., N)
        return w
    def forward(self, state=None, rate=1.0, L=None):
        """
        state: (..., s, N) extra tensor that augments B
        rate: sampling rate factor
        returns: (..., c+s, L)
        """
        # Handle sampling rate logic
        # The idea is that this kernel's length (in continuous units) is self.L, while we are asked to provide a kernel of length L at (relative) sampling rate rate
        # If either are not passed in, assume we're not asked to change the scale of our kernel
        assert not (rate is None and L is None)
        if rate is None:
            rate = self.L / L
        if L is None:
            L = int(self.L / rate)
        # Increase the internal length if needed
        while rate * L > self.L:
            self.double_length()
        dt = torch.exp(self.log_dt) * rate
        B = _r2c(self.B)
        C = _r2c(self.C)
        P = _r2c(self.P)
        Q = P.conj() if self.Q is None else _r2c(self.Q)
        w = self._w()
        if rate == 1.0:
            # Use cached FFT nodes
            omega, z = _r2c(self.omega), _r2c(self.z) # (..., L)
        else:
            omega, z = self._omega(int(self.L/rate), dtype=w.dtype, device=w.device, cache=False)
        if self.tie_state:
            # Expand the single tied copy across all H features
            B = repeat(B, '... 1 n -> ... h n', h=self.H)
            P = repeat(P, '... 1 n -> ... h n', h=self.H)
            Q = repeat(Q, '... 1 n -> ... h n', h=self.H)
        # Augment B
        if state is not None:
            # Have to "unbilinear" the state to put it into the same "type" as B
            # Compute 1/dt * (I + dt/2 A) @ state
            # Can do this without expanding (maybe minor speedup using conj symmetry in theory), but it's easier to read this way
            s = _conj(state) if state.size(-1) == self.N else state # (B H N)
            sA = (
                s * _conj(w) # (B H N)
                - contract('bhm, rhm, rhn -> bhn', s, _conj(Q), _conj(P))
            )
            s = s / dt.unsqueeze(-1) + sA / 2
            s = s[..., :self.N]
            B = torch.cat([s, B], dim=-3) # (s+1, H, N)
        # Incorporate dt into A
        w = w * dt.unsqueeze(-1) # (H N)
        # Stack B and p, C and q for convenient batching
        B = torch.cat([B, P], dim=-3) # (s+1+r, H, N)
        C = torch.cat([C, Q], dim=-3) # (c+r, H, N)
        # Incorporate B and C batch dimensions
        v = B.unsqueeze(-3) * C.unsqueeze(-4) # (s+1+r, c+r, H, N)
        # w = w[None, None, ...] # (1, 1, H, N)
        # z = z[None, None, None, ...] # (1, 1, 1, L)
        # Calculate resolvent at omega (fastest available Cauchy-kernel backend)
        if has_cauchy_extension and z.dtype == torch.cfloat:
            r = cauchy_mult(v, z, w, symmetric=True)
        elif has_pykeops:
            r = cauchy_conj(v, z, w)
        else:
            r = cauchy_slow(v, z, w)
        r = r * dt[None, None, :, None] # (S+1+R, C+R, H, L)
        # Low-rank Woodbury correction
        if self.rank == 1:
            # Rank-1 case: scalar Woodbury formula
            k_f = r[:-1, :-1, :, :] - r[:-1, -1:, :, :] * r[-1:, :-1, :, :] / (1 + r[-1:, -1:, :, :])
        elif self.rank == 2:
            # Rank-2 case: explicit 2x2 inverse via the determinant
            r00 = r[: -self.rank, : -self.rank, :, :]
            r01 = r[: -self.rank, -self.rank :, :, :]
            r10 = r[-self.rank :, : -self.rank, :, :]
            r11 = r[-self.rank :, -self.rank :, :, :]
            det = (1 + r11[:1, :1, :, :]) * (1 + r11[1:, 1:, :, :]) - r11[:1, 1:, :, :] * r11[1:, :1, :, :]
            s = (
                r01[:, :1, :, :] * (1 + r11[1:, 1:, :, :]) * r10[:1, :, :, :]
                + r01[:, 1:, :, :] * (1 + r11[:1, :1, :, :]) * r10[1:, :, :, :]
                - r01[:, :1, :, :] * (r11[:1, 1:, :, :]) * r10[1:, :, :, :]
                - r01[:, 1:, :, :] * (r11[1:, :1, :, :]) * r10[:1, :, :, :]
            )
            s = s / det
            k_f = r00 - s
        else:
            # General rank: batched matrix inverse of (I + r11)
            r00 = r[:-self.rank, :-self.rank, :, :]
            r01 = r[:-self.rank, -self.rank:, :, :]
            r10 = r[-self.rank:, :-self.rank, :, :]
            r11 = r[-self.rank:, -self.rank:, :, :]
            r11 = rearrange(r11, "a b h n -> h n a b")
            r11 = torch.linalg.inv(torch.eye(self.rank, device=r.device) + r11)
            r11 = rearrange(r11, "h n a b -> a b h n")
            k_f = r00 - torch.einsum("i j h n, j k h n, k l h n -> i l h n", r01, r11, r10)
        # Final correction for the bilinear transform
        k_f = k_f * 2 / (1 + omega)
        # Move from frequency to coefficients
        k = torch.fft.irfft(k_f) # (S+1, C, H, L)
        # Truncate to target length
        k = k[..., :L]
        if state is not None:
            k_state = k[:-1, :, :, :] # (S, C, H, L)
        else:
            k_state = None
        k_B = k[-1, :, :, :] # (C H L)
        return k_B, k_state
    @torch.no_grad()
    def double_length(self):
        # Grow the internal kernel length L -> 2L (used when asked for longer kernels)
        if self.verbose: log.info(f"S4: Doubling length from L = {self.L} to {2*self.L}")
        self._setup_C(double_length=True)
    def _setup_linear(self):
        """ Create parameters that allow fast linear stepping of state """
        w = self._w()
        B = _r2c(self.B) # (H N)
        P = _r2c(self.P)
        Q = P.conj() if self.Q is None else _r2c(self.Q)
        # Prepare Linear stepping
        dt = torch.exp(self.log_dt)
        D = (2.0 / dt.unsqueeze(-1) - w).reciprocal() # (H, N)
        R = (torch.eye(self.rank, dtype=w.dtype, device=w.device) + 2*contract('r h n, h n, s h n -> h r s', Q, D, P).real) # (H r r)
        Q_D = rearrange(Q*D, 'r h n -> h r n')
        R = torch.linalg.solve(R.to(Q_D), Q_D) # (H r N)
        R = rearrange(R, 'h r n -> r h n')
        self.step_params = {
            "D": D, # (H N)
            "R": R, # (r H N)
            "P": P, # (r H N)
            "Q": Q, # (r H N)
            "B": B, # (1 H N)
            "E": 2.0 / dt.unsqueeze(-1) + w, # (H N)
        }
    def _step_state_linear(self, u=None, state=None):
        """
        Version of the step function that has time O(N) instead of O(N^2) per step, which takes advantage of the DPLR form and bilinear discretization.
        Unfortunately, as currently implemented it's about 2x slower because it calls several sequential operations. Perhaps a fused CUDA kernel implementation would be much faster
        u: (H) input
        state: (H, N/2) state with conjugate pairs
        Optionally, the state can have last dimension N
        Returns: same shape as state
        """
        C = _r2c(self.C) # View used for dtype/device
        if u is None: # Special case used to find dA
            u = torch.zeros(self.H, dtype=C.dtype, device=C.device)
        if state is None: # Special case used to find dB
            state = torch.zeros(self.H, self.N, dtype=C.dtype, device=C.device)
        step_params = self.step_params.copy()
        if state.size(-1) == self.N: # Only store half of the conjugate pairs; should be true by default
            # There should be a slightly faster way using conjugate symmetry
            contract_fn = lambda p, x, y: contract('r h n, r h m, ... h m -> ... h n', _conj(p), _conj(x), _conj(y))[..., :self.N] # inner outer product
        else:
            assert state.size(-1) == 2*self.N
            step_params = {k: _conj(v) for k, v in step_params.items()}
            # TODO worth setting up a contract_expression in default_state if we want to use this at inference time for stepping
            contract_fn = lambda p, x, y: contract('r h n, r h m, ... h m -> ... h n', p, x, y) # inner outer product
        D = step_params["D"] # (H N)
        E = step_params["E"] # (H N)
        R = step_params["R"] # (r H N)
        P = step_params["P"] # (r H N)
        Q = step_params["Q"] # (r H N)
        B = step_params["B"] # (1 H N)
        new_state = E * state - contract_fn(P, Q, state) # (B H N)
        new_state = new_state + 2.0 * B * u.unsqueeze(-1) # (B H N)
        new_state = D * (new_state - contract_fn(P, R, new_state))
        return new_state
    def _setup_state(self):
        """ Construct dA and dB for discretized state equation """
        # Construct dA and dB by using the stepping
        self._setup_linear()
        C = _r2c(self.C) # Just returns a view that we use for finding dtype/device
        # Step the identity to read off the full transition matrix column by column
        state = torch.eye(2*self.N, dtype=C.dtype, device=C.device).unsqueeze(-2) # (N 1 N)
        dA = self._step_state_linear(state=state)
        dA = rearrange(dA, "n h m -> h m n")
        self.dA = dA # (H N N)
        # Step a unit input from the zero state to read off dB
        u = C.new_ones(self.H)
        dB = self._step_state_linear(u=u)
        dB = _conj(dB)
        self.dB = rearrange(dB, '1 h n -> h n') # (H N)
    def _step_state(self, u, state):
        """ Must be called after self.default_state() is used to construct an initial state! """
        next_state = self.state_contraction(self.dA, state) + self.input_contraction(self.dB, u)
        return next_state
    def setup_step(self, mode='dense'):
        """ Set up dA, dB, dC discretized parameters for stepping """
        self._setup_state()
        # Calculate original C
        dA_L = power(self.L, self.dA)
        I = torch.eye(self.dA.size(-1)).to(dA_L)
        C = _conj(_r2c(self.C)) # (H C N)
        # Undo the (I - dA^L) length correction applied to C at init
        dC = torch.linalg.solve(
            I - dA_L.transpose(-1, -2),
            C.unsqueeze(-1),
        ).squeeze(-1)
        self.dC = dC
        # Do special preprocessing for different step modes
        self._step_mode = mode
        if mode == 'linear':
            # Linear case: special step function for the state, we need to handle output
            # use conjugate symmetry by default, which affects the output projection
            self.dC = 2*self.dC[:, :, :self.N]
        elif mode == 'diagonal':
            # Eigendecomposition of the A matrix
            L, V = torch.linalg.eig(self.dA)
            V_inv = torch.linalg.inv(V)
            # Check that the eigendedecomposition is correct
            if self.verbose:
                print("Diagonalization error:", torch.dist(V @ torch.diag_embed(L) @ V_inv, self.dA))
            # Change the parameterization to diagonalize
            self.dA = L
            self.dB = contract('h n m, h m -> h n', V_inv, self.dB)
            self.dC = contract('h n m, c h n -> c h m', V, self.dC)
        elif mode == 'dense':
            pass
        else: raise NotImplementedError("NPLR Kernel step mode must be {'dense' | 'linear' | 'diagonal'}")
    def default_state(self, *batch_shape):
        """ Allocate a zero state and cache the step contractions for this batch shape """
        C = _r2c(self.C)
        N = C.size(-1)
        H = C.size(-2)
        # Cache the tensor contractions we will later do, for efficiency
        # These are put in this function because they depend on the batch size
        if self._step_mode !='linear':
            # Dense/diagonal modes step the full (expanded) state of size 2N
            N *= 2
            if self._step_mode == 'diagonal':
                self.state_contraction = contract_expression(
                    "h n, ... h n -> ... h n",
                    (H, N),
                    batch_shape + (H, N),
                )
            else:
                # Dense (quadratic) case: expand all terms
                self.state_contraction = contract_expression(
                    "h m n, ... h n -> ... h m",
                    (H, N, N),
                    batch_shape + (H, N),
                )
        self.input_contraction = contract_expression(
            "h n, ... h -> ... h n",
            (H, N), # self.dB.shape
            batch_shape + (H,),
        )
        self.output_contraction = contract_expression(
            "c h n, ... h n -> ... c h",
            (C.shape[0], H, N), # self.dC.shape
            batch_shape + (H, N),
        )
        state = torch.zeros(*batch_shape, H, N, dtype=C.dtype, device=C.device)
        return state
    def step(self, u, state):
        """ Must have called self.setup_step() and created state with self.default_state() before calling this """
        if self._step_mode == 'linear':
            new_state = self._step_state_linear(u, state)
        else:
            new_state = self._step_state(u, state)
        y = self.output_contraction(self.dC, new_state)
        return y, new_state
    def register(self, name, tensor, trainable=False, lr=None, wd=None):
        """Utility method: register a tensor as a buffer or trainable parameter"""
        if trainable:
            self.register_parameter(name, nn.Parameter(tensor))
        else:
            self.register_buffer(name, tensor)
        # Attach per-parameter optimizer overrides; presumably consumed by the
        # training setup elsewhere in the project — confirm against the optimizer code
        optim = {}
        if trainable and lr is not None:
            optim["lr"] = lr
        if trainable and wd is not None:
            optim["weight_decay"] = wd
        if len(optim) > 0:
            setattr(getattr(self, name), "_optim", optim)
class HippoSSKernel(nn.Module):
    """Wrapper around SSKernel that generates A, B, C, dt according to HiPPO arguments.
    The SSKernel is expected to support the interface
    forward()
    default_state()
    setup_step()
    step()
    """
    def __init__(
        self,
        H,
        N=64,
        L=1,
        measure="legs",
        rank=1,
        channels=1, # 1-dim to C-dim map; can think of C as having separate "heads"
        dt_min=0.001,
        dt_max=0.1,
        trainable=None, # Dictionary of options to train various HiPPO parameters
        lr=None, # Hook to set LR of hippo parameters differently
        length_correction=True, # Multiply by I-A|^L after initialization; can be turned off for initialization speed
        hurwitz=False,
        tie_state=False, # Tie parameters of HiPPO ODE across the H features
        precision=1, # 1 (single) or 2 (double) for the kernel
        resample=False, # If given inputs of different lengths, adjust the sampling rate. Note that L should always be provided in this case, as it assumes that L is the true underlying length of the continuous signal
        verbose=False,
    ):
        super().__init__()
        self.N = N
        self.H = H
        L = L or 1
        self.precision = precision
        dtype = torch.double if self.precision == 2 else torch.float
        cdtype = torch.cfloat if dtype == torch.float else torch.cdouble
        # rate=None lets the kernel infer the sampling rate from the requested length
        self.rate = None if resample else 1.0
        self.channels = channels
        # Generate dt: log-uniform in [dt_min, dt_max], one timescale per feature
        log_dt = torch.rand(self.H, dtype=dtype) * (
            math.log(dt_max) - math.log(dt_min)
        ) + math.log(dt_min)
        # HiPPO initialization of the DPLR system (w, p, B); C is random
        w, p, B, _ = nplr(measure, self.N, rank, dtype=dtype)
        C = torch.randn(channels, self.H, self.N // 2, dtype=cdtype)
        self.kernel = SSKernelNPLR(
            L, w, p, B, C,
            log_dt,
            hurwitz=hurwitz,
            trainable=trainable,
            lr=lr,
            tie_state=tie_state,
            length_correction=length_correction,
            verbose=verbose,
        )
    def forward(self, L=None):
        """Return the convolution kernel of length L as a real tensor."""
        k, _ = self.kernel(rate=self.rate, L=L)
        return k.float()
    def step(self, u, state, **kwargs):
        u, state = self.kernel.step(u, state, **kwargs)
        # Note: .float() on the (complex) kernel output keeps only the real part.
        # This mirrors the reference implementation's behavior.
        return u.float(), state
    def default_state(self, *args, **kwargs):
        # Delegate state allocation to the underlying kernel
        return self.kernel.default_state(*args, **kwargs)
class S4(nn.Module):
    """Structured State Space (S4) layer: SSM convolution kernel + pointwise output transform."""
    def __init__(
        self,
        d_model,
        d_state=64,
        l_max=1, # Maximum length of sequence. Fine if not provided: the kernel will keep doubling in length until longer than sequence. However, this can be marginally slower if the true length is not a power of 2
        channels=1, # maps 1-dim to C-dim
        bidirectional=False,
        # Arguments for FF
        activation='gelu', # activation in between SS and FF
        postact=None, # activation after FF
        initializer=None, # initializer on FF
        weight_norm=False, # weight normalization on FF
        hyper_act=None, # Use a "hypernetwork" multiplication
        dropout=0.0,
        transposed=True, # axis ordering (B, L, D) or (B, D, L)
        verbose=False,
        # SSM Kernel arguments
        **kernel_args,
    ):
        """
        d_state: the dimension of the state, also denoted by N
        l_max: the maximum sequence length, also denoted by L
        if this is not known at model creation, set l_max=1
        channels: can be interpreted as a number of "heads"
        bidirectional: bidirectional
        dropout: standard dropout argument
        transposed: choose backbone axis ordering of (B, L, H) or (B, H, L) [B=batch size, L=sequence length, H=hidden dimension]
        Other options are all experimental and should not need to be configured
        """
        super().__init__()
        if verbose:
            # Verbose logging was stripped from this copy of the module
            raise NotImplementedError()
        self.h = d_model
        self.n = d_state
        self.bidirectional = bidirectional
        self.channels = channels
        self.transposed = transposed
        # optional multiplicative modulation GLU-style
        # https://arxiv.org/abs/2002.05202
        self.hyper = hyper_act is not None
        if self.hyper:
            channels *= 2
            self.hyper_activation = Activation(hyper_act)
        self.D = nn.Parameter(torch.randn(channels, self.h))
        if self.bidirectional:
            channels *= 2
        # SSM Kernel
        self.kernel = HippoSSKernel(self.h, N=self.n, L=l_max, channels=channels, verbose=verbose, **kernel_args)
        # Pointwise
        self.activation = Activation(activation)
        dropout_fn = nn.Dropout2d if self.transposed else nn.Dropout
        self.dropout = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
        # position-wise output transform to mix features
        self.output_linear = LinearActivation(
            self.h*self.channels,
            self.h,
            transposed=self.transposed,
            initializer=initializer,
            activation=postact,
            activate=True,
            weight_norm=weight_norm,
        )
    def forward(self, u, **kwargs): # absorbs return_output and transformer src mask
        """
        u: (B H L) if self.transposed else (B L H)
        state: (H N) never needed unless you know what you're doing
        Returns: same shape as u
        """
        if not self.transposed: u = u.transpose(-1, -2)
        L = u.size(-1)
        # Compute SS Kernel
        k = self.kernel(L=L) # (C H L) (B C H L)
        # Convolution
        if self.bidirectional:
            k0, k1 = rearrange(k, '(s c) h l -> s c h l', s=2)
            # BUGFIX: a stray trailing line-continuation backslash previously glued the
            # following `k_f = ...` statement onto this expression, making the whole
            # module a SyntaxError. The forward/backward kernels are padded to
            # opposite ends so a single FFT convolution handles both directions.
            k = F.pad(k0, (0, L)) \
                + F.pad(k1.flip(-1), (L, 0))
        k_f = torch.fft.rfft(k, n=2*L) # (C H L)
        u_f = torch.fft.rfft(u, n=2*L) # (B H L)
        y_f = contract('bhl,chl->bchl', u_f, k_f) # k_f.unsqueeze(-4) * u_f.unsqueeze(-3) # (B C H L)
        y = torch.fft.irfft(y_f, n=2*L)[..., :L] # (B C H L)
        # Compute D term in state space equation - essentially a skip connection
        y = y + contract('bhl,ch->bchl', u, self.D) # u.unsqueeze(-3) * self.D.unsqueeze(-1)
        # Optional hyper-network multiplication
        if self.hyper:
            y, yh = rearrange(y, 'b (s c) h l -> s b c h l', s=2)
            y = self.hyper_activation(yh) * y
        # Reshape to flatten channels
        y = rearrange(y, '... c h l -> ... (c h) l')
        y = self.dropout(self.activation(y))
        if not self.transposed: y = y.transpose(-1, -2)
        y = self.output_linear(y)
        return y, None
    def step(self, u, state):
        """ Step one time step as a recurrent model. Intended to be used during validation.
        u: (B H)
        state: (B H N)
        Returns: output (B H), state (B H N)
        """
        assert not self.training
        y, next_state = self.kernel.step(u, state) # (B C H)
        y = y + u.unsqueeze(-2) * self.D
        y = rearrange(y, '... c h -> ... (c h)')
        y = self.activation(y)
        if self.transposed:
            y = self.output_linear(y.unsqueeze(-1)).squeeze(-1)
        else:
            y = self.output_linear(y)
        return y, next_state
    def default_state(self, *batch_shape, device=None):
        # `device` kept for interface compatibility; the kernel allocates on its own device
        return self.kernel.default_state(*batch_shape)
    @property
    def d_state(self):
        return self.h * self.n
    @property
    def d_output(self):
        return self.h
    @property
    def state_to_tensor(self):
        # BUGFIX: arguments were swapped (`rearrange(pattern, state)`); einops
        # rearrange takes the tensor first, as every other call in this file does.
        return lambda state: rearrange(state, '... h n -> ... (h n)')
| 40,938 | 34.942932 | 218 | py |
CanineCutaneousTumors | CanineCutaneousTumors-main/evaluation/evaluation_helper.py | import sys
sys.path.insert(0, '../')
from torchvision import transforms
from fastai.vision import *
from sklearn.metrics import confusion_matrix
from tqdm import tqdm
from torch.nn.functional import fold
def segmentation_inference(slide,store, patch_size, level, batch_size, learner, overlap_factor, indices = None):
    """Tile-and-stitch semantic segmentation over a whole slide.

    Runs the learner's segmentation model over overlapping patches, averages the
    overlapping softmax maps row by row in a rolling buffer, writes the argmax
    class map into a new "segmentation" dataset in `store`, and collects the
    level-0 pixel coordinates of every pixel predicted as "Tumor".

    slide: OpenSlide-like object (read_region, level_dimensions, level_downsamples)
    store: HDF5-like container with create_dataset()  # presumably h5py — confirm
    overlap_factor: patch stride as a fraction of patch_size (e.g. 0.5 = 50% overlap)
    indices: optional explicit (x, y) patch origins; None = full regular grid
    Returns: (K, 2) tensor of (y, x) tumor-pixel coordinates at level 0.
    """
    shape = slide.level_dimensions[level]
    classification_indices = torch.zeros((0,2)).to(learner.data.device)
    if indices != None:
        x_indices = indices[:,0]
        y_indices = indices[:,1]
    else:
        # Regular grid of patch origins with stride patch_size * overlap_factor
        x_indices = np.arange(0,int((shape[0] // (patch_size*overlap_factor)) + 1))* int(patch_size * overlap_factor)
        y_indices = np.arange(0,int((shape[1] // (patch_size*overlap_factor)) + 1))* int(patch_size * overlap_factor)
    segmentation_results = store.create_dataset("segmentation", (shape[1], shape[0]), compression="gzip")
    # Rolling accumulation buffer: holds the softmax sums for the rows that still
    # receive contributions from upcoming (overlapping) patch rows
    temp = torch.zeros(learner.data.c, int(2*patch_size-overlap_factor*patch_size), shape[0]).to(learner.data.device)
    with torch.no_grad():
        # segmentation inference
        learner.model.eval()
        for y in tqdm(y_indices,desc='Processing %s' % Path(slide._filename).stem):
            x_loader = DataLoader(x_indices, batch_size=batch_size)
            row_temp = []
            for xs in x_loader:
                # Read, normalize and stack one batch of RGB patches (alpha channel dropped)
                input_batch = torch.stack([transforms.Normalize(*learner.data.stats)(pil2tensor(np.array(
                    slide.read_region(location=(int(x * slide.level_downsamples[level]),
                                                int(y * slide.level_downsamples[level])),
                                      level=level, size=(patch_size, patch_size)))[:, :, :3] / 255., np.float32)) for x
                    in xs])
                seg_pred = torch.softmax(learner.model(input_batch.to(device=learner.data.device)),dim=1)
                row_temp += [s.view(s.shape[0],s.shape[1]*s.shape[2],1) for s in seg_pred]
            # fold() re-assembles the flattened per-patch maps into one strip,
            # summing where patches overlap horizontally; crop to the slide width
            row_output = fold(torch.cat(row_temp, dim=2), (patch_size,int((len(x_indices) + 1) * patch_size * overlap_factor)),
                              kernel_size=(patch_size, patch_size), stride=int(patch_size * overlap_factor)).squeeze(1)[:,:,:shape[0]]
            # Accumulate into the rolling buffer, then advance it by one stride:
            # the rows scrolled off the top are final and get written out below
            temp[:, int(patch_size * overlap_factor):, :] += row_output
            temp = temp.roll(-int(patch_size * overlap_factor),dims=1)
            temp[:, -int(patch_size * overlap_factor):, :] = 0
            width = segmentation_results[y:int(y + patch_size * overlap_factor),:].shape[0]
            for x in range(0, int(shape[0] // patch_size + 1)):
                height = segmentation_results[:, int(x * patch_size):int((x + 1) * patch_size)].shape[1]
                # Write the argmax class map for the finalized stripe
                segmentation_results[
                y:int(y + patch_size * overlap_factor),
                int(x * patch_size):int((x + 1) * patch_size)] = temp[:,:width,int(x*patch_size):int(x*patch_size) + height].argmax(dim=0).cpu()
                # Collect (y, x) positions classified as "Tumor", scaled back to level 0
                classification_indices = torch.cat((classification_indices, (torch.nonzero(
                    temp[:, :width, int(x * patch_size):int(x * patch_size) + height].argmax(
                        dim=0) == learner.data.classes.index("Tumor"))+torch.Tensor([y,x*patch_size]).to(learner.data.device))*slide.level_downsamples[level]), dim=0)
            torch.cuda.empty_cache()
    return classification_indices
def classification_inference(slide, store, patch_size, level, batch_size, learner, indices=None):
    """Patch-wise classification over a whole slide, written into `store`.

    slide: OpenSlide-like object (read_region, level_dimensions, level_downsamples)
    store: HDF5-like container; results go into a new "classification" dataset of
        shape (rows, cols) covering the slide grid of `patch_size` tiles at `level`
    indices: optional (K, 2) tensor of level-0 coordinates restricting inference
        (e.g. tumor pixels from segmentation_inference); None = full grid.
        # assumes columns are (y, x) before the flip to (x, y) — TODO confirm against caller
    """
    shape = slide.level_dimensions[level]
    if indices is not None:  # was `indices != None`
        # Deduplicate to one entry per grid cell, then convert back to (x, y) pixel offsets
        indices = torch.unique(indices // slide.level_downsamples[level] // patch_size, dim=0).cpu().flip(dims=[1]) * patch_size
    else:
        # Full regular grid of patch origins
        indices = np.indices((int(shape[0] // patch_size), int(shape[1] // patch_size))).reshape(2, -1).T * patch_size
    classification_results = store.create_dataset(
        "classification", (int(shape[1] // patch_size), int(shape[0] // patch_size)), compression="gzip")
    with torch.no_grad():
        index_loader = DataLoader(indices, batch_size=batch_size)
        # classification inference
        learner.model.eval()
        for ind in tqdm(index_loader, desc='Processing %s' % Path(slide._filename).stem):
            # Read, normalize and stack one batch of RGB patches (alpha channel dropped)
            input_batch = torch.stack([transforms.Normalize(*learner.data.stats)(pil2tensor(np.array(
                slide.read_region(location=(int(i[0] * slide.level_downsamples[level]),
                                            int(i[1] * slide.level_downsamples[level])),
                                  level=level, size=(patch_size, patch_size)))[:, :, :3] / 255., np.float32)) for i
                in ind])
            clas_pred = learner.model(input_batch.to(device=learner.data.device))
            if is_tuple(clas_pred):
                clas_pred = clas_pred[-1]
            clas_pred = torch.softmax(clas_pred, dim=1)
            # Hoist the argmax (and device transfer) out of the per-patch loop
            pred_labels = torch.argmax(clas_pred, dim=1).cpu()
            for j, i in enumerate(ind):
                try:
                    classification_results[int(i[1] // patch_size), int(i[0] // patch_size)] = pred_labels[j]
                except Exception:
                    # Best-effort: patches at the slide border can map outside the
                    # result grid; skip them instead of aborting the whole slide.
                    # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                    continue
def segmentation_cm_matrix(slide_container, prediction, classes):
    """Accumulate a pixel-level confusion matrix for one slide.

    Compares the stored "segmentation" prediction map against the ground-truth
    patches from `slide_container`, tile by tile. Returns a (classes, classes)
    matrix with ground truth on rows and predictions on columns.
    """
    tile_w = slide_container.width
    tile_h = slide_container.height
    xs = np.arange(int(slide_container.slide_shape[0] // tile_w) + 1) * tile_w
    ys = np.arange(int(slide_container.slide_shape[1] // tile_h) + 1) * tile_h
    cm = np.zeros((classes, classes))
    for x in tqdm(xs):
        for y in ys:
            pred = prediction["segmentation"][y:y + tile_h, x:x + tile_w]
            # Crop ground truth to the (possibly smaller) border-tile prediction
            gt = slide_container.get_y_patch(x, y)[:pred.shape[0], :pred.shape[1]]
            cm += confusion_matrix(gt.flatten(), pred.flatten(), labels=range(classes))
    return cm
def classification_cm_matrix(slide_container, prediction, classes):
    """Accumulate a tile-level confusion matrix for one slide.

    Each grid cell's ground truth is the majority class of its annotation patch;
    cells where either prediction or ground truth equals -1 are skipped.
    Returns a (classes, classes) matrix (ground truth rows, prediction columns).
    """
    cm = np.zeros((classes, classes))
    n_cols = int(slide_container.slide_shape[0] // slide_container.width)
    n_rows = int(slide_container.slide_shape[1] // slide_container.height)
    for col in tqdm(range(n_cols)):
        for row in range(n_rows):
            pred = prediction["classification"][row, col]
            patch = slide_container.get_y_patch(int(col * slide_container.width), int(row * slide_container.height))
            # Majority vote over the annotation patch
            values, counts = np.unique(patch, return_counts=True)
            gt = values[np.argmax(counts)]
            if pred != -1 and gt != -1:
                cm[int(gt), int(pred)] += 1
    return cm
def slide_jaccard_score(cm_matrix, labels):
    """Compute and print per-class IoU, mean IoU and frequency-weighted IoU.

    cm_matrix: (C, C) confusion matrix, ground truth on rows, predictions on columns
    labels: class names, one per row of cm_matrix
    Returns (ious, miou, fwiou); ious is NaN for classes absent from the ground
    truth. (Previously returned None; the return value is additive and backward
    compatible.) Also replaces the `np.NAN` alias, which was removed in NumPy 2.0.
    """
    ious = np.full(len(labels), np.nan)
    total = cm_matrix.sum()
    tp = np.diagonal(cm_matrix)
    posPred = cm_matrix.sum(axis=0)
    posGt = cm_matrix.sum(axis=1)
    # Check which classes have elements
    valid = posGt > 0
    iousValid = np.logical_and(valid, posGt + posPred - tp > 0)
    # Compute per-class results: IoU = TP / (GT + Pred - TP)
    ious[iousValid] = np.divide(tp[iousValid], posGt[iousValid] + posPred[iousValid] - tp[iousValid])
    freqs = np.divide(posGt, total)
    # Compute evaluation metrics
    miou = np.mean(ious[iousValid])
    fwiou = np.sum(np.multiply(ious[iousValid], freqs[iousValid]))
    print("IoUs: ", dict(zip(np.array(labels)[iousValid], np.round(ious[iousValid],4))), "Mean: ", miou)
    print("Frequency-Weighted IoU: ", np.round(fwiou,4))
    return ious, miou, fwiou
def slide_tumor_recall(cm_matrix, labels):
    """Print the slide-level tumor label, its predicted class, and the recall.

    Row 0 is treated as background; the slide label is the non-background class
    with the most ground-truth pixels, and recall is its diagonal entry over all
    non-background mass.
    """
    tumor_rows = cm_matrix[1:, :]
    slide_label = int(np.argmax(tumor_rows.sum(axis=1))) + 1
    predicted = int(np.argmax(cm_matrix[slide_label, 1:])) + 1
    recall = cm_matrix[slide_label, slide_label] / np.sum(cm_matrix[1:, 1:])
    print("Slide Label: ", labels[slide_label])
    print("Slide Classification: ", labels[predicted])
    print("Tumor Recall: ", np.round(recall, 4))
| 7,564 | 53.818841 | 166 | py |
CanineCutaneousTumors | CanineCutaneousTumors-main/evaluation/metrics.py | from fastai.vision import *
from sklearn.metrics import jaccard_score
def iou(outputs: torch.Tensor, labels: torch.Tensor):
    """Mean per-class Jaccard index (IoU) between argmax predictions and reference masks."""
    preds = outputs.argmax(dim=1).view(-1)
    refs = labels.squeeze(1).view(-1)
    per_class = jaccard_score(to_np(preds), to_np(refs), average=None)
    return tensor(np.mean(per_class))
def background_iou(outputs: torch.Tensor, labels: torch.Tensor):
    """Jaccard index (IoU) for the background class (label 0) only."""
    preds = outputs.argmax(dim=1).view(-1)
    refs = labels.squeeze(1).view(-1)
    per_class = jaccard_score(to_np(preds), to_np(refs), average=None, labels=[0])
    return tensor(np.mean(per_class))
def dermis_iou(outputs: torch.Tensor, labels: torch.Tensor):
    """Jaccard index (IoU) for the dermis class (label 1) only."""
    preds = outputs.argmax(dim=1).view(-1)
    refs = labels.squeeze(1).view(-1)
    per_class = jaccard_score(to_np(preds), to_np(refs), average=None, labels=[1])
    return tensor(np.mean(per_class))
def epidermis_iou(outputs: torch.Tensor, labels: torch.Tensor):
    """Jaccard index (IoU) for the epidermis class (label 2) only."""
    preds = outputs.argmax(dim=1).view(-1)
    refs = labels.squeeze(1).view(-1)
    per_class = jaccard_score(to_np(preds), to_np(refs), average=None, labels=[2])
    return tensor(np.mean(per_class))
def subcutis_iou(outputs: torch.Tensor, labels: torch.Tensor):
    """Jaccard index (IoU) for the subcutis class (label 3) only."""
    preds = outputs.argmax(dim=1).view(-1)
    refs = labels.squeeze(1).view(-1)
    per_class = jaccard_score(to_np(preds), to_np(refs), average=None, labels=[3])
    return tensor(np.mean(per_class))
def infl_nec_iou(outputs: torch.Tensor, labels: torch.Tensor):
    """Jaccard index (IoU) for the inflammation/necrosis class (label 4) only."""
    preds = outputs.argmax(dim=1).view(-1)
    refs = labels.squeeze(1).view(-1)
    per_class = jaccard_score(to_np(preds), to_np(refs), average=None, labels=[4])
    return tensor(np.mean(per_class))
def tumor_iou(outputs: torch.Tensor, labels: torch.Tensor):
    """Jaccard index (IoU) for the tumor class (label 5) only."""
    preds = outputs.argmax(dim=1).view(-1)
    refs = labels.squeeze(1).view(-1)
    per_class = jaccard_score(to_np(preds), to_np(refs), average=None, labels=[5])
    return tensor(np.mean(per_class))
| 1,958 | 50.552632 | 128 | py |
CanineCutaneousTumors | CanineCutaneousTumors-main/segmentation/custom_loss_functions.py | from fastai.vision import *
class FocalLoss(nn.modules.loss._WeightedLoss):
    """Multi-class focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    Down-weights easy examples with the modulating factor (1 - pt)**gamma.
    BUGFIX: the modulation is now applied per element BEFORE reduction. The
    previous implementation reduced the cross-entropy first and applied the
    factor to the scalar mean/sum, which is not the focal-loss definition
    (with gamma=0 the two agree and equal plain cross-entropy).

    weight: optional per-class weight tensor forwarded to cross_entropy
    gamma: focusing parameter (0 disables the modulation)
    reduction: 'mean' (default), 'sum', or anything else for no reduction
    ignore_index: target value ignored by the underlying cross_entropy
    """
    def __init__(self, weight=None, gamma=2, reduction='mean', ignore_index=-1):
        super(FocalLoss, self).__init__(weight, reduction=reduction)
        self.gamma = gamma
        self.ignore_index = ignore_index
        self.weight = weight
    def forward(self, output, target):
        # Per-element CE so the focal modulation can be applied per sample
        ce_loss = F.cross_entropy(output, target, reduction='none',
                                  weight=self.weight, ignore_index=self.ignore_index)
        pt = torch.exp(-ce_loss)
        focal_loss = (1 - pt) ** self.gamma * ce_loss
        if self.reduction == 'sum':
            return focal_loss.sum()
        if self.reduction == 'none':
            return focal_loss
        # 'mean' (and anything else, matching the old fallback): ignored targets
        # contribute 0 to the numerator but still count in the denominator,
        # matching the previous implementation's plain .mean()
        return focal_loss.mean()
class DiceLoss(nn.modules.loss._WeightedLoss):
    """Soft Dice loss over softmax probabilities.

    weight: optional per-class weight tensor (shape (C,))
    ignore_index: target value excluded from both numerator and denominator

    BUGFIXES vs. the previous version:
    - `weights` was only assigned when self.weight was None, so passing an
      actual class-weight tensor raised NameError; it is now used.
    - eps was added to the ratio instead of the denominator
      (`... / denom + eps` -> `... / (denom + eps)`), which biased the loss.
    """
    def __init__(self, weight=None, reduction='mean', ignore_index=-1):
        super(DiceLoss, self).__init__(weight, reduction=reduction)
        self.ignore_index = ignore_index
        self.weight = weight
    def forward(self, output, target):
        eps = 0.0001
        output = torch.softmax(output, dim=1)
        # One-hot encode the targets with the same shape/dtype as the predictions
        encoded_target = output.detach() * 0
        if self.ignore_index is not None:
            mask = target == self.ignore_index
            target = target.clone()
            target[mask] = 0
            encoded_target.scatter_(1, target.unsqueeze(1), 1)
            mask = mask.unsqueeze(1).expand_as(encoded_target)
            encoded_target[mask] = 0
        else:
            encoded_target.scatter_(1, target.unsqueeze(1), 1)
        weights = 1 if self.weight is None else self.weight
        intersection = output * encoded_target
        numerator = intersection.sum(0)
        denominator = output + encoded_target
        if self.ignore_index is not None:
            denominator[mask] = 0
        denominator = denominator.sum(0)
        return 1 - (2 * (weights * numerator).sum() + eps) / ((weights * denominator).sum() + eps)
class ComboLoss(nn.Module):
    """Weighted sum of several loss functions applied to flattened predictions.

    By default combines a FocalLoss and a DiceLoss with equal weights.
    """

    def __init__(self, reduction='mean', loss_funcs=[FocalLoss(), DiceLoss()], loss_wts=[1, 1], ch_wts=[1, 1, 1]):
        # NOTE(review): mutable default arguments are shared across instances,
        # and ``ch_wts`` is stored but never used inside this class — confirm
        # whether it is consumed elsewhere.
        super().__init__()
        self.reduction = reduction
        self.ch_wts = ch_wts
        self.loss_wts = loss_wts
        self.loss_funcs = loss_funcs

    def forward(self, output, target):
        # Move the class axis last, then collapse all other axes so each
        # sub-loss sees (num_elements, num_classes) vs (num_elements,).
        output = output.transpose(1, -1).contiguous()
        target = target.transpose(1, -1).contiguous().view(-1)
        output = output.view(-1, output.shape[-1])
        # Propagate the requested reduction into every sub-loss.
        for loss_func in self.loss_funcs:
            loss_func.reduction = self.reduction
        loss = 0
        assert len(self.loss_wts) == len(self.loss_funcs)
        for loss_wt, loss_func in zip(self.loss_wts, self.loss_funcs):
            l = loss_wt * loss_func(output, target)
            loss += l
        return loss
| 2,765 | 34.461538 | 118 | py |
CanineCutaneousTumors | CanineCutaneousTumors-main/segmentation/custom_callbacks.py | from fastai.vision import *
from fastai.callbacks import TrackerCallback
class UpdateProbabilitiesCallback(TrackerCallback):
    """fastai callback that rebalances patch sampling after every epoch.

    Reads the per-class IoU metrics from the recorder and sets each training
    slide's class-sampling probability to ``1 - IoU`` so that poorly segmented
    tissue classes are sampled more often.
    """

    def __init__(self, learn: Learner, trainslides):
        # Latest ``1 - IoU`` per metric name, initialised to 0.
        self.iou_dict = dict.fromkeys(["background_iou", "dermis_iou", "epidermis_iou", "subcutis_iou", "infl_nec_iou", "tumor_iou"], 0)
        # Maps annotation category ids to the metric that tracks that tissue;
        # ids 7-13 all map to the single tumor IoU.
        self.tissue_to_iou = {0: "background_iou", 3: "dermis_iou", 4: "epidermis_iou", 5: "subcutis_iou", 6: "infl_nec_iou",
                              7: "tumor_iou", 8: "tumor_iou", 9: "tumor_iou", 10: "tumor_iou", 11: "tumor_iou", 12: "tumor_iou", 13: "tumor_iou"}
        self.trainslides = trainslides
        super().__init__(learn)

    def on_epoch_end(self, epoch, **kwargs: Any):
        # Refresh 1 - IoU for every tracked metric from the recorder.
        for iou in (self.iou_dict.keys()):
            position = self.learn.recorder.metrics_names.index(iou)
            value = self.learn.recorder.metrics[0][position]
            self.iou_dict[iou] = 1 - float(value)
        # Push the refreshed probabilities into every training slide, only for
        # category ids the slide actually contains.
        for slide in self.trainslides:
            slide.probabilities.update((k, self.iou_dict[self.tissue_to_iou[k]]) for k in set(self.tissue_to_iou).intersection(slide.probabilities))
CanineCutaneousTumors | CanineCutaneousTumors-main/slide/slide_container.py | import openslide
import cv2
from fastai.vision import *
from shapely import geometry
class SlideContainer:
    """A whole-slide image together with its COCO-style polygon annotations.

    Provides random-access RGB patches (``get_patch``), rasterised label
    patches (``get_y_patch``) and class-balanced random sampling of training
    patch coordinates (``get_new_train_coordinates``).
    """

    def __init__(self, file: Path,
                 annotation_file,
                 level: int = 0,
                 width: int = 256, height: int = 256,
                 sample_func=None, dataset_type=None, label_dict=None):
        self.file = file
        with open(annotation_file) as f:
            data = json.load(f)
        # Tissue-class name -> COCO category id.
        self.tissue_classes = dict(zip([cat["name"] for cat in data["categories"]], [cat["id"] for cat in data["categories"]]))
        image_id = [i["id"] for i in data["images"] if i["file_name"] == file.name][0]
        # Only the polygon annotations belonging to this slide.
        self.polygons = [anno for anno in data['annotations'] if anno["image_id"] == image_id]
        self.labels = set([poly["category_id"] for poly in self.polygons])
        # Bone and cartilage are never used as sampling targets.
        self.labels.discard(self.tissue_classes["Bone"])
        self.labels.discard(self.tissue_classes["Cartilage"])
        self.training_dict = dict.fromkeys(list(self.labels))
        # Per-class sampling probabilities, initially uniform; may be updated
        # externally (e.g. by UpdateProbabilitiesCallback).
        self.probabilities = dict.fromkeys(list(self.labels), 1 / len(list(self.labels)))
        self.slide = openslide.open_slide(str(file))
        # Otsu threshold on a blurred lowest-resolution thumbnail, used later
        # to tell white background apart from tissue.
        thumbnail = cv2.cvtColor(
            np.array(self.slide.read_region((0, 0), self.slide.level_count - 1, self.slide.level_dimensions[-1]))[:, :,
                     :3], cv2.COLOR_RGB2GRAY)
        blurred = cv2.GaussianBlur(thumbnail, (5, 5), 0)
        self.white, _ = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        self.width = width
        self.height = height
        self.down_factor = self.slide.level_downsamples[level]
        # NOTE(review): if ``level`` is passed as None, the indexing on the
        # line above already fails before this fallback can take effect.
        if level is None:
            level = self.slide.level_count - 1
        self._level = level
        self.sample_func = sample_func
        self.dataset_type = dataset_type
        self.label_dict = label_dict

    @property
    def level(self):
        # Pyramid level patches are read from.
        return self._level

    @level.setter
    def level(self, value):
        # Keep the level-0 -> level coordinate scale in sync with the level.
        self.down_factor = self.slide.level_downsamples[value]
        self._level = value

    @property
    def shape(self):
        # Patch size as (width, height).
        return self.width, self.height

    @property
    def slide_shape(self):
        # Full slide dimensions at the current level.
        return self.slide.level_dimensions[self._level]

    def get_new_level(self):
        return self._level

    def get_patch(self, x: int = 0, y: int = 0):
        # read_region expects level-0 coordinates, hence the down_factor scale.
        rgb = np.array(self.slide.read_region(location=(int(x * self.down_factor), int(y * self.down_factor)),
                                              level=self._level, size=(self.width, self.height)))[:, :, :3]
        return rgb

    def get_y_patch(self, x: int = 0, y: int = 0):
        """Rasterise the annotation polygons into a label patch at (x, y)."""
        # -1 marks pixels not covered by any annotation polygon.
        y_patch = -1 * np.ones(shape=(self.height, self.width), dtype=np.int8)
        inv_map = {v: k for k, v in self.tissue_classes.items()}
        for poly in self.polygons:
            # Polygon vertices converted to patch-local level coordinates.
            coordinates = np.array(poly['segmentation']).reshape((-1, 2)) / self.down_factor
            coordinates = coordinates - (x, y)
            label = self.label_dict[inv_map[poly["category_id"]]]
            cv2.drawContours(y_patch, [coordinates.reshape((-1, 1, 2)).astype(int)], -1, label, -1)
        if self.dataset_type == 'segmentation':
            # Unannotated pixels brighter than the Otsu threshold -> class 0.
            white_mask = cv2.cvtColor(self.get_patch(x, y), cv2.COLOR_RGB2GRAY) > self.white
            excluded = (y_patch == -1)
            y_patch[np.logical_and(white_mask, excluded)] = 0
        return y_patch

    def get_new_train_coordinates(self):
        """Sample a patch top-left corner inside a randomly chosen polygon."""
        inv_map = {v: k for k, v in self.tissue_classes.items()}
        # use passed sampling method
        if callable(self.sample_func):
            return self.sample_func(self.polygons, **{"classes": self.labels, "size": self.shape,
                                                      "level_dimensions": self.slide.level_dimensions,
                                                      "level": self.level})
        # default sampling method
        xmin, ymin = 0, 0
        found = False
        while not found:
            iter = 0
            # Pick a class by the current sampling probabilities, then one of
            # its polygons weighted by polygon area.
            label = random.choices(list(self.probabilities.keys()), list(self.probabilities.values()))[0]
            polygons = [poly for poly in self.polygons if poly["category_id"] == label]
            polygons_area = [poly["area"] for poly in polygons]
            polygons_area = np.array(polygons_area) / sum(polygons_area)
            polygon = random.choices(polygons, polygons_area)[0]
            coordinates = np.array(polygon['segmentation']).reshape((-1, 2))
            minx, miny, xrange, yrange = polygon["bbox"]
            # Rejection-sample up to 25 points inside the polygon's bbox.
            while iter < 25 and not found:
                iter += 1
                pnt = geometry.Point(random.uniform(minx, minx + xrange), random.uniform(miny, miny + yrange))
                if geometry.Polygon(coordinates).contains(pnt):
                    # Centre the patch on the sampled point (level coordinates).
                    xmin = pnt.x // self.down_factor - self.width / 2
                    ymin = pnt.y // self.down_factor - self.height / 2
                    found = True
            if self.dataset_type == 'classification' and found:
                # Reject the patch unless the chosen class both dominates the
                # label patch and covers at least 90% of its pixels.
                if np.unique(self.get_y_patch(xmin, ymin))[
                    np.argmax(np.unique(self.get_y_patch(xmin, ymin), return_counts=True)[1])] != self.label_dict[
                    inv_map[label]] or np.sum(self.get_y_patch(xmin, ymin) == self.label_dict[inv_map[label]]) < (
                        self.width * self.height * 0.9):
                    found = False
        return xmin, ymin

    def __str__(self):
        return str(self.file)
| 5,420 | 43.801653 | 130 | py |
CanineCutaneousTumors | CanineCutaneousTumors-main/slide/slide_helper.py | from fastai.vision import *
from fastai.data_block import *
from fastai.vision.data import SegmentationProcessor
from slide.slide_container import SlideContainer
PreProcessors = Union[PreProcessor, Collection[PreProcessor]]
fastai_types[PreProcessors] = 'PreProcessors'
class SlideLabelList(LabelList):
    """fastai LabelList that draws a fresh random patch position per access."""

    def __getitem__(self, idxs: Union[int, np.ndarray]) -> 'LabelList':
        idxs = try_int(idxs)
        if isinstance(idxs, numbers.Integral):
            if self.item is None:
                slide_container = self.x.items[idxs]
                slide_container_y = self.y.items[idxs]
                # Sample a new top-left corner, then cut x and y from it.
                xmin, ymin = slide_container.get_new_train_coordinates()
                x = self.x.get(idxs, xmin, ymin)
                try:
                    y = self.y.get(idxs, xmin, ymin)
                except:
                    # Label classes without coordinate support fall back to
                    # the plain index-based getter.
                    y = self.y.get(idxs)
            else:
                x, y = self.item, -1
            if self.tfms or self.tfmargs:
                x = x.apply_tfms(self.tfms, **self.tfmargs)
            if hasattr(self, 'tfms_y') and self.tfm_y and self.item is None:
                y = y.apply_tfms(self.tfms_y, **{**self.tfmargs_y, 'do_resolve': False})
            if y is None: y = -1
            return x, y
        else:
            return self.new(self.x[idxs], self.y[idxs])
class SlideItemList(ItemList):
    """ItemList whose ``get`` accepts patch coordinates besides the index."""

    def __init__(self, items: Iterator, path: PathOrStr = '.', label_cls: Callable = None, inner_df: Any = None,
                 processor: PreProcessors = None, x: 'ItemList' = None, ignore_empty: bool = False):
        self.path = Path(path)
        self.num_parts = len(self.path.parts)
        self.items, self.x, self.ignore_empty = items, x, ignore_empty
        self.sizes = [None] * len(self.items)
        if not isinstance(self.items, np.ndarray): self.items = array(self.items, dtype=object)
        self.label_cls, self.inner_df, self.processor = ifnone(label_cls, self._label_cls), inner_df, processor
        # Use the coordinate-aware LabelList when labelling.
        self._label_list, self._split = SlideLabelList, ItemLists
        self.copy_new = ['x', 'label_cls', 'path']

    def __getitem__(self, idxs: int, x: int = 0, y: int = 0) -> Any:
        idxs = try_int(idxs)
        if isinstance(idxs, numbers.Integral):
            return self.get(idxs, x, y)
        else:
            return self.get(*idxs)
class SlideImageItemList(SlideItemList):
    """Marker base class for image item lists backed by SlideContainers."""
    pass
class SlideSegmentationItemList(SlideImageItemList, ImageList):
    """Image patches cut on the fly from a SlideContainer."""

    def get(self, i, x: int, y: int):
        fn = self.items[i]
        res = self.open(fn, x, y)
        self.sizes[i] = res.size
        return res

    def open(self, fn: SlideContainer, x: int = 0, y: int = 0):
        # Scale 8-bit RGB to [0, 1] floats before wrapping as a fastai Image.
        patch = fn.get_patch(x, y) / 255.
        return Image(pil2tensor(patch, np.float32))
class SlideSegmentationLabelList(ImageList, SlideImageItemList):
    "`ItemList` for segmentation masks."
    _processor = SegmentationProcessor

    def __init__(self, items: Iterator, classes: Collection = None, **kwargs):
        super().__init__(items, **kwargs)
        self.copy_new.append('classes')
        self.classes = classes

    def get(self, i, x: int, y: int):
        fn = self.items[i]
        res = self.open(fn, x, y)
        self.sizes[i] = res.size
        return res

    def open(self, fn: SlideContainer, x: int = 0, y: int = 0):
        # Rasterise the slide's annotation polygons into a label-mask patch.
        patch = fn.get_y_patch(x, y)
        return ImageSegment(pil2tensor(patch, np.float32))

    def analyze_pred(self, pred, thresh: float = 0.5): return pred.argmax(dim=0)[None]

    def reconstruct(self, t: Tensor):
        return ImageSegment(t)
class SlideClassificationLabelList(CategoryList):
    """Category labels derived from the dominant class of a label patch."""
    _processor = CategoryProcessor

    def __init__(self, items: Iterator, classes: Collection = None, label_delim: str = None, **kwargs):
        super().__init__(items, classes=classes, **kwargs)
        self.copy_new.append('classes')
        self.classes = classes

    def get(self, i, x: int, y: int):
        o = self.items[i]  # NOTE(review): fetched but unused
        fn = self.x.items[i]
        res = fn.get_y_patch(x, y)
        # The patch label is the most frequent pixel class in the mask.
        dominant_label = np.unique(res)[np.argmax(np.unique(res, return_counts=True)[1])]
        return Category(LongTensor([dominant_label]), self.classes[dominant_label])
Aegean | Aegean-main/doc/conf.py | # -*- coding: utf-8 -*-
#
# AegeanTools documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 27 14:54:34 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the AegeanTools package importable so autodoc can find it.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.mathjax',
              'sphinx.ext.viewcode',
              'numpydoc',
              'myst_parser']

# config numpydoc to not use autosummary
numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = {'.rst': 'restructuredtext',
                 '.md': 'markdown'}

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'AegeanTools'
copyright = u'2022, PaulHancock'
author = u'PaulHancock'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2'
# The full version, including alpha/beta/rc tags.
release = u'2.3.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): language=None is deprecated in newer Sphinx; use 'en' when
# upgrading.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'AegeanToolsdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'AegeanTools.tex', u'AegeanTools Documentation',
     u'PaulHancock', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'aegeantools', u'AegeanTools Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'AegeanTools', u'AegeanTools Documentation',
     author, 'AegeanTools', 'One line description of project.',
     'Miscellaneous'),
]
| 5,022 | 30.006173 | 79 | py |
FLM | FLM-master/run.py | import os
import copy
import json
import torch
import pytorch_lightning as pl
from flm.modules import FLMTransformerSS
from flm.datamodules.multitask_datamodule import MTDataModule
from flm.config import ex
def args_checker(config):
    """Sanity-check mutually dependent config flags before training starts.

    NOTE(review): nesting reconstructed from context (the source dump lost its
    indentation) — confirm which asserts are gated on
    ``enable_flm_aux_lm_loss`` in the original file.
    """
    if config['enable_flm_aux_lm_loss']:
        # The auxiliary LM loss requires the FLM objective to be active, the
        # FLM backbone, and causal attention masking.
        assert config['loss_names']['flm'] > 0
        assert config['flm_backbone']
        assert config['is_causal_mask']
    assert config["hidden_size"] == config["hidden_size_for_fusion"], \
        "only support hidden_size_for_fusion=hidden_size"
@ex.automain
def run(_config):
    """Sacred entry point: set up distributed state, data, model and trainer,
    then fit (or, with ``test_only``, evaluate) the FLM model."""
    config = copy.deepcopy(_config)
    args_checker(config)
    # print(os.environ)
    # Distributed topology handed down by the launcher's environment.
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    rank = int(os.environ.get('RANK', 0))
    local_rank = int(os.environ.get('LOCAL_RANK', 0))
    nnodes = int(os.environ.get('NNODES', 1))
    config["world_size"] = world_size
    config["rank"] = rank
    config["nnodes"] = nnodes
    config["num_nodes"] = nnodes
    config["local_rank"] = local_rank
    device = torch.device(f'cuda:{local_rank}')
    torch.cuda.set_device(device)
    pl.seed_everything(config["seed"])
    dm = MTDataModule(config, dist=True)
    exp_name = f'{config["exp_name"]}'
    os.makedirs(config["log_dir"], exist_ok=True)
    # Keep the top-k checkpoints by validation metric, plus the last one.
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        dirpath=None,  # use logger's path
        save_top_k=config["ckpt_save_top_k"],
        verbose=True,
        monitor="val/the_metric",
        mode="max",
        save_last=True,
        filename='epoch_{epoch:0>3d}-step_{step:0>6d}-val_score_{val/the_metric:.3f}',
        auto_insert_metric_name=False,
    )
    version = 0 if config['fix_exp_version'] else None
    logger = pl.loggers.TensorBoardLogger(
        config["log_dir"],
        name=f'{exp_name}_seed{config["seed"]}_from_{config["load_path"].split("/")[-1][:-5]}',
        version=version,
    )
    config['exp_path'] = logger.root_dir
    lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
    callbacks = [checkpoint_callback, lr_callback]
    # num_gpus may be given as a count or as an explicit device list.
    num_gpus = (
        config["num_gpus"]
        if isinstance(config["num_gpus"], int)
        else len(config["num_gpus"])
    )
    print(config)
    # Gradient accumulation so the effective batch matches config["batch_size"].
    available_batch_size = config["per_gpu_batchsize"] * \
        num_gpus * config["num_nodes"]
    grad_steps = max(config["batch_size"] // (available_batch_size), 1)
    max_steps = config["max_steps"] if config["max_steps"] is not None else None
    if local_rank == 0:
        # print(os.environ)
        print(
            f' Node Num: {num_gpus}, Total GPU Numbers: {num_gpus * config["num_nodes"]}')
        print(
            f' Total Batch Size: {config["batch_size"]}, \
            Available Batch Size: {available_batch_size}, \
            Per GPU Batch Size: {config["per_gpu_batchsize"]},\
            Grad Steps: {grad_steps}')
        print(f' Resume_from: {config["resume_from"]}')
        print(f' Load_path: {config["load_path"]}')
        print(' All configs: \n', json.dumps(
            _config, sort_keys=True, indent=4, separators=(',', ':')))
    model = FLMTransformerSS(config)
    trainer = pl.Trainer(
        gpus=config["num_gpus"],
        num_nodes=config["num_nodes"],
        precision=config["precision"],
        accelerator="ddp",
        benchmark=True,
        deterministic=True,
        # When training is step-bounded, let the epoch bound be effectively
        # unlimited.
        max_epochs=config["max_epoch"] if max_steps is None else 1000,
        max_steps=max_steps,
        callbacks=callbacks,
        logger=logger,
        prepare_data_per_node=config["prepare_data_per_node"],
        replace_sampler_ddp=False,
        accumulate_grad_batches=grad_steps,
        log_every_n_steps=100,
        flush_logs_every_n_steps=100,
        resume_from_checkpoint=config["resume_from"],
        weights_summary="top",
        fast_dev_run=config["fast_dev_run"],
        val_check_interval=config["val_check_interval"],
        # progress_bar_refresh_rate= 5 if config['debug'] else 200,
        num_sanity_val_steps=config['num_sanity_val_steps'],
    )
    if not config["test_only"]:
        trainer.fit(model, datamodule=dm)
    else:
        trainer.test(model, datamodule=dm)
| 4,191 | 33.081301 | 95 | py |
FLM | FLM-master/flm/datamodules/multitask_datamodule.py | from builtins import hasattr
import functools
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from torch.utils.data.dataset import ConcatDataset
from torch.utils.data.distributed import DistributedSampler
from . import _datamodules
import webdataset as wds
# datamodule for mutiple datasets
class MTDataModule(LightningDataModule):
    """Multi-task datamodule wrapping one child datamodule per dataset name.

    Batches are drawn from the concatenation of all member datasets; when the
    single member is webdataset-backed, its pipeline is used directly instead
    (webdataset pipelines get no DistributedSampler).
    """

    def __init__(self, _config, dist=False):
        datamodule_keys = _config["datasets"]
        assert len(datamodule_keys) > 0
        super().__init__()
        self.dm_keys = datamodule_keys
        self.dm_dicts = {key: _datamodules[key](
            _config) for key in datamodule_keys}
        self.dms = [v for k, v in self.dm_dicts.items()]
        # Shared hyper-parameters are taken from the first datamodule.
        self.batch_size = self.dms[0].batch_size
        self.vocab_size = self.dms[0].vocab_size
        self.num_workers = self.dms[0].num_workers
        self.dist = dist
        self.allow_val_webdataset = _config['allow_val_webdataset']

    def prepare_data(self):
        for dm in self.dms:
            dm.prepare_data()

    def setup(self, stage):
        def check_webdataset(dataset):
            # True when the dataset is webdataset-backed (implicitly returns
            # None, i.e. falsy, otherwise).
            if hasattr(dataset, 'inner_dataset'):
                return True
        for dm in self.dms:
            dm.setup(stage)
        if check_webdataset(self.dms[0].train_dataset):
            assert len(
                self.dms) == 1, 'does not support webdataset instance larger than 1'
            self.train_dataset = self.dms[0].train_dataset.inner_dataset
            # self.train_dataset.append(wds.batched(self.batch_size))
        else:
            self.train_dataset = ConcatDataset(
                [dm.train_dataset for dm in self.dms])
        if check_webdataset(self.dms[0].val_dataset) and self.allow_val_webdataset:
            self.val_dataset = self.dms[0].val_dataset.inner_dataset
            # self.val_dataset.append(wds.batched(self.batch_size))
        else:
            self.val_dataset = ConcatDataset(
                [dm.val_dataset for dm in self.dms])
        if check_webdataset(self.dms[0].test_dataset) and self.allow_val_webdataset:
            self.test_dataset = self.dms[0].test_dataset.inner_dataset
            # self.test_dataset.append(wds.batched(self.batch_size))
        else:
            self.test_dataset = ConcatDataset(
                [dm.test_dataset for dm in self.dms])
        self.tokenizer = self.dms[0].tokenizer
        # Collate functions carry the first datamodule's MLM collator along.
        self.train_collate = functools.partial(
            self.dms[0].train_dataset.collate, mlm_collator=self.dms[0].mlm_collator
        )
        self.val_collate = functools.partial(
            self.dms[0].val_dataset.collate, mlm_collator=self.dms[0].mlm_collator
        )
        self.test_collate = functools.partial(
            self.dms[0].test_dataset.collate, mlm_collator=self.dms[0].mlm_collator
        )
        if self.dist:
            # Webdataset pipelines shard themselves, so they get no sampler.
            if isinstance(self.train_dataset, wds.DataPipeline):
                self.train_sampler = None
            else:
                self.train_sampler = DistributedSampler(
                    self.train_dataset, shuffle=True)
            if isinstance(self.val_dataset, wds.DataPipeline) and self.allow_val_webdataset:
                self.val_sampler = None
            else:
                self.val_sampler = DistributedSampler(
                    self.val_dataset, shuffle=True)
            if isinstance(self.test_dataset, wds.DataPipeline) and self.allow_val_webdataset:
                self.test_sampler = None
            else:
                self.test_sampler = DistributedSampler(
                    self.test_dataset, shuffle=False)
        else:
            self.train_sampler = None
            self.val_sampler = None
            self.test_sampler = None

    def train_dataloader(self):
        loader = DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            sampler=self.train_sampler,
            num_workers=self.num_workers,
            collate_fn=self.train_collate,
        )
        return loader

    def val_dataloader(self, batch_size=None):
        loader = DataLoader(
            self.val_dataset,
            batch_size=batch_size if batch_size is not None else self.batch_size,
            sampler=self.val_sampler,
            num_workers=self.num_workers,
            collate_fn=self.val_collate,
        )
        return loader

    def test_dataloader(self):
        loader = DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            sampler=self.test_sampler,
            num_workers=self.num_workers,
            collate_fn=self.test_collate,
        )
        return loader
| 4,666 | 34.625954 | 93 | py |
FLM | FLM-master/flm/datamodules/datamodule_base.py | from random import shuffle
import torch
import functools
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from transformers import (
DataCollatorForLanguageModeling,
# DataCollatorForWholeWordMask,
BertTokenizer,
RobertaTokenizer,
)
from flm.utils.whole_word_masking import DataCollatorForWholeWordMask
class text_preprocessor():
    """Normalise a caption's trailing period and optionally wrap it in the
    special ``<bos>``/``<eos>`` tokens (both gated on ``add_new_bos_token``)."""

    def __init__(self, config) -> None:
        use_special = config['add_new_bos_token']
        self.prepend_bos = use_special and config['prepend_bos_token']
        self.append_eos = use_special and config['append_eos_token']

    def __call__(self, text):
        # Strip trailing whitespace/periods, then terminate with one period.
        normalized = text.rstrip().rstrip('.').rstrip() + '.'
        pieces = []
        if self.prepend_bos:
            pieces.append('<bos>')
        pieces.append(normalized)
        if self.append_eos:
            pieces.append('<eos>')
        return ' '.join(pieces)
def flm_collator(attention_mask, mask_ratio, disable_shuffle=True, label_strategy='none'):
    """Build free-language-modeling (FLM) attention masks for a text batch.

    For each sequence, a random subset of the positions 1..len-1 is kept; a
    kept position may attend to the kept positions drawn before it, giving a
    random autoregressive order.  Returns per-sequence kept position ids, an
    additive (bs, L, L) mask (0 = visible, -10000 = blocked) and ``flm_label``
    (currently always None).
    """
    text_len = attention_mask.sum(1)
    bs, max_len = attention_mask.size()
    # Additive attention mask, fully blocked by default.
    flm_masks = -10000. * torch.ones(bs, max_len, max_len)
    # attention_mask.unsqueeze(dim=2) * attention_mask.unsqueeze(dim=1)
    flm_random_ids = []
    # Number of positions to drop per sequence ~ Binomial(len - 1, mask_ratio).
    mask_num = torch.distributions.Binomial(
        text_len.float() - 1, mask_ratio).sample().int()
    for i in range(len(text_len)):
        # Random permutation of positions 1..len-1 (position 0 never kept).
        flm_random_id = torch.randperm(text_len[i] - 1) + 1
        flm_random_id = flm_random_id[:text_len[i] - 1 - mask_num[i]]
        if disable_shuffle:
            # Restore natural left-to-right ordering of the kept positions.
            flm_random_id = torch.sort(flm_random_id)[0]
        flm_random_ids.append(flm_random_id)
        # print(flm_random_id)
        for j in range(len(flm_random_id)):
            if flm_random_id[j] < 0:
                break
            else:
                # The j-th kept position may attend to the first j+1 kept ones
                # (including itself).
                flm_masks[i,
                          flm_random_id[j:j + 1].repeat(j+1),
                          flm_random_id[:j+1]] = 0
    flm_label = None
    if label_strategy == 'none':
        pass
    else:
        # NOTE(review): both strategies below are unimplemented placeholders —
        # flm_label stays None regardless of label_strategy.
        if label_strategy == 'object':
            pass
        elif label_strategy == 'concrete':
            pass
    return flm_random_ids, flm_masks, flm_label
def sep_collator(flatten_encodings, mlm_collator, mask_ratio, pred_corr_ratio):
    """Split MLM corruption over position groups ("separated" masking).

    With ``pred_corr_ratio > 1`` (used as an integer group count) each
    encoding's positions are partitioned into that many random groups and one
    corrupted copy per group is returned (a list of input/label dicts).  With
    ``pred_corr_ratio < 1`` only that fraction of positions keeps its MLM
    label (a single dict).  ``mask_ratio`` is accepted but unused here.

    NOTE(review): when ``pred_corr_ratio == 1`` neither branch runs and the
    function implicitly returns None — confirm callers never pass exactly 1.
    (The original ``-> None`` annotation was wrong and has been removed.)
    """
    if pred_corr_ratio > 1:
        repeat_num = int(pred_corr_ratio)
        group_mlms = [[] for i in range(repeat_num)]
        mlms = mlm_collator(flatten_encodings)
        # print('mlms', mlms)
        for idx, flatten_encoding in enumerate(flatten_encodings):
            token_num = len(flatten_encoding['attention_mask'])
            chunk_size = token_num // repeat_num + 1
            org_input_id = torch.tensor(flatten_encoding['input_ids'])
            mlm_input_id = mlms['input_ids'][idx]
            mlm_labels = mlms['labels'][idx]
            # Positions that are real, non-special tokens (computed but unused
            # below — NOTE(review): confirm whether grouping should respect it).
            ava_mask_reg = torch.tensor(flatten_encoding['attention_mask']) * (
                1 - torch.tensor(flatten_encoding['special_tokens_mask']))
            # Partition all positions into repeat_num random groups.
            perm = torch.randperm(token_num)
            groups = perm.split(chunk_size)
            assert len(groups) == repeat_num
            for i in range(repeat_num):
                group_mask = torch.zeros(token_num).long()
                group_mask[groups[i]] = 1
                # Corrupt only this group's positions; keep originals elsewhere.
                group_input_id = org_input_id * \
                    (1 - group_mask) + mlm_input_id * group_mask
                group_label = -100 * torch.ones(token_num).long()
                group_label[group_mask.bool()] = mlm_labels[group_mask.bool()]
                group_mlm = {'input_ids': group_input_id,
                             'labels': group_label}
                group_mlms[i].append(group_mlm)
            # print(group_mask)
        # Stack each group's per-sample dicts into batched tensors.
        for i in range(repeat_num):
            group_mlms[i] = {'input_ids': torch.stack([_['input_ids'] for _ in group_mlms[i]]),
                             'labels': torch.stack([_['labels'] for _ in group_mlms[i]])}
        return group_mlms
    elif pred_corr_ratio < 1:
        mlms = mlm_collator(flatten_encodings)
        group_labels = []
        # print('mlms', mlms)
        for idx, flatten_encoding in enumerate(flatten_encodings):
            token_num = len(flatten_encoding['attention_mask'])
            mlm_input_id = mlms['input_ids'][idx]
            mlm_labels = mlms['labels'][idx]
            # Keep MLM labels only at a random pred_corr_ratio fraction of
            # positions; everything else is set to the ignore index -100.
            perm = torch.randperm(token_num)[:int(token_num * pred_corr_ratio)]
            group_label = -100 * torch.ones(token_num).long()
            group_label[perm] = mlm_labels[perm]
            group_labels.append(group_label)
        group_mlm = {'input_ids': mlms['input_ids'],
                     'labels': torch.stack(group_labels, dim=0)}
        return group_mlm
def get_pretrained_tokenizer(from_pretrained):
    """Load a HuggingFace tokenizer, pre-downloading on rank 0 under DDP.

    When torch.distributed is initialised, only rank 0 triggers the download
    (populating the shared cache) and all ranks wait at a barrier before
    loading.
    """
    if torch.distributed.is_initialized():
        if torch.distributed.get_rank() == 0:
            if 'roberta' in from_pretrained:
                RobertaTokenizer.from_pretrained(from_pretrained)
            else:
                BertTokenizer.from_pretrained(
                    from_pretrained, do_lower_case="uncased" in from_pretrained
                )
        torch.distributed.barrier()
    if 'roberta' in from_pretrained:
        return RobertaTokenizer.from_pretrained(from_pretrained)
    elif 'gpt2' in from_pretrained:
        # NOTE(review): the rank-0 pre-download above has no gpt2 branch, so
        # under DDP every rank may download GPT-2 concurrently — confirm.
        from transformers import GPT2Tokenizer, GPT2Model
        return GPT2Tokenizer.from_pretrained('gpt2')
    return BertTokenizer.from_pretrained(
        from_pretrained, do_lower_case="uncased" in from_pretrained
    )
class BaseDataModule(LightningDataModule):
    """Common Lightning datamodule for the vision-language datasets.

    Subclasses supply ``dataset_cls`` / ``dataset_name``; this base handles
    tokenizer construction, the MLM/FLM collators, train/val/test dataset
    creation and the plain (non-distributed) dataloaders.
    """

    def __init__(self, _config):
        super().__init__()
        self.data_dir = _config["data_root"]
        self.num_workers = _config["num_workers"]
        self.batch_size = _config["per_gpu_batchsize"]
        self.eval_batch_size = self.batch_size
        self.image_size = _config["image_size"]
        self.max_text_len = _config["max_text_len"]
        self.draw_false_image = _config["draw_false_image"]
        self.draw_false_text = _config["draw_false_text"]
        self.image_only = _config["image_only"]
        # Fall back to the default transforms when none are configured.
        self.train_transform_keys = (
            ["default_train"]
            if len(_config["train_transform_keys"]) == 0
            else _config["train_transform_keys"]
        )
        self.val_transform_keys = (
            ["default_val"]
            if len(_config["val_transform_keys"]) == 0
            else _config["val_transform_keys"]
        )
        tokenizer = _config["tokenizer"]
        self.tokenizer = get_pretrained_tokenizer(tokenizer)
        if _config['add_new_bos_token']:
            # Register the special tokens that text_preprocessor inserts.
            self.tokenizer.add_tokens(['<bos>', '<eos>'])
        self.vocab_size = self.tokenizer.vocab_size
        collator = (
            DataCollatorForWholeWordMask
            if _config["whole_word_masking"]
            else DataCollatorForLanguageModeling
        )
        # Both the masked-LM collator and the FLM mask builder travel together.
        self.mlm_collator = {'mlm_collator':
                             collator(tokenizer=self.tokenizer,
                                      mlm=True,
                                      mlm_probability=_config["mlm_prob"]),
                             "flm_collator":
                             functools.partial(
                                 flm_collator,
                                 mask_ratio=_config["flm_mask_prob"],
                                 disable_shuffle=_config["disable_flm_shuffle"]),
                             }
        self.text_preprocessor = text_preprocessor(_config)
        self.setup_flag = False
        self.max_dataset_len = _config.get('max_dataset_len', -1)

    @property
    def dataset_cls(self):
        raise NotImplementedError("return tuple of dataset class")

    @property
    def dataset_name(self):
        raise NotImplementedError("return name of dataset")

    def set_train_dataset(self):
        self.train_dataset = self.dataset_cls(
            self.data_dir,
            self.train_transform_keys,
            split="train",
            image_size=self.image_size,
            max_text_len=self.max_text_len,
            draw_false_image=self.draw_false_image,
            draw_false_text=self.draw_false_text,
            image_only=self.image_only,
            tokenizer=self.tokenizer,
            disable_sep_mlm=False,
            text_preprocessor=self.text_preprocessor,
            max_dataset_len=self.max_dataset_len
        )

    def set_val_dataset(self):
        self.val_dataset = self.dataset_cls(
            self.data_dir,
            self.val_transform_keys,
            split="val",
            image_size=self.image_size,
            max_text_len=self.max_text_len,
            draw_false_image=self.draw_false_image,
            draw_false_text=self.draw_false_text,
            image_only=self.image_only,
            tokenizer=self.tokenizer,
            text_preprocessor=self.text_preprocessor,
            max_dataset_len=self.max_dataset_len
        )
        # Optional variant without negative (false) samples, when provided.
        if hasattr(self, "dataset_cls_no_false"):
            self.val_dataset_no_false = self.dataset_cls_no_false(
                self.data_dir,
                self.val_transform_keys,
                split="val",
                image_size=self.image_size,
                max_text_len=self.max_text_len,
                draw_false_image=0,
                draw_false_text=0,
                image_only=self.image_only,
                tokenizer=self.tokenizer,
                text_preprocessor=self.text_preprocessor,
                max_dataset_len=self.max_dataset_len
            )

    def make_no_false_val_dset(self, image_only=False):
        # Validation dataset without negative samples (used e.g. for retrieval).
        return self.dataset_cls_no_false(
            self.data_dir,
            self.val_transform_keys,
            split="val",
            image_size=self.image_size,
            max_text_len=self.max_text_len,
            draw_false_image=0,
            draw_false_text=0,
            image_only=image_only,
            tokenizer=self.tokenizer,
            text_preprocessor=self.text_preprocessor,
            max_dataset_len=self.max_dataset_len
        )

    def set_test_dataset(self):
        self.test_dataset = self.dataset_cls(
            self.data_dir,
            self.val_transform_keys,
            split="test",
            image_size=self.image_size,
            max_text_len=self.max_text_len,
            draw_false_image=self.draw_false_image,
            draw_false_text=self.draw_false_text,
            image_only=self.image_only,
            tokenizer=self.tokenizer,
            text_preprocessor=self.text_preprocessor,
            max_dataset_len=self.max_dataset_len
        )

    def setup(self, stage):
        # Build the datasets only once, even if Lightning calls setup again.
        if not self.setup_flag:
            self.set_train_dataset()
            self.set_val_dataset()
            self.set_test_dataset()
            self.train_dataset.tokenizer = self.tokenizer
            self.val_dataset.tokenizer = self.tokenizer
            self.test_dataset.tokenizer = self.tokenizer
            self.setup_flag = True

    def train_dataloader(self):
        loader = DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
            pin_memory=True,
            collate_fn=self.train_dataset.collate,
        )
        return loader

    def val_dataloader(self):
        loader = DataLoader(
            self.val_dataset,
            batch_size=self.eval_batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            pin_memory=True,
            collate_fn=self.val_dataset.collate,
        )
        return loader

    def test_dataloader(self):
        loader = DataLoader(
            self.test_dataset,
            batch_size=self.eval_batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            pin_memory=True,
            collate_fn=self.test_dataset.collate,
        )
        return loader
| 11,900 | 35.959627 | 95 | py |
FLM | FLM-master/flm/gadgets/my_metrics.py | import torch
from pytorch_lightning.metrics import Metric
class Accuracy(Metric):
    """Running classification accuracy over logits vs. integer targets,
    synchronised across processes via summed state."""

    def __init__(self, dist_sync_on_step=False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        for state_name in ("correct", "total"):
            self.add_state(state_name, default=torch.tensor(0.0),
                           dist_reduce_fx="sum")

    def update(self, logits, target, ignore_index=-100):
        device = self.correct.device
        logits = logits.detach().to(device)
        target = target.detach().to(device)
        preds = logits.argmax(dim=-1)
        # Drop positions flagged with the ignore index before scoring.
        keep = target != ignore_index
        preds, target = preds[keep], target[keep]
        if target.numel() == 0:
            return 1  # nothing to score in this batch (kept from original)
        assert preds.shape == target.shape
        self.correct += (preds == target).sum()
        self.total += target.numel()

    def compute(self):
        return self.correct / self.total
class Scalar(Metric):
    """Distributed running mean of an arbitrary scalar (e.g. a loss value)."""

    def __init__(self, dist_sync_on_step=False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        for state_name in ("scalar", "total"):
            self.add_state(state_name, default=torch.tensor(0.0),
                           dist_reduce_fx="sum")

    def update(self, scalar):
        # Accept plain Python numbers as well as tensors.
        if isinstance(scalar, torch.Tensor):
            value = scalar.detach()
        else:
            value = torch.tensor(scalar).float()
        self.scalar += value.to(self.scalar.device)
        self.total += 1

    def compute(self):
        return self.scalar / self.total
class VQAScore(Metric):
    """calculate and log the VQA accuracy

    ``target`` holds per-answer soft scores; the predicted (argmax) answer
    is credited with its annotator score, summed over the batch.
    """

    def __init__(self, dist_sync_on_step=False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        for state_name in ("score", "total"):
            self.add_state(state_name, default=torch.tensor(0.0),
                           dist_reduce_fx="sum")

    def update(self, logits, target):
        device = self.score.device
        logits = logits.detach().float().to(device)
        target = target.detach().float().to(device)
        predicted = logits.argmax(dim=1)
        # One-hot mask of the predicted answer, then read off its soft score.
        one_hots = torch.zeros_like(target)
        one_hots.scatter_(1, predicted.view(-1, 1), 1)
        self.score += (one_hots * target).sum()
        self.total += len(predicted)

    def compute(self):
        return self.score / self.total
| 2,553 | 30.925 | 72 | py |
FLM | FLM-master/flm/modules/clip_model.py | # ------------------------------------------------------------------------
# CLIP
# Modified from https://github.com/openai/CLIP/blob/main/clip/model.py
# Copyright (c) OpenAI
# ------------------------------------------------------------------------
import warnings
from tqdm import tqdm
import urllib
import hashlib
import os
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
from torch import nn
class LayerNorm(nn.LayerNorm):
    """LayerNorm that normalizes in fp32 and casts back, so fp16 inputs are safe."""

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(input_dtype)
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation used by CLIP: ``x * sigmoid(1.702 * x)``."""

    def forward(self, x: torch.Tensor):
        return torch.sigmoid(1.702 * x) * x
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: self-attention + 4x-expansion MLP,
    each wrapped in a residual connection (CLIP architecture).

    ``attn_mask`` is an optional additive attention mask shared by all
    positions; ``x_mask`` passed at call time is a per-sample key padding
    mask (True marks padded keys for ``nn.MultiheadAttention``).
    """
    def __init__(self, d_model: int,
                 n_head: int,
                 attn_mask: torch.Tensor = None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # Names c_fc/gelu/c_proj mirror the released CLIP checkpoints so
        # their weights load by key.
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor, x_mask: torch.Tensor):
        if x_mask is not None:
            x_mask = x_mask.to(dtype=torch.bool, device=x.device)
        # NOTE: intentionally overwrites self.attn_mask so the dtype/device
        # coercion sticks after the first call.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) \
            if self.attn_mask is not None else None
        return self.attn(x, x, x,
                         need_weights=False,
                         attn_mask=self.attn_mask,
                         key_padding_mask=x_mask)[0]

    def forward(self, x: torch.Tensor, x_mask: torch.Tensor = None):
        # x is (seq, batch, dim) as required by nn.MultiheadAttention.
        x = x + self.attention(self.ln_1(x), x_mask)
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """Stack of pre-norm residual attention blocks (CLIP style).

    NOTE(review): ``self.layers`` stores ``layers`` but only ``layers - 1``
    resblocks are instantiated (upstream CLIP uses ``range(layers)``).
    Presumably the final block is handled elsewhere in this project
    (e.g. replaced by fusion layers) -- confirm before reusing standalone.
    """
    def __init__(self, width: int, layers: int,
                 heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        self.resblocks = nn.Sequential(
            *[ResidualAttentionBlock(width, heads, attn_mask)
              for _ in range(layers-1)])

    def forward(self, x: torch.Tensor, x_mask: torch.Tensor = None):
        # Iterate manually (instead of calling the Sequential) so the
        # padding mask can be threaded through every block.
        for block in self.resblocks:
            x = block(x, x_mask)
        return x
class VisualTransformer(nn.Module):
    """CLIP ViT image encoder: patch-embed conv, CLS token, positional
    embedding sized for ``resolution_after``, then a transformer stack.

    Returns the post-LN token sequence (CLS first); no projection to
    ``output_dim`` is applied here even though it is recorded.
    """
    def __init__(self, input_resolution: int, patch_size: int, width: int,
                 layers: int, heads: int, output_dim: int,
                 resolution_after: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding: stride == kernel == patch_size.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width,
                               kernel_size=patch_size, stride=patch_size,
                               bias=False)
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        # Positional table sized for the *target* resolution, not the
        # checkpoint's native one (weights are interpolated on load).
        self.positional_embedding = nn.Parameter(
            scale * torch.randn(
                (resolution_after // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads)
        self.ln_post = LayerNorm(width)

    def forward(self, x: torch.Tensor, x_mask):
        """Encode images ``x`` (batch, 3, H, W); ``x_mask`` is an optional
        key padding mask forwarded to the transformer."""
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        # shape = [*, width, grid ** 2]
        x = x.reshape(x.shape[0], x.shape[1], -1)
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Broadcast the learned CLS embedding to one token per sample.
        t = self.class_embedding.to(
            x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype,
                                   device=x.device)
        x = torch.cat([t, x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, x_mask)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_post(x)
        return x
class CLIP(nn.Module):
    """Trimmed CLIP that keeps only the visual tower.

    ``forward`` runs just ``self.visual``; ``positional_embedding`` and
    ``ln_final`` are still declared so released CLIP checkpoints load
    without key mismatches.  NOTE(review): no text transformer is built
    here -- presumably the text side lives elsewhere in this project.
    """
    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int,
                 resolution_after=224,
                 ):
        super().__init__()
        self.context_length = context_length

        # CLIP convention: one attention head per 64 channels.
        vision_heads = vision_width // 64
        self.visual = VisualTransformer(
            input_resolution=image_resolution,
            patch_size=vision_patch_size,
            width=vision_width,
            layers=vision_layers,
            heads=vision_heads,
            output_dim=embed_dim,
            resolution_after=resolution_after,
        )

        self.vocab_size = vocab_size
        self.positional_embedding = nn.Parameter(
            torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)

        self.initialize_parameters()

    def initialize_parameters(self):
        """Scaled-normal init matching the original CLIP initialization."""
        nn.init.normal_(self.positional_embedding, std=0.01)

        # Depth-scaled std for residual projections (see the CLIP paper).
        proj_std = (self.visual.transformer.width ** -0.5) * \
            ((2 * self.visual.transformer.layers) ** -0.5)
        attn_std = self.visual.transformer.width ** -0.5
        fc_std = (2 * self.visual.transformer.width) ** -0.5
        for block in self.visual.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

    @property
    def dtype(self):
        # Follow whatever precision the conv stem was cast to.
        return self.visual.conv1.weight.dtype

    def forward(self, image, image_mask=None):
        return self.visual(image.type(self.dtype), image_mask)
# Published OpenAI CLIP checkpoints.  The URL path segment before the file
# name is the SHA-256 digest that _download() verifies after fetching.
_MODELS = {
    "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
    "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
    "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
    "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
    "RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
    "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
    "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
    "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
    "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
}
def _download(url: str, root: str = os.path.expanduser(".cache/clip")):
    """Download ``url`` into ``root`` and return the local file path.

    The expected SHA-256 digest is taken from the second-to-last URL path
    segment (the layout of OpenAI's CLIP release, see ``_MODELS``).  An
    existing file with a matching digest is reused; a mismatching file is
    re-downloaded.

    Raises:
        RuntimeError: if the target path exists but is not a regular file,
            or if the downloaded file fails checksum verification.
    """
    def _sha256(path, chunk_size=1 << 20):
        # Hash in streamed chunks: the previous open(...).read() loaded the
        # whole checkpoint into memory and leaked the file handle.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(
            f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        if _sha256(download_target) == expected_sha256:
            return download_target
        warnings.warn(
            f"{download_target} exists, but the SHA256 checksum does not "
            "match; re-downloading the file")

    with urllib.request.urlopen(url) as source, \
            open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80,
                  unit='iB', unit_scale=True) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    if _sha256(download_target) != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded "
            "but the SHA256 checksum does not match")
    return download_target
def adapt_position_encoding(model, patch_size=32, after=384,
                            suffix='visual.positional_embedding'):
    """Resize a ViT positional-embedding table to a new input resolution.

    The grid part of the table is bicubically interpolated from the
    checkpoint's native grid to ``after // patch_size`` per side; the CLS
    slot (row 0) is carried over untouched.  ``model`` is the state dict,
    mutated in place and also returned.
    """
    matching = [name for name in model if name.endswith(suffix)]
    assert len(matching) == 1
    target_key = matching[0]

    table = model[target_key]
    was_2d = table.dim() == 2
    if was_2d:
        table = table.unsqueeze(0)

    grid_old = int(np.sqrt(table.shape[1] - 1))
    assert (int(grid_old * patch_size) % patch_size) == 0
    assert (after % patch_size) == 0
    grid_new = after // patch_size
    dim = table.shape[-1]

    cls_slot = table[0, 0:1, :]
    # (grid*grid, dim) -> (1, dim, grid, grid) for torch interpolate.
    grid = table[0, 1:, :].reshape(grid_old, grid_old, dim)
    grid = grid.permute(2, 0, 1).unsqueeze(0)
    grid = torch.nn.functional.interpolate(
        grid, size=(grid_new, grid_new), mode='bicubic')
    patch_slots = grid.squeeze(0).permute(1, 2, 0).reshape(-1, dim)

    resized = torch.cat((cls_slot, patch_slots), dim=0).unsqueeze(0)
    assert resized.shape == (1, grid_new * grid_new + 1, dim)
    if was_2d:
        assert resized.shape[0] == 1
        resized = resized.squeeze(0)
    model[target_key] = resized
    return model
def build_model(name, resolution_after=224):
    """Build a CLIP visual encoder from a model name or checkpoint path.

    Args:
        name: a key of ``_MODELS`` (downloaded on demand) or a local
            checkpoint path (TorchScript archive or plain state dict).
        resolution_after: input resolution the returned model should accept;
            positional embeddings are interpolated when it differs from the
            checkpoint's native resolution.

    Returns:
        A ``CLIP`` module with the matching checkpoint weights loaded.

    Raises:
        RuntimeError: if ``name`` is neither a known model nor a file.
    """
    if name in _MODELS:
        model_path = _download(_MODELS[name])
    elif os.path.isfile(name):
        model_path = name
    else:
        # BUG FIX: the old message called undefined ``available_models()``,
        # raising NameError instead of the intended RuntimeError.
        raise RuntimeError(
            f"Model {name} not found; "
            f"available models = {list(_MODELS.keys())}")

    try:
        # OpenAI releases are TorchScript archives.
        model = torch.jit.load(model_path, map_location="cpu")
        state_dict = None
    except RuntimeError:
        # BUG FIX: the old code branched on an undefined local ``jit`` flag,
        # so loading a plain state-dict checkpoint raised NameError here.
        warnings.warn(
            f"File {model_path} is not a JIT archive. "
            "Loading as a state dict instead")
        state_dict = torch.load(model_path, map_location="cpu")

    state_dict = state_dict or model.state_dict()

    # Infer architecture hyper-parameters from the checkpoint tensors.
    vision_width = state_dict["visual.conv1.weight"].shape[0]
    vision_layers = len([k for k in state_dict.keys() if k.startswith(
        "visual.") and k.endswith(".attn.in_proj_weight")])
    vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
    grid_size = round(
        (state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
    image_resolution = vision_patch_size * grid_size
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(
        k.split(".")[2] for k in state_dict
        if k.startswith("transformer.resblocks")))

    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads,
        transformer_layers, resolution_after,
    )

    # These bookkeeping keys (present in JIT archives) are not parameters.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]

    model_dict = model.state_dict()
    pretrained_dict = state_dict
    if resolution_after != image_resolution:
        # Interpolate positional embeddings to the requested resolution.
        pretrained_dict = adapt_position_encoding(
            pretrained_dict,
            after=resolution_after,
            patch_size=vision_patch_size)
    # 1. filter out unnecessary keys
    pretrained_dict = {k: v for k,
                       v in pretrained_dict.items() if k in model_dict}
    # 2. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    # 3. load the new state dict
    model.load_state_dict(model_dict)
    return model
| 12,963 | 37.698507 | 154 | py |
FLM | FLM-master/flm/modules/flm_tools.py | import torch
import torch.nn.functional as F
def get_corr_bi_attention_mask(mask, mask_r, span_corr_rate=0):
    """Build the (optionally span-corrupted) bidirectional attention mask
    used by the FLM reconstructor.

    ``mask`` / ``mask_r`` are additive attention masks of shape
    (bs, L, M, N) where -10000. marks a disallowed position; they are
    concatenated along the key axis into a 2N-wide bidirectional mask.
    Each position is first blinded to itself in both halves; when
    ``span_corr_rate`` > 0, a randomly sized window of neighbouring
    positions around the diagonal is additionally masked out.
    NOTE(review): semantics of the L axis (layers vs. heads) are not
    visible here -- confirm against the caller.
    """
    bs, L, M, N = mask.shape
    org_bi_mask = torch.cat([mask, mask_r], dim=-1)
    bi_mask = org_bi_mask.detach().clone()
    # Blind every position (except 0) to itself in the forward half...
    bi_mask[:, :, torch.arange(1, N), torch.arange(1, N)] = -10000.
    # ...and in the reverse half (offset by N along the key axis).
    bi_mask[:, :, torch.arange(
        1, N), N + torch.arange(1, N)] = -10000.  # [bs, L, L]
    # Visible-context length per query position (+1 for the blinded self).
    text_len = (bi_mask != -10000.).sum(dim=3) + 1
    text_len[:, :, 0] = 1
    if span_corr_rate > 0:
        # Extra corruption probability so the *expected* masked span
        # reaches span_corr_rate of the visible context.
        add_corr_rate = torch.maximum(torch.zeros_like(
            text_len), (text_len * span_corr_rate - 1.)/(text_len - 1 + 1e-5))
        mask_num = torch.distributions.Binomial(
            text_len.float() - 1, add_corr_rate).sample().int()
        # Split the sampled span roughly symmetrically around the diagonal
        # (random tie-break for odd counts).
        start_bias = mask_num // 2 + torch.bernoulli(mask_num/2 - mask_num//2)
        angle = torch.arange(0, N, device=mask.device).long()
        start = torch.maximum(angle - start_bias.long(), 0*angle)
        end = torch.minimum(start + N + mask_num, start.new_tensor(2*N-1))
        # Walk outwards from the diagonal, masking [start, angle] on the
        # forward half and [N+angle, end] on the reverse half.
        start_step = angle[None, None].repeat(bs, L, 1) - start
        for i in range(torch.max(start_step[:, :, 1:])):
            bi_mask[torch.arange(bs).reshape(bs, 1, 1).repeat(1, L, N), torch.arange(L).reshape(1, L, 1).repeat(
                bs, 1, N), angle[None, None].repeat(bs, L, 1), torch.minimum(start+i, angle[None, None])] = -10000.
        end_step = end - angle[None, None].repeat(bs, L, 1) - N
        for i in range(torch.max(end_step[:, :, 1:])):
            bi_mask[torch.arange(bs).reshape(bs, 1, 1).repeat(1, L, N), torch.arange(L).reshape(1, L, 1).repeat(
                bs, 1, N), angle[None, None].repeat(bs, L, 1), torch.maximum(end-i, N + angle[None, None])] = -10000.
    # Query position 0 (e.g. BOS/CLS) keeps its uncorrupted mask row.
    return torch.cat([org_bi_mask[:, :, :1], bi_mask[:, :, 1:]], dim=2)
| 1,859 | 52.142857 | 117 | py |
FLM | FLM-master/flm/modules/meter_utils.py | import torch
import random
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from .dist_utils import all_gather
from .objectives import compute_irtr_recall, compute_caption
from ..gadgets.my_metrics import Accuracy, VQAScore, Scalar
def set_metrics(pl_module):
    """Attach metric objects to ``pl_module`` for every loss with a
    positive weight, for both the train and val phases.

    Attribute names follow ``{split}_{task}_accuracy`` / ``..._loss``
    (nlvr2 uses train/dev/test prefixes, vqa logs a VQAScore).
    """
    active_losses = [name for name, weight in
                     pl_module.hparams.config["loss_names"].items()
                     if weight > 0]
    for split in ["train", "val"]:
        for name in active_losses:
            if name == "vqa":
                setattr(pl_module, f"{split}_vqa_score", VQAScore())
                setattr(pl_module, f"{split}_{name}_loss", Scalar())
            elif name == "nlvr2":
                # nlvr2 evaluates on dev and test, not a single val split.
                prefixes = ["train"] if split == "train" else ["dev", "test"]
                for prefix in prefixes:
                    setattr(pl_module, f"{prefix}_{name}_accuracy", Accuracy())
                    setattr(pl_module, f"{prefix}_{name}_loss", Scalar())
            elif name == "irtr":
                setattr(pl_module, f"{split}_irtr_loss", Scalar())
            elif name in ("mppd", "mpfr"):
                setattr(pl_module, f"{split}_{name}_loss", Scalar())
            else:
                # itm and every remaining task log accuracy plus loss.
                setattr(pl_module, f"{split}_{name}_accuracy", Accuracy())
                setattr(pl_module, f"{split}_{name}_loss", Scalar())
            if 'flm' in name and pl_module.hparams.config["enable_flm_aux_lm_loss"]:
                for aux in ("flma1", "flma2"):
                    setattr(pl_module, f"{split}_{aux}_accuracy", Accuracy())
                    setattr(pl_module, f"{split}_{aux}_loss", Scalar())
def epoch_wrapup(pl_module):
    """Compute, log and reset all epoch-level metrics on ``pl_module``.

    Optionally runs the expensive captioning / retrieval evaluations
    (val phase only), then folds every active task's epoch value into
    the scalar ``{phase}/the_metric`` used for checkpoint selection.
    """
    phase = "train" if pl_module.training else "val"
    the_metric = 0

    if pl_module.hparams.config["get_caption_metric"] and not pl_module.training:
        # BLEU-4 / METEOR / CIDEr / SPICE; CIDEr + METEOR feed the_metric.
        b4, m, c, s = compute_caption(pl_module)
        pl_module.logger.experiment.add_scalar(
            "caption/b4", b4, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "caption/meter", m, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "caption/cider", c, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "caption/spice", s, pl_module.global_step
        )
        the_metric += c + m
    # if pl_module.hparams.config["get_mlm_caption_metric"] and not pl_module.training:
    #     b4, m, c, s = compute_mlm_caption(pl_module)
    #     pl_module.logger.experiment.add_scalar(
    #         "caption/b4", b4, pl_module.global_step
    #     )
    #     pl_module.logger.experiment.add_scalar(
    #         "caption/meter", m, pl_module.global_step
    #     )
    #     pl_module.logger.experiment.add_scalar(
    #         "caption/cider", c, pl_module.global_step
    #     )
    #     pl_module.logger.experiment.add_scalar(
    #         "caption/spice", s, pl_module.global_step
    #     )
    #     the_metric += c + m
    if pl_module.hparams.config["get_recall_metric"] and not pl_module.training:
        # Image->text and text->image retrieval recalls; R@1 feeds the_metric.
        (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5,
         tr_r10) = compute_irtr_recall(pl_module)
        print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step)
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r1", ir_r1, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r5", ir_r5, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r10", ir_r10, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r1", tr_r1, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r5", tr_r5, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r10", tr_r10, pl_module.global_step
        )
        the_metric += ir_r1.item() + tr_r1.item()

    # Per-task epoch aggregation; attribute names must mirror set_metrics().
    for loss_name, v in pl_module.hparams.config["loss_names"].items():
        if v <= 0:
            continue

        value = 0

        if loss_name == "vqa":
            value = getattr(pl_module, f"{phase}_{loss_name}_score").compute()
            pl_module.log(f"{loss_name}/{phase}/score_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_score").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "nlvr2" or loss_name == 'snli':
            if phase == "train":
                value = getattr(
                    pl_module, f"train_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/train/accuracy_epoch", value)
                getattr(pl_module, f"train_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/train/loss_epoch",
                    getattr(pl_module, f"train_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"train_{loss_name}_loss").reset()
            else:
                # Validation covers both the test and dev splits; the dev
                # accuracy (computed last) is what feeds the_metric.
                value = getattr(
                    pl_module, f"test_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/test/accuracy_epoch", value)
                getattr(pl_module, f"test_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/test/loss_epoch",
                    getattr(pl_module, f"test_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"test_{loss_name}_loss").reset()

                value = getattr(
                    pl_module, f"dev_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/dev/accuracy_epoch", value)
                getattr(pl_module, f"dev_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/dev/loss_epoch",
                    getattr(pl_module, f"dev_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"dev_{loss_name}_loss").reset()
        elif loss_name == 'wino':
            # NOTE(review): the *_img/*_text accuracy attributes used here
            # are not created by set_metrics() in this file -- presumably
            # they are registered elsewhere; confirm.
            if phase == 'train':
                pass
            else:
                value = getattr(
                    pl_module, f"test_{loss_name}_accuracy_img").compute()
                value_text = getattr(
                    pl_module, f"test_{loss_name}_accuracy_text").compute()
                pl_module.log(f"{loss_name}/test/accuracy_img_epoch", value)
                pl_module.log(
                    f"{loss_name}/test/accuracy_text_epoch", value_text)
                getattr(pl_module, f"test_{loss_name}_accuracy_img").reset()
                getattr(pl_module, f"test_{loss_name}_accuracy_text").reset()
        elif loss_name == "irtr":
            pl_module.log(
                f"{loss_name}/{phase}/irtr_loss_epoch",
                getattr(pl_module, f"{phase}_irtr_loss").compute(),
            )
            getattr(pl_module, f"{phase}_irtr_loss").reset()
        elif loss_name == "mppd" or loss_name == "mpfr":
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "itm":
            value = getattr(
                pl_module, f"{phase}_{loss_name}_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        else:
            value = getattr(
                pl_module, f"{phase}_{loss_name}_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()

        the_metric += value

    pl_module.log(f"{phase}/the_metric", the_metric)
def check_non_acc_grad(pl_module):
    """Return True when the token-type embedding received no gradient
    (``.grad`` is None) or an all-zero gradient this step."""
    grad = pl_module.token_type_embeddings.weight.grad
    if grad is None:
        return True
    return (grad.sum() == 0).item()
def set_task(pl_module):
    """Record on ``pl_module.current_tasks`` every loss name whose
    configured weight is positive (insertion order preserved)."""
    loss_names = pl_module.hparams.config["loss_names"]
    pl_module.current_tasks = [
        name for name, weight in loss_names.items() if weight > 0
    ]
def get_grouped_parameters(pl_module, no_decay, head_names, cross_modal_names,
                           wd, lr, lr_mult_head, lr_mult_cross_modal):
    """Partition ``pl_module``'s parameters into six optimizer groups.

    Group order (unchanged from the original implementation):
        0. backbone, weight-decayed            (wd, lr)
        1. backbone, no decay                  (0,  lr)
        2. heads, weight-decayed               (wd, lr * lr_mult_head)
        3. heads, no decay                     (0,  lr * lr_mult_head)
        4. cross-modal, weight-decayed         (wd, lr * lr_mult_cross_modal)
        5. cross-modal, no decay               (0,  lr * lr_mult_cross_modal)

    Membership tests are substring matches against the parameter name.
    As before, a parameter matching both a head and a cross-modal pattern
    falls into no group (head groups require "not cross-modal" and vice
    versa).  The sixfold copy-pasted filter of the previous version is
    consolidated into one data-driven loop with identical results.
    """
    def _matches(name, patterns):
        # True when any pattern occurs as a substring of the parameter name.
        return any(p in name for p in patterns)

    named = list(pl_module.named_parameters())

    # (in_no_decay, in_head, in_cross, weight_decay, group_lr)
    specs = [
        (False, False, False, wd, lr),
        (True, False, False, 0.0, lr),
        (False, True, False, wd, lr * lr_mult_head),
        (True, True, False, 0.0, lr * lr_mult_head),
        (False, False, True, wd, lr * lr_mult_cross_modal),
        (True, False, True, 0.0, lr * lr_mult_cross_modal),
    ]
    optimizer_grouped_parameters = []
    for in_no_decay, in_head, in_cross, weight_decay, group_lr in specs:
        params = [
            p for n, p in named
            if _matches(n, no_decay) == in_no_decay
            and _matches(n, head_names) == in_head
            and _matches(n, cross_modal_names) == in_cross
        ]
        optimizer_grouped_parameters.append(
            {"params": params, "weight_decay": weight_decay, "lr": group_lr})
    return optimizer_grouped_parameters
def set_schedule(pl_module):
    """Build the optimizer and LR scheduler for Lightning's
    ``configure_optimizers`` (returns ``([optimizer], [sched_dict])``).

    Parameters are split into six groups (see get_grouped_parameters):
    head and cross-modal parameters get multiplied learning rates, and
    names matching ``no_decay`` are exempt from weight decay.
    """
    lr = pl_module.hparams.config["learning_rate"]
    wd = pl_module.hparams.config["weight_decay"]

    # Norm/bias parameters are conventionally excluded from weight decay.
    no_decay = [
        "bias",
        "LayerNorm.bias",
        "LayerNorm.weight",
        "norm.bias",
        "norm.weight",
        "norm1.bias",
        "norm1.weight",
        "norm2.bias",
        "norm2.weight",
    ]
    head_names = ["vqa_classifier", "nlvr2_classifier", "mlm_score", "itm_score",
                  "snli_classifier", "lm_score", "flm_score", "cl_image", "cl_text"]
    cross_modal_names = ['cross_modal', 'fusion_layers']
    lr_mult_head = pl_module.hparams.config["lr_mult_head"]
    lr_mult_cross_modal = pl_module.hparams.config["lr_mult_cross_modal"]
    end_lr = pl_module.hparams.config["end_lr"]
    decay_power = pl_module.hparams.config["decay_power"]
    optim_type = pl_module.hparams.config["optim_type"]
    optimizer_grouped_parameters = get_grouped_parameters(
        pl_module, no_decay, head_names, cross_modal_names, wd, lr, lr_mult_head, lr_mult_cross_modal)

    if optim_type == "adamw":
        optimizer = AdamW(
            optimizer_grouped_parameters, lr=lr, eps=1e-8, betas=(0.9, 0.98)
        )
    elif optim_type == "adam":
        optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=lr)
    elif optim_type == "sgd":
        optimizer = torch.optim.SGD(
            optimizer_grouped_parameters, lr=lr, momentum=0.9)

    # Derive total steps from the dataloader length when the trainer does
    # not fix max_steps explicitly.
    if pl_module.trainer.max_steps is None:
        max_steps = (
            len(pl_module.trainer.datamodule.train_dataloader())
            * pl_module.trainer.max_epochs
            // pl_module.trainer.accumulate_grad_batches
        )
    else:
        max_steps = pl_module.trainer.max_steps

    # A float warmup_steps is interpreted as a fraction of max_steps.
    warmup_steps = pl_module.hparams.config["warmup_steps"]
    if isinstance(pl_module.hparams.config["warmup_steps"], float):
        warmup_steps = int(max_steps * warmup_steps)

    if decay_power == "cosine":
        scheduler = get_cosine_schedule_with_warmup(
            optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps,
        )
    else:
        scheduler = get_polynomial_decay_schedule_with_warmup(
            optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=max_steps,
            lr_end=end_lr,
            power=decay_power,
        )

    # Step the scheduler every optimizer step, not every epoch.
    sched = {"scheduler": scheduler, "interval": "step"}

    return (
        [optimizer],
        [sched],
    )
| 14,272 | 38.537396 | 102 | py |
FLM | FLM-master/flm/modules/bert_model.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
# Hub identifiers of the official BERT checkpoints this module can load.
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "bert-base-uncased",
    "bert-large-uncased",
    "bert-base-cased",
    "bert-large-cased",
    "bert-base-multilingual-uncased",
    "bert-base-multilingual-cased",
    "bert-base-chinese",
    "bert-base-german-cased",
    "bert-large-uncased-whole-word-masking",
    "bert-large-cased-whole-word-masking",
    "bert-large-uncased-whole-word-masking-finetuned-squad",
    "bert-large-cased-whole-word-masking-finetuned-squad",
    "bert-base-cased-finetuned-mrpc",
    "bert-base-german-dbmdz-cased",
    "bert-base-german-dbmdz-uncased",
    "cl-tohoku/bert-base-japanese",
    "cl-tohoku/bert-base-japanese-whole-word-masking",
    "cl-tohoku/bert-base-japanese-char",
    "cl-tohoku/bert-base-japanese-char-whole-word-masking",
    "TurkuNLP/bert-base-finnish-cased-v1",
    "TurkuNLP/bert-base-finnish-uncased-v1",
    "wietsedv/bert-base-dutch-cased",
    # See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TF checkpoint, maps its slash-separated
    scope path onto the PyTorch module tree attribute by attribute, and
    copies the array in-place (transposing dense kernels).  Optimizer
    slot variables are skipped.  Returns the mutated ``model``.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer",
                  "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        for m_name in name:
            # Scope segments like "layer_11" carry a list index suffix.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            # Translate TF variable names to PyTorch attribute names.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF dense kernels are stored transposed relative to nn.Linear.
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
class BertEmbeddings(nn.Module):
    """Sum of word, position and token-type embeddings, followed by
    LayerNorm and dropout (standard BERT input embedding)."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(
            config.type_vocab_size, config.hidden_size)

        # "LayerNorm" keeps the TensorFlow capitalization so TF checkpoints
        # load without key renaming.
        self.LayerNorm = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Contiguous position ids, serialized with the model as a buffer.
        self.register_buffer(
            "position_ids",
            torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(
            config, "position_embedding_type", "absolute")

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None,
        inputs_embeds=None, past_key_values_length=0
    ):
        """Embed ``input_ids`` (or use precomputed ``inputs_embeds``);
        missing position/token-type ids are filled with defaults."""
        if input_ids is not None:
            shape = input_ids.size()
        else:
            shape = inputs_embeds.size()[:-1]
        seq_length = shape[1]

        if position_ids is None:
            # Shift by the cached-key length so generation keeps absolute
            # positions consistent.
            position_ids = self.position_ids[
                :, past_key_values_length: seq_length + past_key_values_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(
                shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            embeddings = embeddings + self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(embeddings))
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product attention (HF BERT style).

    Serves as self-attention or, when `encoder_hidden_states` is given, as
    cross-attention. Supports absolute or relative position embeddings,
    decoder-style key/value caching, and optional per-head masking.
    """
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(
            config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(
            config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            # One embedding per possible (query - key) offset in [-(L-1), L-1].
            self.distance_embedding = nn.Embedding(
                2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder
    def save_attn_gradients(self, attn_gradients):
        # Hook target for attention-gradient inspection (see the disabled block in forward()).
        self.attn_gradients = attn_gradients
    def get_attn_gradients(self):
        return self.attn_gradients
    def save_attention_map(self, attention_map):
        # Stores the most recent attention probabilities for inspection.
        self.attention_map = attention_map
    def get_attention_map(self):
        return self.attention_map
    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[
            :-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            # Cross-attention with cache: encoder K/V never change, so reuse them.
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(
                self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(
                self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Uni-directional self-attention: append new K/V to the cached ones.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            # Save the (possibly concatenated) K/V so the caller can reuse them:
            # cross-attention callers hit the first branch above, uni-directional
            # self-attention callers hit the concat branch. For a plain
            # bi-directional encoder `past_key_value` is always None.
            past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(
            query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(
                seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(
                seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            # Pairwise offsets, shifted to index into the (2L-1)-row embedding table.
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(
                distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(
                dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum(
                    "bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + \
                    relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / \
            math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed as an additive mask for all
            # layers in BertModel's forward()).
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        if False:
            # Disabled debug path: flip to True to record attention maps and
            # their gradients via save_attention_map / save_attn_gradients.
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[
            :-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (
            context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
class BertSelfOutput(nn.Module):
    """Post-attention projection: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # `input_tensor` is the residual branch (the sub-layer's input).
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertSelfOutputWithGate(nn.Module):
    """Variant of BertSelfOutput whose residual combination is modulated by a gate.

    Returns both the combined output and the intermediate (post-dropout)
    projection so callers can inspect the ungated branch.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Config switches: skip the final LayerNorm and choose the mixing rule.
        self.remove_norm = config.gate_remove_cross_att_norm
        self.gate_type = config.gate_type

    def forward(self, hidden_states, input_tensor, gate):
        projected = self.dense(hidden_states)
        inter_hidden_states = self.dropout(projected)
        if self.gate_type == 'add1':
            # Convex-style mix between the gated projection and the residual.
            out = gate * inter_hidden_states + (1 - gate) * input_tensor
        elif self.gate_type == 'add2':
            # Gate only scales the projection; residual passes through unscaled.
            out = gate * inter_hidden_states + input_tensor
        else:
            # Unrecognized gate type: fall through with the raw (pre-dropout)
            # projection, matching the reference behavior.
            out = projected
        if not self.remove_norm:
            out = self.LayerNorm(out)
        return out, inter_hidden_states
class BertAttention(nn.Module):
    """Attention sub-layer: BertSelfAttention followed by the residual BertSelfOutput."""

    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads, shrinking q/k/v and the output projection."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Shrink the projections along the pruned-head dimensions.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Keep the bookkeeping consistent with the new head count.
        self.self.num_attention_heads -= len(heads)
        self.self.all_head_size = (
            self.self.attention_head_size * self.self.num_attention_heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        # Prepend the residual-combined output; keep attention probs (and any
        # decoder cache) that self-attention returned.
        return (attention_output,) + self_outputs[1:]
class BertAttentionWithGate(nn.Module):
    """Attention sub-layer whose output is scaled by a learned, thresholded gate.

    NOTE(review): `self.output` is a BertSelfOutputWithGate whose forward
    signature is (hidden_states, input_tensor, gate), but forward() below calls
    it with only two positional arguments — that raises a TypeError if this
    path is ever executed. Confirm the intended call.
    """
    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutputWithGate(config)
        # NOTE(review): `output_after_gate` is built but never used in forward().
        self.output_after_gate = BertOutput(config)
        # Gate logits are predicted from the first ([CLS]) token's attention output.
        self.gating = nn.Linear(config.hidden_size, 2)
        self.tau = config.tau
        self.pruned_heads = set()
    def prune_heads(self, heads):
        # Same head-pruning logic as BertAttention.
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - \
            len(heads)
        self.self.all_head_size = self.self.attention_head_size * \
            self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # Gate from the first token only; zero entries not exceeding the
        # threshold tau (hard threshold, gradient flows through kept entries).
        gate = self.gating(self_outputs[0][:, :1])
        gate = (gate > self.tau) * gate
        # NOTE(review): `gate` has last dim 2 (from nn.Linear(hidden, 2)) while
        # self_outputs[0] has last dim hidden_size — this broadcast only works
        # if hidden_size == 2. Also see the arity mismatch noted on the class.
        attention_output = self.output(gate * self_outputs[0], hidden_states)
        # add attentions if we output them
        outputs = (attention_output,) + self_outputs[1:]
        return outputs, gate
class BertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size plus activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be the name of a registered activation or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction back to hidden_size with residual add + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # `input_tensor` is the residual branch entering the feed-forward block.
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class BertCrossLayer(nn.Module):
    """Transformer layer with self-attention, cross-attention and feed-forward sub-layers.

    `hidden_states` first attends to itself (optionally skipped), then to
    `encoder_hidden_states` via cross-attention, then passes through the
    chunked feed-forward block.
    """
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1  # sequence dimension used by apply_chunking_to_forward
        self.attention = BertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        self.crossattention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        attention_mask=None,
        encoder_attention_mask=None,
        output_attentions=False,
        disable_self_attention=False,
    ):
        if disable_self_attention:
            # Skip the self-attention sub-layer entirely.
            outputs = ()
            attention_output = hidden_states
        else:
            # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
            # NOTE(review): `self_attn_past_key_value` is assigned but never used.
            self_attn_past_key_value = None
            self_attention_outputs = self.attention(
                hidden_states,
                attention_mask,
                head_mask=None,
                output_attentions=output_attentions,
                past_key_value=None,
            )
            attention_output = self_attention_outputs[0]
            # if decoder, the last output is tuple of self-attn cache
            # add self attentions if we output attention weights
            outputs = self_attention_outputs[1:]
        # NOTE(review): `cross_attn_present_key_value` is assigned but never used.
        cross_attn_present_key_value = None
        cross_attention_outputs = self.crossattention(
            attention_output,
            attention_mask,
            None,
            encoder_hidden_states,
            encoder_attention_mask,
            None,
            output_attentions,
        )
        attention_output = cross_attention_outputs[0]
        # add cross attentions if we output attention weights
        # NOTE(review): for a non-decoder BertAttention the outputs tuple is
        # (context, probs), so [1:-1] is empty and drops the cross-attention
        # probs — confirm this slicing is intentional.
        outputs = outputs + cross_attention_outputs[1:-1]
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        return outputs
    def feed_forward_chunk(self, attention_output):
        # Feed-forward applied per chunk along the sequence dimension.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BertEncoder(nn.Module):
    """Stack of `config.num_hidden_layers` BertLayer modules with optional
    gradient checkpointing, decoder K/V caching and per-layer output collection.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config)
                                   for _ in range(config.num_hidden_layers)])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # Accumulators stay None when their feature is disabled so the
        # output object reports them as absent.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the hidden states *entering* each layer.
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False
                # Closure binds past_key_value/output_attentions so checkpoint()
                # only has to re-play tensor arguments on the backward pass.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                # Decoder layers append their present key/value as the last output.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + \
                        (layer_outputs[2],)
        if output_hidden_states:
            # Final layer's output (not recorded inside the loop).
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class BertPooler(nn.Module):
    """Pools a sequence by passing its first ([CLS]) token through Linear + Tanh."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # Only the first token's hidden state participates in pooling.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder projection."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # `hidden_act` may name a registered activation or be a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        transformed = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(transformed)
class BertLMPredictionHead(nn.Module):
    """Masked-LM head: transform, then project to vocabulary logits.

    The decoder weight is meant to be tied to the input embeddings; only the
    per-token bias is head-specific.
    """

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        self.decoder = nn.Linear(
            config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the two attributes so `resize_token_embeddings` resizes the bias
        # together with the decoder.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class BertOnlyMLMHead(nn.Module):
    """Wraps BertLMPredictionHead as the sole pretraining head (MLM only)."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: a binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Both pretraining heads: masked-LM predictions and next-sentence classification."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        mlm_scores = self.predictions(sequence_output)
        nsp_scores = self.seq_relationship(pooled_output)
        return mlm_scores, nsp_scores
class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = BertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize weights: normal for Linear/Embedding, identity for LayerNorm."""
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version, which uses truncated_normal
            # (cf. https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=std)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
            elif isinstance(module, nn.Embedding) and module.padding_idx is not None:
                # Padding row stays at zero so padding tokens contribute nothing.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Structured output container for BertForPreTraining (MLM + NSP losses/logits).
@dataclass
class BertForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BertForPreTraining`.
    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # Optional fields default to None so partial outputs (e.g. no labels) are valid.
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config) if add_pooling_layer else None
        self.init_weights()
    def get_input_embeddings(self):
        # Word-embedding table, used for weight tying / resizing.
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        # Fall back to config defaults for the output-control flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # K/V caching only makes sense for a decoder.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError(
                "You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            # Default: attend to every (current + cached) position.
            attention_mask = torch.ones(
                ((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(
                input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (
                encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(
                    encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(
                encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(
            head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(
            sequence_output) if self.pooler is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
@add_start_docstrings(
    """
    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
    # BERT with both original pretraining heads: a masked-LM decoder over the
    # per-token sequence output and a next-sentence classifier over the pooled
    # [CLS] output.
    def __init__(self, config):
        super().__init__(config)
        # Full BertModel (pooler included): the NSP head consumes the pooled output.
        self.bert = BertModel(config)
        # BertPreTrainingHeads bundles the MLM decoder and the 2-way NSP classifier.
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()
    def get_output_embeddings(self):
        # The MLM decoder doubles as the output embedding matrix (weight tying).
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForPreTraining
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # outputs[0] = per-token hidden states, outputs[1] = pooled [CLS] vector.
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(
            sequence_output, pooled_output)
        total_loss = None
        # The pretraining loss is only computed when BOTH label sets are provided.
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(
                prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(
                seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return BertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
    # Causal (left-to-right) LM on top of BERT; intended to be used with
    # config.is_decoder=True so the self-attention is causally masked.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [
        r"position_ids", r"predictions.decoder.bias"]
    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning(
                "If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
        # No pooler: only per-token hidden states are needed for LM scoring.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()
    def get_output_embeddings(self):
        # The LM decoder doubles as the output embedding matrix (weight tying).
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> config.is_decoder = True
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Training with labels never uses the incremental decoding cache.
            use_cache = False
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:,
                                                          :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(
                shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        # Hook called by generate() before each decoding step.
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]
        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
    def _reorder_cache(self, past, beam_idx):
        # Re-align the cached key/value states with the surviving beams.
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx)
                                     for past_state in layer_past),)
        return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
    # Bidirectional masked-LM head on top of BERT (the classic MLM objective).
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [
        r"position_ids", r"predictions.decoder.bias"]
    def __init__(self, config):
        super().__init__(config)
        if config.is_decoder:
            logger.warning(
                "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )
        # No pooler: only per-token hidden states feed the MLM head.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()
    def get_output_embeddings(self):
        # The MLM decoder doubles as the output embedding matrix (weight tying).
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(
                prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        # MLM generation trick: append one PAD "dummy" token (with attention 0)
        # so there is always a masked-out slot at the end to predict into.
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]
        # add a dummy token
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
        attention_mask = torch.cat(
            [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)
        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top. """,
    BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
    # Binary NSP classifier over the pooled [CLS] representation.
    def __init__(self, config):
        super().__init__(config)
        # Pooler required: the NSP head consumes the pooled output.
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForNextSentencePrediction
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
            >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
            >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
            >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
            >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
            >>> logits = outputs.logits
            >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
        """
        # Legacy keyword support: `next_sentence_label` is remapped onto `labels`.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # outputs[1] is the pooled [CLS] vector.
        pooled_output = outputs[1]
        seq_relationship_scores = self.cls(pooled_output)
        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(
                seq_relationship_scores.view(-1, 2), labels.view(-1))
        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
    # Linear classifier/regressor over the pooled [CLS] output; the loss used
    # depends on config.problem_type (inferred from labels if unset).
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer problem_type once from num_labels and label dtype, then
            # cache it on the config for subsequent calls.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(
                    logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
    # Scores each of num_choices candidate sequences with a 1-dim linear head;
    # inputs arrive as (batch, num_choices, seq_len) and are flattened to
    # (batch * num_choices, seq_len) before the encoder.
    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One scalar score per (example, choice) pair.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten the choice dimension into the batch dimension for each input.
        input_ids = input_ids.view(-1, input_ids.size(-1)
                                   ) if input_ids is not None else None
        attention_mask = attention_mask.view(
            -1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(
            -1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)
                                         ) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2),
                               inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Un-flatten: back to one row of choice scores per original example.
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
    # Per-token linear classifier over the sequence output (e.g. NER tagging).
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # No pooler: classification is per token, not per sequence.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                # Padding positions get ignore_index (-100) so they do not
                # contribute to the cross-entropy.
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(
                        loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(
                    logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
    # Extractive QA head: a single linear layer produces 2 logits per token
    # (span start, span end).
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config, add_pooling_layer=False)
        # num_labels is expected to be 2 here (start/end logits).
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # Split the 2-channel output into start and end logit vectors.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 81,268 | 40.719199 | 213 | py |
FLM | FLM-master/flm/modules/dist_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
import torch
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
    """Return the size of the default process group, or 1 outside distributed runs."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank() -> int:
    """Return the global rank of this process, or 0 outside distributed runs."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine) process group.
    """
    if dist.is_available() and dist.is_initialized():
        # Requires launch() to have populated the per-machine group first.
        assert _LOCAL_PROCESS_GROUP is not None
        return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
    return 0
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group, i.e. the number of
        processes per machine.
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
    return 1
def is_main_process() -> bool:
    # Convenience predicate: True only on the global rank-0 ("main") process.
    return get_rank() == 0
def synchronize():
    """
    Barrier across all processes in the default group; a no-op when torch is
    not running in (multi-process) distributed mode.
    """
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        # Single process: nothing to wait for.
        return
    dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a gloo-backed process group containing all ranks (cached).

    CPU gather/all_gather of pickled python objects must not run over NCCL,
    so a dedicated gloo group is created when NCCL is the default backend.
    """
    if dist.get_backend() != "nccl":
        return dist.group.WORLD
    return dist.new_group(backend="gloo")
def _serialize_to_tensor(data, group):
    # Pickle `data` into a flat uint8 tensor placed on the device that
    # matches `group`'s backend (cpu for gloo, cuda for nccl).
    backend = dist.get_backend(group)
    assert backend in ["gloo", "nccl"]
    device = torch.device("cpu" if backend == "gloo" else "cuda")
    buffer = pickle.dumps(data)
    # Warn on very large (>1 GiB) payloads, which make collectives slow.
    if len(buffer) > 1024 ** 3:
        logger = logging.getLogger(__name__)
        logger.warning(
            "Rank {} trying to all-gather {:.2f} GB of data on device\
                {}".format(get_rank(), len(buffer) / (1024 ** 3), device)
        )
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to(device=device)
    return tensor
def _pad_to_largest_tensor(tensor, group):
    """
    Zero-pad a 1-D uint8 tensor up to the largest numel across *group*.

    Returns:
        list[int]: size of the tensor, on each rank
        Tensor: padded tensor that has the max size
    """
    world_size = dist.get_world_size(group=group)
    assert (
        world_size >= 1
    ), "comm.gather/all_gather must be called from ranks within the given group!"
    local_size = torch.tensor(
        [tensor.numel()], dtype=torch.int64, device=tensor.device)
    # Exchange each rank's payload size first.
    size_slots = [
        torch.zeros([1], dtype=torch.int64, device=tensor.device)
        for _ in range(world_size)
    ]
    dist.all_gather(size_slots, local_size, group=group)
    size_list = [int(slot.item()) for slot in size_slots]
    max_size = max(size_list)
    # torch all_gather only accepts equally-shaped tensors, so pad with
    # zeros up to the largest payload in the group.
    if local_size != max_size:
        filler = torch.zeros(
            (max_size - local_size,), dtype=torch.uint8, device=tensor.device
        )
        tensor = torch.cat((tensor, filler), dim=0)
    return size_list, tensor
def all_gather(data, group=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.
    Returns:
        list[data]: list of data gathered from each rank
    """
    # Fast paths: nothing to exchange in a single-process world.
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group) == 1:
        return [data]
    payload = _serialize_to_tensor(data, group)
    size_list, payload = _pad_to_largest_tensor(payload, group)
    max_size = max(size_list)
    # One equally-sized receive buffer per rank.
    recv_buffers = [
        torch.empty((max_size,), dtype=torch.uint8, device=payload.device)
        for _ in size_list
    ]
    dist.all_gather(recv_buffers, payload, group=group)
    # Strip each rank's padding before unpickling.
    return [
        pickle.loads(buf.cpu().numpy().tobytes()[:size])
        for size, buf in zip(size_list, recv_buffers)
    ]
def gather(data, dst=0, group=None):
    """
    Run gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
        dst (int): destination rank
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.
    Returns:
        list[data]: on dst, a list of data gathered from each rank. Otherwise,
            an empty list.
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group=group) == 1:
        return [data]
    rank = dist.get_rank(group=group)
    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    # receiving Tensor from all ranks
    if rank == dst:
        # Only the destination rank allocates receive buffers; the other
        # ranks must pass an empty gather_list to dist.gather.
        max_size = max(size_list)
        tensor_list = [
            torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
            for _ in size_list
        ]
        dist.gather(tensor, tensor_list, dst=dst, group=group)
        data_list = []
        # Strip per-rank padding before unpickling.
        for size, tensor in zip(size_list, tensor_list):
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        return data_list
    else:
        dist.gather(tensor, [], dst=dst, group=group)
        return []
def shared_random_seed():
    """
    Return an integer seed that is identical on every worker.

    Each worker draws its own random int, the draws are all-gathered, and
    every worker keeps rank 0's draw. All workers must call this function,
    otherwise it will deadlock.
    """
    local_seed = np.random.randint(2 ** 31)
    return all_gather(local_seed)[0]
def reduce_dict(input_dict, average=True):
    """
    Reduce the values of ``input_dict`` across all processes so that rank 0
    ends up with the reduced result.

    Args:
        input_dict (dict): inputs to be reduced; values must be scalar CUDA
            tensors.
        average (bool): average (True) or just sum (False) the values.
    Returns:
        a dict with the same keys as ``input_dict``, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # Sort keys so every process stacks values in the same order.
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0 and average:
            # Only rank 0 holds the accumulated sum, so only it divides.
            stacked /= world_size
        reduced_dict = dict(zip(keys, stacked))
    return reduced_dict
| 7,810 | 27.822878 | 100 | py |
FLM | FLM-master/flm/modules/objectives.py | # flake8: noqa
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pycocotools.coco import COCO
from flm.pycocoevalcap.eval import COCOEvalCap
from .dist_utils import all_gather
def sim_matrix(a, b, eps=1e-8):
    """
    Cosine-similarity matrix between the rows of ``a`` and the rows of ``b``.

    ``eps`` floors each row norm for numerical stability.
    """
    def _unit_rows(x):
        # Normalize each row, never dividing by less than eps.
        norms = x.norm(dim=1)[:, None]
        return x / torch.max(norms, eps * torch.ones_like(norms))

    return torch.mm(_unit_rows(a), _unit_rows(b).transpose(0, 1))
def contrastive_loss(x, temperature, cl_mask):
    """
    Symmetric masked contrastive loss over a similarity matrix ``x``.

    ``cl_mask`` selects the positive pairs. The masked log-softmax terms are
    averaged over the batch dimension in both directions and negated.
    """
    row_logsm = F.log_softmax(x * temperature, dim=1)
    col_logsm = F.log_softmax(x.t() * temperature, dim=1)
    # Keep only the positive entries, then average over the batch dim.
    loss_i = (row_logsm * cl_mask).sum() / len(row_logsm)
    loss_j = (col_logsm * cl_mask).sum() / len(col_logsm)
    return - loss_i - loss_j
def compute_mlm(pl_module, batch, single_stream_backbone, enable_causal_mask=None):
    """Masked-language-modeling objective: loss, phase metrics and logging."""
    infer = pl_module.infer(
        batch, mask_text=True, flm_backbone=not single_stream_backbone,
        enable_causal_mask=enable_causal_mask)
    # Fall back to the causal-variant head when the plain one is absent.
    if hasattr(pl_module, 'mlm_score'):
        scorer = pl_module.mlm_score
    else:
        scorer = pl_module.mlm_score_cau
    logits = scorer(infer["text_feats"])
    labels = infer["text_labels"]
    loss = F.cross_entropy(
        logits.view(-1, logits.shape[-1]),
        labels.view(-1),
        ignore_index=-100,
    )
    ret = {
        "mlm_loss": loss,
        "mlm_logits": logits,
        "mlm_labels": labels,
        "mlm_ids": infer["text_ids"],
    }
    phase = "train" if pl_module.training else "val"
    tracked_loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"])
    tracked_acc = getattr(pl_module, f"{phase}_mlm_accuracy")(
        ret["mlm_logits"], ret["mlm_labels"]
    )
    pl_module.log(f"mlm/{phase}/loss", tracked_loss)
    pl_module.log(f"mlm/{phase}/accuracy", tracked_acc)
    return ret
def compute_lm(pl_module, batch, lm_type='ar'):
    """Language-modeling objective, autoregressive ('ar') or FLM ('flm').

    Computes next-token cross-entropy (token id 1 is ignored — presumably
    the padding id; TODO confirm against the tokenizer), updates the phase
    metrics, and — for FLM with auxiliary losses enabled — blends in two
    extra LM losses from the auxiliary streams.
    """
    if lm_type == 'ar':
        infer = pl_module.infer(batch, mask_text=False, do_lm=True)
        # Shift by one so position t predicts the token at t+1.
        mlm_logits = pl_module.lm_score(infer["text_feats"])[:, 1:-1]
        mlm_labels = infer["text_labels"][:, 1:]
    elif lm_type == 'flm':
        infer = pl_module.infer(batch, mask_text=False, do_lm=True)
        mlm_logits = pl_module.lm_score(infer["text_feats"])[:, 1:]
        mlm_labels = infer["text_labels"]
    mlm_loss = F.cross_entropy(
        mlm_logits.reshape(-1, mlm_logits.shape[-1]),
        mlm_labels.reshape(-1),
        ignore_index=1,
    )
    ret = {
        f"{lm_type}_loss": mlm_loss,
        f"{lm_type}_logits": mlm_logits,
        f"{lm_type}_labels": mlm_labels,
        f"{lm_type}_ids": infer["text_ids"],
    }
    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_{lm_type}_loss")(
        ret[f"{lm_type}_loss"])
    acc = getattr(pl_module, f"{phase}_{lm_type}_accuracy")(
        ret[f"{lm_type}_logits"], ret[f"{lm_type}_labels"], ignore_index=1
    )
    pl_module.log(f"{lm_type}/{phase}/loss", loss)
    pl_module.log(f"{lm_type}/{phase}/accuracy", acc)
    if lm_type == 'flm' and pl_module.hparams.config["enable_flm_aux_lm_loss"]:
        # Auxiliary head 1 scores text_feats1 (shifted like the 'ar' case).
        lm_scorer1 = pl_module.lm_score1 if hasattr(
            pl_module, 'lm_score1') else pl_module.lm_score_r
        mlm_logits1 = lm_scorer1(infer["text_feats1"])[:, 1:-1]
        mlm_labels1 = infer["text_labels"][:, 1:]
        mlm_loss1 = F.cross_entropy(
            mlm_logits1.reshape(-1, mlm_logits1.shape[-1]),
            mlm_labels1.reshape(-1),
            ignore_index=1,
        )
        # Auxiliary head 2 scores text_feats2 against labels shifted the
        # other way (positions 2.. predict labels ..-1).
        lm_scorer2 = pl_module.lm_score2 if hasattr(
            pl_module, 'lm_score2') else pl_module.lm_score_f
        mlm_logits2 = lm_scorer2(infer["text_feats2"])[:, 2:]
        mlm_labels2 = infer["text_labels"][:, :-1]
        mlm_loss2 = F.cross_entropy(
            mlm_logits2.reshape(-1, mlm_logits2.shape[-1]),
            mlm_labels2.reshape(-1),
            ignore_index=1,
        )
        phase = "train" if pl_module.training else "val"
        loss1 = getattr(pl_module, f"{phase}_flma1_loss")(mlm_loss1)
        acc1 = getattr(pl_module, f"{phase}_flma1_accuracy")(
            mlm_logits1, mlm_labels1, ignore_index=1
        )
        loss2 = getattr(pl_module, f"{phase}_flma2_loss")(mlm_loss2)
        acc2 = getattr(pl_module, f"{phase}_flma2_accuracy")(
            mlm_logits2, mlm_labels2, ignore_index=1
        )
        pl_module.log(f"flma1/{phase}/loss", loss1)
        pl_module.log(f"flma1/{phase}/accuracy", acc1)
        pl_module.log(f"flma2/{phase}/loss", loss2)
        pl_module.log(f"flma2/{phase}/accuracy", acc2)
        # Weighted average of the main FLM loss and the two auxiliaries.
        all_weights = 1 + pl_module.hparams.config["flm_aux_lm_loss_l2r_weight"] + \
            pl_module.hparams.config["flm_aux_lm_loss_r2l_weight"]
        mlm_loss_all = 1/all_weights * \
            (mlm_loss + pl_module.hparams.config["flm_aux_lm_loss_l2r_weight"] *
             mlm_loss1 + pl_module.hparams.config["flm_aux_lm_loss_r2l_weight"] * mlm_loss2)
        ret.update({
            f"{lm_type}_loss": mlm_loss_all,
        })
    return ret
def compute_itm(pl_module, batch, single_stream_backbone, enable_causal_mask=None):
    """Image-text matching objective on a half-positive/half-negative batch."""
    batch_len = len(batch["text"])
    pos_len = batch_len // 2
    neg_len = batch_len - pos_len
    itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)]).to(
        pl_module.device
    )
    # Shuffle so positives and negatives are spread randomly over the batch.
    itm_labels = itm_labels[torch.randperm(itm_labels.size(0))]
    itm_images = []
    for true_imgs, false_imgs in zip(batch["image"], batch["false_image_0"]):
        picked = [
            true_img if itm_labels[i] == 1 else false_img
            for i, (true_img, false_img) in enumerate(zip(true_imgs, false_imgs))
        ]
        itm_images.append(torch.stack(picked))
    # Shallow-copy the batch before swapping in the mixed images.
    batch = dict(batch.items())
    batch["image"] = itm_images
    infer = pl_module.infer(
        batch, mask_text=False, flm_backbone=not single_stream_backbone,
        enable_causal_mask=enable_causal_mask)
    itm_logits = pl_module.itm_score(infer["cls_feats"])
    itm_loss = F.cross_entropy(itm_logits, itm_labels.long())
    ret = {
        "itm_loss": itm_loss,
        "itm_logits": itm_logits,
        "itm_labels": itm_labels,
    }
    phase = "train" if pl_module.training else "val"
    tracked_loss = getattr(pl_module, f"{phase}_itm_loss")(ret["itm_loss"])
    tracked_acc = getattr(pl_module, f"{phase}_itm_accuracy")(
        ret["itm_logits"], ret["itm_labels"]
    )
    pl_module.log(f"itm/{phase}/loss", tracked_loss)
    pl_module.log(f"itm/{phase}/accuracy", tracked_acc)
    return ret
def compute_vqa(pl_module, batch):
    """VQAv2 objective: soft-target BCE over the answer vocabulary."""
    infer = pl_module.infer(batch, mask_text=False)
    vqa_logits = pl_module.vqa_classifier(infer["cls_feats"])
    vqa_labels = batch["vqa_labels"]
    vqa_scores = batch["vqa_scores"]
    # Scatter each (answer-id, score) annotation into a dense target row.
    vqa_targets = torch.zeros(
        len(vqa_logits), pl_module.hparams.config["vqav2_label_size"]
    ).to(pl_module.device)
    for target_row, answer_ids, answer_scores in zip(
            vqa_targets, vqa_labels, vqa_scores):
        for answer_id, answer_score in zip(answer_ids, answer_scores):
            target_row[answer_id] = answer_score
    # Scale by the label count, following ban-vqa:
    # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
    vqa_loss = (
        F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets)
        * vqa_targets.shape[1]
    )
    ret = {
        "vqa_loss": vqa_loss,
        "vqa_logits": vqa_logits,
        "vqa_targets": vqa_targets,
        "vqa_labels": vqa_labels,
        "vqa_scores": vqa_scores,
    }
    phase = "train" if pl_module.training else "val"
    tracked_loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"])
    tracked_score = getattr(pl_module, f"{phase}_vqa_score")(
        ret["vqa_logits"], ret["vqa_targets"]
    )
    pl_module.log(f"vqa/{phase}/loss", tracked_loss)
    pl_module.log(f"vqa/{phase}/score", tracked_score)
    return ret
def compute_nlvr2(pl_module, batch):
    """NLVR2 objective: classify a (statement, image-pair) example.

    The two images are encoded separately (image token types 1 and 2),
    their CLS features concatenated and classified. During validation the
    batch is split into dev/test subsets by table name for separate logging.
    """
    infer1 = pl_module.infer(
        batch, mask_text=False, image_token_type_idx=1
    )
    infer2 = pl_module.infer(
        batch, mask_text=False, image_token_type_idx=2
    )
    cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1)
    nlvr2_logits = pl_module.nlvr2_classifier(cls_feats)
    nlvr2_labels = batch["answers"]
    nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long()
    nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels.view(-1))
    ret = {
        "nlvr2_loss": nlvr2_loss,
        "nlvr2_logits": nlvr2_logits,
        "nlvr2_labels": nlvr2_labels,
    }
    phase = "train" if pl_module.training else "val"
    if phase == "train":
        loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"])
        acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")(
            ret["nlvr2_logits"], ret["nlvr2_labels"]
        )
        pl_module.log(f"nlvr2/{phase}/loss", loss)
        pl_module.log(f"nlvr2/{phase}/accuracy", acc)
    else:
        # Validation batches mix dev and test examples; log them separately.
        dev_batches = [i for i, n in enumerate(
            batch["table_name"]) if "dev" in n]
        test_batches = [i for i, n in enumerate(
            batch["table_name"]) if "test" in n]
        if dev_batches:
            dev_loss = getattr(pl_module, f"dev_nlvr2_loss")(
                F.cross_entropy(
                    ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
                )
            )
            dev_acc = getattr(pl_module, f"dev_nlvr2_accuracy")(
                ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
            )
            pl_module.log(f"nlvr2/dev/loss", dev_loss)
            pl_module.log(f"nlvr2/dev/accuracy", dev_acc)
        if test_batches:
            test_loss = getattr(pl_module, f"test_nlvr2_loss")(
                F.cross_entropy(
                    ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
                )
            )
            test_acc = getattr(pl_module, f"test_nlvr2_accuracy")(
                ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
            )
            pl_module.log(f"nlvr2/test/loss", test_loss)
            pl_module.log(f"nlvr2/test/accuracy", test_acc)
    return ret
def compute_irtr(pl_module, batch):
    """Image-text retrieval training objective.

    Pairs each image with its true caption plus ``draw_false_text`` negative
    captions, scores every (image, candidate) pair with the rank head, and
    applies cross-entropy with the true caption at candidate index 0.
    """
    # (Removed unused local `is_training_phase`; `phase` below covers it.)
    _bs, _c, _h, _w = batch["image"][0].shape
    false_len = pl_module.hparams.config["draw_false_text"]
    # Stack negatives along a new "candidate" dimension, then prepend the
    # positive caption at candidate index 0.
    text_ids = torch.stack(
        [batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
    )
    text_masks = torch.stack(
        [batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
    )
    text_labels = torch.stack(
        [batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
    )
    text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
    text_masks = torch.cat(
        [batch["text_masks"].unsqueeze(1), text_masks], dim=1)
    text_labels = torch.cat(
        [batch["text_labels"].unsqueeze(1), text_labels], dim=1)
    # The image is shared by all candidates of an example.
    images = batch["image"][0].unsqueeze(
        1).expand(_bs, false_len + 1, _c, _h, _w)
    text_labels_lm = batch["text_labels_lm"].unsqueeze(
        1).repeat(1, false_len + 1, 1)
    text_all_masks_ids = batch["text_all_masks_ids"].unsqueeze(
        1).repeat(1, false_len + 1, 1)
    text_flm_masks = batch["text_flm_masks"].unsqueeze(
        1).repeat(1, false_len + 1, 1, 1)
    # Flatten (batch, candidate) into one batch dimension for the encoder.
    infer = pl_module.infer(
        {
            "image": [rearrange(images, "bs fs c h w -> (bs fs) c h w")],
            "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
            "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
            "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
            "text_labels_lm": rearrange(text_labels_lm, "bs fs tl -> (bs fs) tl"),
            "text_all_masks_ids": rearrange(text_all_masks_ids, "bs fs tl -> (bs fs) tl"),
            "text_flm_masks": rearrange(text_flm_masks, "bs fs tl ttl -> (bs fs) tl ttl"),
        }
    )
    score = pl_module.rank_output(infer["cls_feats"])[:, 0]
    score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
    # The positive caption always sits at candidate index 0.
    answer = torch.zeros(_bs).to(score).long()
    irtr_loss = F.cross_entropy(score, answer)
    ret = {
        "irtr_loss": irtr_loss,
    }
    phase = "train" if pl_module.training else "val"
    irtr_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"])
    pl_module.log(f"irtr/{phase}/irtr_loss", irtr_loss)
    return ret
def evaluate(cache_path, ann_path, dist=True):
    """Score the caption predictions stored at ``cache_path`` with COCO tools.

    Rank 0 first filters the predictions down to images present in the
    annotation file (rewriting ``cache_path`` in place); then every rank
    runs the COCO caption evaluation on the filtered file.

    Args:
        cache_path: JSON file of {"image_id", "caption"} predictions.
        ann_path: COCO-format annotation JSON.
        dist (bool): whether to synchronize via torch.distributed.
    Returns:
        dict of metric name -> score (e.g. Bleu_4, CIDEr).
    """
    coco = COCO(ann_path)
    valids = coco.getImgIds()
    rank = torch.distributed.get_rank() if dist else 0
    if rank == 0:
        preds = json.load(open(cache_path))
        # filter results to only those in MSCOCO validation set
        preds_filt = [p for p in preds if int(p['image_id']) in valids]
        print('using %d/%d predictions' % (len(preds_filt), len(preds)))
        # serialize to temporary json file. Sigh, COCO API...
        json.dump(preds_filt, open(cache_path, 'w'))
    if dist:
        # Make sure the filtered file is written before other ranks read it.
        torch.distributed.barrier()
    cocoRes = coco.loadRes(cache_path)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()
    eval_res = cocoEval.eval
    return eval_res
@torch.no_grad()
def compute_caption(pl_module):
    """Greedy autoregressive caption generation plus COCO evaluation.

    Every rank decodes its shard of validation images token-by-token,
    writes its predictions to a per-rank JSON file, rank 0 merges the
    shards, and COCO caption metrics are computed on the merged file.

    Returns:
        (Bleu_4, METEOR, CIDEr, SPICE) scores.
    """
    image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
        image_only=True
    )
    image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    dist_sampler = DistributedSampler(image_dset, shuffle=False)
    image_loader = torch.utils.data.DataLoader(
        image_dset,
        batch_size=32,
        num_workers=pl_module.hparams.config["num_workers"],
        sampler=dist_sampler,
        pin_memory=True,
        collate_fn=functools.partial(
            image_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    max_text_len = 30
    stop_word_ids = [image_dset.tokenizer.eos_token_id]
    text_token_start_idx = 0
    if pl_module.config['add_new_bos_token']:
        # With the extra bos/eos tokens, also stop on '<eos>' and skip the
        # leading bos when decoding text.
        stop_word_ids.append(
            image_dset.tokenizer.convert_tokens_to_ids('<eos>'))
        text_token_start_idx = 1
    device = pl_module.device
    rank = torch.distributed.get_rank()
    prompt = pl_module.hparams.config['caption_prompt']
    if prompt is not None:
        prompt_tokens = image_dset.tokenizer.tokenize(prompt)
        fake_start_ids = image_dset.tokenizer.convert_tokens_to_ids(
            prompt_tokens)
    results = []
    for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
        B = _b['image'][0].shape[0]
        img_ids = _b['img_index']
        pred_ids = None
        pred_ids_list = []
        # stop_flag marks sequences that already emitted a stop word;
        # ava_len counts tokens generated before stopping.
        stop_flag = torch.full((B, ), 0).bool().to(device)
        ava_len = torch.full((B, ), 0).to(device)
        for t in range(max_text_len):
            if t == 0:
                text_ids = torch.full(
                    (B, 1), image_dset.tokenizer.bos_token_id).long().to(device)
                text_masks = torch.full((B, 1), 1).long().to(device)
                if prompt is not None:
                    # Seed decoding with the tokenized caption prompt.
                    pred_ids = torch.tensor(fake_start_ids)[
                        None].long().to(device).repeat(B, 1)
                    pred_ids_list.extend([pred_ids[:, i]
                                          for i in range(pred_ids.shape[1])])
                    text_masks = torch.cat(
                        [text_masks, torch.full_like(pred_ids, 1)], dim=1).long().to(device)
                    text_ids = torch.cat((text_ids, pred_ids), dim=-1)
            else:
                # Append the last prediction; finished sequences get mask 0.
                text_ids = torch.cat((text_ids, pred_ids[:, None]), dim=-1)
                text_masks = torch.cat(
                    [text_masks, 1 - stop_flag[:, None].long()], dim=-1)
            _b['image'] = [__b.to(device) for __b in _b['image']]
            _b['text_ids'] = text_ids
            _b['text_masks'] = text_masks
            _b['text_labels'] = None
            _b["text_flm_mask_ids"] = None
            _b['text_flm_masks'] = text_masks
            if True:
                # All-mask stream: every position is '<mask>' except bos.
                all_mask_ids = text_masks * \
                    image_dset.tokenizer.convert_tokens_to_ids('<mask>')
                all_mask_ids[:, 0] = text_ids[:, 0]
                _b['text_all_masks_ids'] = all_mask_ids
            _b['text_labels_lm'] = None
            # pl_module.config['truncate_bottom_text_encoder_layer'] = True
            if pl_module.config['flm_backbone']:
                infer = pl_module.infer_three_stream(_b)
                mlm_logits = getattr(pl_module, 'lm_score')(
                    infer['text_feats1'])[:, -1]
            else:
                infer = pl_module.infer_one_stream(_b)
                mlm_logits = getattr(pl_module, 'lm_score')(
                    infer['text_feats'])[:, -1]
            # Greedy decoding: take the argmax token at the last position.
            pred_ids = mlm_logits.argmax(1)
            for stop_word_id in stop_word_ids:
                stop_flag = stop_flag | (pred_ids == stop_word_id)
            ava_len = ava_len + (1 - stop_flag.int())
            pred_ids_list.append(pred_ids)
            if (1 - stop_flag.int()).sum() == 0:
                break
        pred_ids_list = torch.stack(
            pred_ids_list, dim=-1).cpu().numpy().tolist()
        pred_texts = [image_dset.tokenizer.decode(
            pred_id[text_token_start_idx: ava_len[i]+1]) for i, pred_id in enumerate(pred_ids_list)]
        for idx, text in enumerate(pred_texts):
            # Derive the numeric COCO image id from the stored file name.
            image_id = int(str(image_dset.table['image_id'][img_ids[idx]]).split('.')[
                0].split('_')[-1])
            results.extend([{'image_id': image_id, 'caption': text}])
        # print('\n\n pred_texts', pred_texts)
    rets = results
    exp_path = pl_module.config['exp_path']
    result_path_rank = os.path.join(exp_path, f"caption_{rank}.json")
    with open(result_path_rank, "w") as fp:
        # NOTE(review): message says "vqa" but these are caption results.
        print('!!! saving vqa results to {}'.format(result_path_rank))
        json.dump(rets, fp, indent=4)
    torch.distributed.barrier()
    result_path = os.path.join(exp_path, "caption.json")
    if rank == 0:
        # Merge all per-rank shards into a single submission file.
        jsons = list()
        paths = list(glob.glob(os.path.join(exp_path, "caption_*.json")))
        for path in paths:
            with open(path, "r") as fp:
                jsons += json.load(fp)
        os.makedirs("result", exist_ok=True)
        with open(result_path, "w") as fp:
            print('!!! saving final caption results to {}'.format(result_path))
            json.dump(jsons, fp, indent=4)
    torch.distributed.barrier()
    os.remove(os.path.join(exp_path, f"caption_{rank}.json"))
    print('!!! deleting caption results at {}'.format(result_path_rank))
    scores = evaluate(
        result_path, 'data/coco_caption/captions_val2014.json')
    print(scores)
    torch.distributed.barrier()
    b4, m, c, s = scores['Bleu_4'], scores['METEOR'], scores['CIDEr'], scores.get(
        'SPICE', 0)
    return b4, m, c, s
@torch.no_grad()
def compute_irtr_recall(pl_module, topk_indices=None):
    """Full-rank image-text retrieval evaluation.

    Preloads every validation text batch, shards the images across ranks,
    scores each image against every text with the rank head, all-gathers
    the score matrix, and returns the six recall metrics
    (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10).

    Note: ``topk_indices`` is currently unused.
    """
    text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
    text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    text_loader = torch.utils.data.DataLoader(
        text_dset,
        batch_size=64,
        num_workers=pl_module.hparams.config["num_workers"],
        pin_memory=True,
        collate_fn=functools.partial(
            text_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
        image_only=True
    )
    image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    dist_sampler = DistributedSampler(image_dset, shuffle=False)
    image_loader = torch.utils.data.DataLoader(
        image_dset,
        batch_size=1,
        num_workers=pl_module.hparams.config["num_workers"],
        sampler=dist_sampler,
        pin_memory=True,
        collate_fn=functools.partial(
            image_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    # TODO: speed up the process by caching text/image features
    text_preload = list()
    for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
        for k, v in _b.items():
            if isinstance(v, torch.Tensor):
                _b[k] = v.to(pl_module.device)
        text_preload.append(_b)
    tiids = list()
    for pre in text_preload:
        tiids += pre["img_index"]
    tiids = torch.tensor(tiids)
    image_preload = list()
    for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
        image_preload.append((_b['image'][0], _b["img_index"][0]))
    rank_scores = list()
    rank_iids = list()
    for img_batch in tqdm.tqdm(image_preload, desc="img rank loop"):
        _im, _iid = img_batch
        img_batch_score = list()
        for txt_batch in text_preload:
            # Replicate the single image across the text batch.
            fblen = len(txt_batch["text_ids"])
            im = _im.repeat(fblen, 1, 1, 1).to(
                device=txt_batch['text_ids'].device)
            with torch.cuda.amp.autocast():
                score = pl_module.rank_output(
                    pl_module.infer(
                        txt_batch,
                        img=im,
                    )["cls_feats"]
                )[:, 0]
            img_batch_score.append(score)
        img_batch_score = torch.cat(img_batch_score)
        rank_scores.append(img_batch_score.cpu().tolist())
        rank_iids.append(_iid)
    torch.distributed.barrier()
    # Collect every rank's image shard into a full score matrix.
    gather_rank_scores = all_gather(rank_scores)
    gather_rank_iids = all_gather(rank_iids)
    iids = torch.tensor(gather_rank_iids)
    iids = iids.view(-1)
    scores = torch.tensor(gather_rank_scores)
    scores = scores.view(len(iids), -1)
    return calculate_metric(scores, iids, tiids, tiids_dims=1)
def calculate_metric(scores, iids, tiids, tiids_dims=1):
    """
    Compute R@1/5/10 retrieval metrics from a score matrix.

    ``scores`` is (num_images, num_texts); ``iids`` holds the image id of
    each row, ``tiids`` the text id of each column (or of each
    (row, column) cell when ``tiids_dims == 2``).
    Returns (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10).
    """
    def _recall(pred_ids, gold, dim):
        # Fraction of queries whose gold id appears among the predictions.
        return (gold == pred_ids).float().max(dim=dim)[0].mean()

    # ---- text retrieval: rank texts for each image row ----
    row_topk = {k: scores.topk(k, dim=1).indices for k in (10, 5, 1)}
    if tiids_dims == 2:
        rows = torch.arange(len(row_topk[10])).unsqueeze(1)
        row_ids = {k: tiids[rows, idx] for k, idx in row_topk.items()}
    else:
        row_ids = {k: tiids[idx] for k, idx in row_topk.items()}
    tr_r10 = _recall(row_ids[10], iids.unsqueeze(1), 1)
    tr_r5 = _recall(row_ids[5], iids.unsqueeze(1), 1)
    tr_r1 = _recall(row_ids[1], iids.unsqueeze(1), 1)

    # ---- image retrieval: rank images for each text column ----
    col_ids = {k: iids[scores.topk(k, dim=0).indices] for k in (10, 5, 1)}
    ir_r10 = _recall(col_ids[10], tiids.unsqueeze(0), 0)
    ir_r5 = _recall(col_ids[5], tiids.unsqueeze(0), 0)
    ir_r1 = _recall(col_ids[1], tiids.unsqueeze(0), 0)

    return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
def init_weights(module):
    """BERT-style init: N(0, 0.02) weights, zeroed biases, unit LayerNorm."""
    # LayerNorm and Linear/Embedding are mutually exclusive, so the branch
    # order does not affect the outcome.
    if isinstance(module, nn.LayerNorm):
        module.weight.data.fill_(1.0)
        module.bias.data.zero_()
    elif isinstance(module, (nn.Linear, nn.Embedding)):
        module.weight.data.normal_(mean=0.0, std=0.02)

    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_()
def vqa_test_step(pl_module, batch, output):
    """Map VQA/GQA logits to answer strings for one test batch.

    Bug fix: the original returned ``"gqa": True`` unconditionally (the
    VQA-path return with ``"gqa": False`` was unreachable dead code), so
    VQA submissions were always written in GQA format. The dataset flag is
    now tracked explicitly while resolving the id->answer map.

    Args:
        pl_module: Lightning module whose datamodule holds the id2answer map.
        batch: test batch providing "qid" entries.
        output: dict with "vqa_logits" of shape (batch, num_answers).
    Returns:
        dict with "qids", decoded "preds", and the "gqa" format flag.
    """
    try:
        id2answer = (
            pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer
            if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts
            else pl_module.trainer.datamodule.dm_dicts["vqa"].id2answer
        )
        is_gqa = False
    except Exception:
        # No VQA datamodule registered: fall back to the GQA variants.
        id2answer = (
            pl_module.trainer.datamodule.dm_dicts["gqa_test"].id2answer
            if "gqa_test" in pl_module.trainer.datamodule.dm_dicts
            else pl_module.trainer.datamodule.dm_dicts["gqa"].id2answer
        )
        is_gqa = True
    vqa_logits = output["vqa_logits"]
    vqa_preds = vqa_logits.argmax(dim=-1)
    vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
    qids = batch["qid"]
    return {"qids": qids, "preds": vqa_preds, "gqa": is_gqa}
def arc_test_step(pl_module, batch, output):
    """Pass the step output through unchanged (ARC needs no post-processing)."""
    return output
def vqa_test_wrapup(outs, model_name, exp_path='.'):
    """Merge per-rank VQA/GQA predictions into a single submission JSON.

    Each rank writes its shard to ``vqa_submit_<rank>.json``; rank 0 then
    concatenates all shards into ``vqa_submit_<model_name>.json`` (GQA and
    VQA submissions use different key names), and every rank removes its
    own shard afterwards.
    """
    rank = torch.distributed.get_rank()
    qids, preds = list(), list()
    gqa = False
    for out in outs:
        qids += out["qids"]
        preds += out["preds"]
        gqa = out['gqa']
    rets = list()
    for qid, pred in zip(qids, preds):
        if gqa:
            rets.append({"questionId": qid, "prediction": pred})
        else:
            rets.append({"question_id": qid, "answer": pred})
    result_path_rank = os.path.join(exp_path, f"vqa_submit_{rank}.json")
    with open(result_path_rank, "w") as fp:
        print('!!! saving vqa results to {}'.format(result_path_rank))
        json.dump(rets, fp, indent=4)
    # Wait until every rank has written its shard before merging.
    torch.distributed.barrier()
    if rank == 0:
        jsons = list()
        paths = list(glob.glob(os.path.join(exp_path, "vqa_submit_*.json")))
        for path in paths:
            with open(path, "r") as fp:
                jsons += json.load(fp)
        os.makedirs("result", exist_ok=True)
        result_path = os.path.join(exp_path, f"vqa_submit_{model_name}.json")
        with open(result_path, "w") as fp:
            print('!!! saving final vqa results to {}'.format(result_path))
            json.dump(jsons, fp, indent=4)
    torch.distributed.barrier()
    os.remove(os.path.join(exp_path, f"vqa_submit_{rank}.json"))
    print('!!! deleting vqa results at {}'.format(result_path_rank))
| 26,228 | 35.994358 | 111 | py |
FLM | FLM-master/flm/modules/flm_module.py | import torch
import torch.nn as nn
import pytorch_lightning as pl
from transformers.models.bert.modeling_bert import BertConfig, BertModel
from .bert_model import BertCrossLayer
from . import heads, objectives, meter_utils
from .clip_model import build_model, adapt_position_encoding
from transformers import RobertaConfig, RobertaModel
import torch.distributed as dist
import copy
from flm.utils.utils import adapt_vocab_size
from flm.modules.flm_tools import get_corr_bi_attention_mask
class AllGather_multi(torch.autograd.Function):
    """An autograd function that performs allgather on a tensor."""
    @staticmethod
    def forward(ctx, tensor, args):
        # args carries "world_size" and "rank"; gather every rank's tensor
        # and concatenate along the batch dimension.
        output = [torch.empty_like(tensor) for _ in range(args["world_size"])]
        dist.all_gather(output, tensor)
        ctx.rank = args["rank"]
        ctx.batch_size = tensor.shape[0]
        return torch.cat(output, 0)
    @staticmethod
    def backward(ctx, grad_output):
        # Route back only the gradient slice that belongs to this rank;
        # the trailing Nones cover the non-tensor inputs.
        return (
            grad_output[ctx.batch_size *
                        ctx.rank: ctx.batch_size * (ctx.rank + 1)],
            None, None,
        )
class FLMTransformerSS(pl.LightningModule):
    def __init__(self, config):
        """Build the FLM vision-language model from a config dict.

        Sets up the CLIP-style vision tower, the (Ro)BERTa text encoder,
        the cross-modal fusion stack, the task heads selected by
        ``config["loss_names"]``, and optionally restores pretrained or
        downstream weights from ``config["load_path"]``.
        """
        super().__init__()
        try:
            self.save_hyperparameters(config)
        except:
            pass
        self.hparams.config = config
        self.is_vit = ('swin' not in config['vit'])
        # self.is_mae = 'mae' in config['vit']
        # Config for the cross-modal (top) transformer layers.
        if 'roberta' in config['tokenizer']:
            bert_config = RobertaConfig(
                vocab_size=config["vocab_size"],
                hidden_size=config["hidden_size"],
                num_hidden_layers=config["num_top_layer"],
                num_attention_heads=config["num_heads"],
                intermediate_size=config["hidden_size"] * config["mlp_ratio"],
                max_position_embeddings=config["max_text_len"],
                hidden_dropout_prob=config["drop_rate"],
                attention_probs_dropout_prob=config["drop_rate"],
                is_decoder=config["is_causal_mask"],
            )
        else:
            bert_config = BertConfig(
                vocab_size=config["vocab_size"],
                hidden_size=config["hidden_size"],
                num_hidden_layers=config["num_top_layer"],
                num_attention_heads=config["num_heads"],
                intermediate_size=config["hidden_size"] * config["mlp_ratio"],
                max_position_embeddings=config["max_text_len"],
                hidden_dropout_prob=config["drop_rate"],
                attention_probs_dropout_prob=config["drop_rate"],
                is_decoder=config["is_causal_mask"],
            )
        resolution_after = config['image_size']
        self.all_gather = AllGather_multi.apply
        # Projections from text/image encoder widths into the shared width.
        self.cross_modal_text_transform = nn.Linear(
            config['input_text_embed_size'], config['hidden_size'])
        self.cross_modal_text_transform.apply(objectives.init_weights)
        self.cross_modal_image_transform = nn.Linear(
            config['input_image_embed_size'], config['hidden_size'])
        self.cross_modal_image_transform.apply(objectives.init_weights)
        self.token_type_embeddings = nn.Embedding(2, config["hidden_size"])
        self.token_type_embeddings.apply(objectives.init_weights)
        self.token_type_embeddings_flm = nn.Embedding(
            2, config["hidden_size_for_fusion"])
        self.token_type_embeddings_flm.apply(objectives.init_weights)
        # Let rank 0 download/caches the pretrained weights first so the
        # other ranks hit the local cache after the barrier.
        if torch.distributed.is_initialized():
            if torch.distributed.get_rank() == 0:
                build_model(config['vit'], resolution_after=resolution_after)
                if 'roberta' in config['tokenizer']:
                    RobertaModel.from_pretrained(config['tokenizer'])
                else:
                    BertModel.from_pretrained(config['tokenizer'])
            torch.distributed.barrier()
        self.vit_model = build_model(
            config['vit'], resolution_after=resolution_after)
        self.causal_mask = config['is_causal_mask']
        # Bottom text encoder: trained from scratch or loaded pretrained,
        # truncated to num_bottom_layer layers.
        if config["text_encoder_from_scratch"]:
            te_config = RobertaConfig.from_pretrained(config['tokenizer'])
            if self.causal_mask:
                te_config.is_decoder = True
            self.text_transformer = BertModel(config=te_config)
            # text_transformer_hidden_dim = te_config.hidden_size
        elif 'roberta' in config['tokenizer']:
            te_config = RobertaConfig.from_pretrained(config['tokenizer'])
            if self.causal_mask:
                te_config.is_decoder = True
            self.text_transformer = RobertaModel.from_pretrained(
                config['tokenizer'], config=te_config)
            self.text_transformer.encoder.layer = nn.ModuleList(
                [self.text_transformer.encoder.layer[_]
                 for _ in range(config['num_bottom_layer'])])
        else:
            # NOTE(review): from_pretrained here returns a *model*, not a
            # config, yet it is passed as ``config=`` below — verify this
            # branch is ever exercised.
            te_config = BertModel.from_pretrained(config['tokenizer'])
            if self.causal_mask:
                te_config.is_decoder = True
            self.text_transformer = BertModel.from_pretrained(
                config['tokenizer'], config=te_config)
            if True:
                self.text_transformer.encoder.layer = nn.ModuleList(
                    [self.text_transformer.encoder.layer[_]
                     for _ in range(config['num_bottom_layer'])])
        vocab_size = config["vocab_size"]
        if config['add_new_bos_token']:
            # Reserve ids for the extra <bos>/<eos> tokens.
            print('add two additional tokens')
            vocab_size = config["vocab_size"] + 2
            self.text_transformer.resize_token_embeddings(vocab_size)
            bert_config.vocab_size = vocab_size
        self.cross_modal_text_layers = nn.ModuleList(
            [BertCrossLayer(bert_config)
             for _ in range(config['num_top_layer'])])
        self.cross_modal_text_layers.apply(objectives.init_weights)
        self.cross_modal_layers = self.cross_modal_text_layers
        self.cross_modal_image_pooler = heads.Pooler(config["hidden_size"])
        self.cross_modal_image_pooler.apply(objectives.init_weights)
        self.cross_modal_text_pooler = heads.Pooler(config["hidden_size"])
        self.cross_modal_text_pooler.apply(objectives.init_weights)
        # Task heads, gated by the configured loss weights.
        if config["loss_names"]["mlm"] > 0:
            self.mlm_score = heads.MLMHead(bert_config)
            self.mlm_score.apply(objectives.init_weights)
        if config["loss_names"]["ar"] > 0:
            self.lm_score = heads.MLMHead(bert_config)
            self.lm_score.apply(objectives.init_weights)
        if config["flm_backbone"]:
            # FLM shares the text encoder / cross layers across streams and
            # adds a dedicated reconstructor (fusion) stack.
            self.text_transformer2 = self.text_transformer
            self.cross_modal_text_layers_r = self.cross_modal_text_layers
            # self.cross_modal_text_pooler_r = self.cross_modal_text_pooler
            self.cross_modal_text_transform_r = self.cross_modal_text_transform
            self.cross_modal_text_transform_f = self.cross_modal_text_transform
            self.cross_modal_text_pooler_f = heads.Pooler(
                config["hidden_size_for_fusion"])
            self.cross_modal_text_pooler_f.apply(objectives.init_weights)
            self.fusion_token_embedding = self.text_transformer.embeddings
            bert_config_fusion = copy.deepcopy(bert_config)
            bert_config_fusion.hidden_size = config['hidden_size_for_fusion']
            bert_config_fusion.num_attention_heads = config['num_heads_fusion']
            self.fusion_layers_top = nn.ModuleList([BertCrossLayer(
                bert_config_fusion)
                for _ in range(config['num_reconstructor_top_layer'])])
            self.fusion_layers_bottom = nn.ModuleList(
                [BertCrossLayer(bert_config_fusion)
                 for _ in range(config['num_reconstructor_bottom_layer'])])
            if True:  # remove unused params in self-attention layers
                for layer in self.fusion_layers_top:
                    layer.attention = None
                for layer in self.fusion_layers_bottom:
                    layer.attention = None
            self.lm_type_embeddings = nn.Embedding(2, config["hidden_size"])
            if config["loss_names"]["flm"] > 0:
                self.lm_score = heads.MLMHead(bert_config_fusion)
                self.lm_score.apply(objectives.init_weights)
                if config['share_lm_scorer_weights']:
                    self.lm_score_r = self.lm_score
                    self.lm_score_f = self.lm_score
                else:
                    self.lm_score_r = heads.MLMHead(bert_config_fusion)
                    self.lm_score_r.apply(objectives.init_weights)
                    self.lm_score_f = heads.MLMHead(bert_config_fusion)
                    self.lm_score_f.apply(objectives.init_weights)
        if config["loss_names"]["itm"] > 0:
            self.itm_score = heads.ITMHead(config["hidden_size"])
            self.itm_score_flm = heads.ITMHead(
                config["hidden_size_for_fusion"])
            self.itm_score.apply(objectives.init_weights)
            self.itm_score_flm.apply(objectives.init_weights)
        # Downstream heads operate on the fusion width under FLM.
        hs = self.hparams.config["hidden_size"] if not config['flm_backbone'] \
            else self.hparams.config["hidden_size_for_fusion"]
        if self.hparams.config["loss_names"]["vqa"] > 0:
            vs = self.hparams.config["vqav2_label_size"]
            self.vqa_classifier = nn.Sequential(
                nn.Linear(hs, hs * 2),
                nn.LayerNorm(hs * 2),
                nn.GELU(),
                nn.Linear(hs * 2, vs),
            )
            self.vqa_classifier.apply(objectives.init_weights)
        # ===================== Downstream ===================== #
        # Load pretrained weights before building fine-tuning-only modules.
        if (
            self.hparams.config["load_path"] != ""
            and not self.hparams.config["test_only"]
        ):
            ckpt = torch.load(
                self.hparams.config["load_path"], map_location="cpu")
            state_dict = ckpt["state_dict"]
            state_dict = adapt_position_encoding(
                state_dict, after=resolution_after,
                patch_size=self.hparams.config['patch_size'])
            state_dict = adapt_vocab_size(state_dict, vocab_size)
            if True:
                r = self.load_state_dict(state_dict, strict=False)
                print(' Missing keys in loading pretrained model: {},\
                    Unexpected keys number: {}'.format(
                    (r.missing_keys), (r.unexpected_keys)))
        if self.hparams.config["loss_names"]["nlvr2"] > 0:
            self.nlvr2_classifier = nn.Sequential(
                nn.Linear(hs * 2, hs * 2),
                nn.LayerNorm(hs * 2),
                nn.GELU(),
                nn.Linear(hs * 2, 2),
            )
            self.nlvr2_classifier.apply(objectives.init_weights)
        if self.hparams.config["loss_names"]["nlvr2"] > 0:
            # NLVR2 needs a third token type; seed it from type 1.
            emb_data = self.token_type_embeddings.weight.data
            self.token_type_embeddings = nn.Embedding(
                3, config['hidden_size'])  # TODO
            self.token_type_embeddings.apply(objectives.init_weights)
            self.token_type_embeddings.weight.data[0, :] = emb_data[0, :]
            self.token_type_embeddings.weight.data[1, :] = emb_data[1, :]
            self.token_type_embeddings.weight.data[2, :] = emb_data[1, :]
        if self.hparams.config["loss_names"]["irtr"] > 0:
            # Initialize the rank head from the ITM head's positive row,
            # then freeze the ITM head itself.
            self.rank_output = nn.Linear(hs, 1)
            self.rank_output.weight.data = self.itm_score.fc.weight.data[1:, :]
            self.rank_output.bias.data = self.itm_score.fc.bias.data[1:]
            self.margin = 0.2
            for p in self.itm_score.parameters():
                p.requires_grad = False
        meter_utils.set_metrics(self)
        self.current_tasks = list()
        # ===================== load downstream (test_only) ======================
        if self.hparams.config["load_path"] != "" and \
                self.hparams.config["test_only"]:
            ckpt = torch.load(
                self.hparams.config["load_path"], map_location="cpu")
            state_dict = ckpt["state_dict"]
            state_dict = adapt_position_encoding(
                state_dict, after=resolution_after,
                patch_size=self.hparams.config['patch_size'])
            state_dict = adapt_vocab_size(state_dict, vocab_size)
            r = self.load_state_dict(state_dict, strict=False)
            print(' Missing keys in loading pretrained model: {}, \
                Unexpected keys number: {}'.format(
                (r.missing_keys), (r.unexpected_keys)))
        self.config = config
def infer(self, *args, **kargs):
if 'flm_backbone' in kargs:
is_flm_backbone = kargs.pop('flm_backbone')
else:
is_flm_backbone = self.config['flm_backbone']
if is_flm_backbone:
return self.infer_three_stream(*args, **kargs)
else:
return self.infer_one_stream(*args, **kargs)
    def get_extended_attention_mask(self, attention_mask, input_shape, device, is_decoder=False):
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Adapted from HuggingFace's ``ModuleUtilsMixin.get_extended_attention_mask``,
        with an explicit ``is_decoder`` flag instead of reading the model config.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.
            is_decoder (:obj:`bool`, optional, defaults to False):
                When True and ``attention_mask`` is 2-D, a lower-triangular causal
                mask is combined with the padding mask so position i can only
                attend to positions <= i.
        Returns:
            :obj:`torch.Tensor` The extended additive attention mask (0 for keep,
            -10000 for masked), cast to :obj:`self.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                # Lower-triangular boolean matrix: query i may attend to key j iff j <= i.
                causal_mask = seq_ids[None, None, :].repeat(
                    batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)
                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - \
                        causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones(
                                (batch_size, seq_length,
                                 prefix_seq_len), device=device, dtype=causal_mask.dtype
                            ),
                            causal_mask,
                        ],
                        axis=-1,
                    )
                extended_attention_mask = causal_mask[:, None,
                                                      :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
            )
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
    def infer_one_stream(
        self,
        batch,
        mask_text=False,
        do_lm=False,
        image_token_type_idx=1,
        img=None,
        return_intermediate=False,
        image_only=False,
        text_only=False,
        enable_causal_mask=None,
        txt_key="text",
        keep_image_token_embed=False,
    ):
        """Single-stream forward pass: unimodal image/text encoders followed by
        cross-modal fusion layers.

        Args:
            batch: collated batch dict; text tensors are looked up under
                ``txt_key`` with a suffix chosen by ``mask_text``/``do_lm``.
            mask_text: use the MLM-masked text ids (``*_ids_mlm``).
            do_lm: use the LM labels (``*_labels_lm``) as text targets.
            image_token_type_idx: token-type id added to image embeddings
                (also selects ``image_{idx-1}`` from the batch when present).
            img: pre-extracted image tensor; read from ``batch`` when None.
            return_intermediate: return unimodal embeddings before fusion.
            image_only / text_only: early-return after the unimodal encoder.
            enable_causal_mask: overrides ``self.causal_mask`` when not None.
            keep_image_token_embed: force token-type id 1 for image tokens
                regardless of ``image_token_type_idx``.

        Returns:
            Dict with raw unimodal embeddings, fused text/image features, the
            pooled ``cls_feats`` (duplicated along the last dim), and the text
            ids/labels/masks that were used.
        """
        is_decoder = self.causal_mask if enable_causal_mask is None else enable_causal_mask
        if not text_only:
            # NOTE(review): redundant `if True:` kept as-is (likely a leftover
            # from a removed condition).
            if True:
                if img is None:
                    if f"image_{image_token_type_idx - 1}" in batch:
                        imgkey = f"image_{image_token_type_idx - 1}"
                    else:
                        imgkey = "image"
                    img = batch[imgkey][0]
                raw_image_embeds = self.vit_model(img)
                if image_only:
                    return {"image_embeds": raw_image_embeds}
        input_suffix = "_mlm" if mask_text else ""
        text_ids = batch[f"{txt_key}_ids{input_suffix}"]
        output_suffix = "_lm" if do_lm else input_suffix
        text_labels = batch[f"{txt_key}_labels{output_suffix}"]
        text_masks = batch[f"{txt_key}_masks"]
        text_embeds = self.text_transformer.embeddings(input_ids=text_ids)
        device = text_embeds.device
        input_shape = text_masks.size()
        extend_text_masks = self.get_extended_attention_mask(
            text_masks, input_shape, device, is_decoder)
        # if is_decoder and self.causal_mask_w_post_cls:
        if is_decoder:
            # Replace the CLS query row (row 0) with the last row's causal mask
            # so CLS attends to every position, then block all other queries
            # from attending to position 0.
            extend_text_masks = torch.cat(
                (extend_text_masks[:, :, -1:], extend_text_masks[:, :, 1:]), dim=2)
            extend_text_masks[:, :, 1:, 0] = -10000.
        num_bottom_layer = self.config['num_bottom_layer']
        # Text-only (bottom) transformer layers.
        for layer in self.text_transformer.encoder.layer[:num_bottom_layer]:
            text_embeds = layer(
                text_embeds, attention_mask=extend_text_masks)[0]
        raw_text_embeds = text_embeds
        if text_only:
            return {"text_embeds": raw_text_embeds}
        if return_intermediate:
            ret = {
                "text_embeds": raw_text_embeds,
                "text_mask": extend_text_masks,
                "image_embeds": raw_image_embeds,
                "image_feats": raw_image_embeds,
                'img': img,
            }
            return ret
        # Cross-Modal Fusion
        text_embeds = self.cross_modal_text_transform(raw_text_embeds)
        text_embeds = text_embeds + \
            self.token_type_embeddings(
                torch.zeros_like(text_embeds[..., 0]).long())
        image_embeds = self.cross_modal_image_transform(raw_image_embeds)
        # Image tokens are never padded: all-ones mask -> all-zeros additive mask.
        image_masks = torch.ones((image_embeds.size(0), image_embeds.size(
            1)), dtype=torch.long, device=image_embeds.device)
        extend_image_masks = image_masks.reshape(
            image_masks.size(0), 1, 1, image_masks.size(1))
        extend_image_masks = (1.0 - extend_image_masks) * -10000.0
        if keep_image_token_embed:
            image_embeds = image_embeds + self.token_type_embeddings(
                torch.full(image_embeds.shape[:2], 1, device=image_embeds.device))
        else:
            image_embeds = image_embeds + self.token_type_embeddings(torch.full(
                image_embeds.shape[:2], image_token_type_idx, device=image_embeds.device))
        x, y = text_embeds, image_embeds
        # Text queries cross-attend to the (frozen-in-this-loop) image tokens.
        for text_layer in self.cross_modal_text_layers:
            x1 = text_layer(x, y, attention_mask=extend_text_masks,
                            encoder_attention_mask=extend_image_masks)
            x = x1[0]
        text_feats, image_feats = x, y
        cls_feats = self.cross_modal_text_pooler(x)
        # Duplicated to match the two-tower cls_feats dimension used elsewhere.
        cls_feats = torch.cat([cls_feats, cls_feats], dim=-1)
        ret = {
            "text_embeds": raw_text_embeds,
            "image_embeds": raw_image_embeds,
            "text_feats": text_feats,
            "image_feats": image_feats,
            "cls_feats": cls_feats,
            "text_labels": text_labels,
            "text_ids": text_ids,
            "text_masks": text_masks,
            'img': img,
        }
        return ret
def get_img_text_merged_mask(self, text_mask, image_mask, is_causal=False, pad_token_id=0):
b, hn, w1, h1 = text_mask.shape
w2, h2 = image_mask.shape[-2:]
if w1 == 1 and w1 != h1:
text_mask = text_mask.expand([-1, -1, h1, -1])
if w2 == 1 and w2 != h2:
image_mask = image_mask.expand([-1, -1, h2, -1])
top_pad = torch.ones(
(b, hn, h1, h2), device=text_mask.device) * pad_token_id
down_pad = torch.ones(
(b, hn, h2, h1), device=image_mask.device) * pad_token_id
if is_causal:
top_pad = torch.ones(
(b, hn, h1, w2), device=text_mask.device) * (-10000.)
top = torch.cat([text_mask, top_pad], dim=-1)
down = torch.cat([down_pad, image_mask], dim=-1)
mask = torch.cat([top, down], dim=-2)
return mask
    def infer_three_stream(
        self,
        batch,
        mask_text=False,
        do_lm=False,
        image_token_type_idx=1,
        img=None,
        return_intermediate=False,
        image_only=False,
        text_only=False,
        enable_causal_mask=None,
        txt_key='text',
        keep_image_token_embed=False
    ):
        """Three-stream FLM forward pass.

        Runs two directional text encoders — ``text_transformer`` with the
        (possibly causal) left-to-right mask and ``text_transformer2`` with
        the transposed/reconstruction mask — plus a third "fusion" stream of
        all-``<mask>`` query tokens that cross-attends to both directional
        streams via ``bi_attention_mask``.  All three streams then fuse with
        the image tokens in the top cross-modal layers.

        Args mirror :meth:`infer_one_stream`; ``mask_text`` must be False and
        ``do_lm`` is forced to True (FLM always trains with LM labels).

        Returns:
            Dict with the fusion-stream features (``text_feats``), the two
            directional streams (``text_feats1``/``text_feats2``), pooled
            ``cls_feats`` and the text ids/labels/masks used.
        """
        assert mask_text is False
        do_lm = True  # FLM always uses LM-style labels.
        if not text_only:
            if img is None:
                if f"image_{image_token_type_idx - 1}" in batch:
                    imgkey = f"image_{image_token_type_idx - 1}"
                else:
                    imgkey = "image"
                img = batch[imgkey][0]
            image_only_embeds = self.vit_model(img)
            if image_only:
                return {"image_embeds": image_only_embeds}
        input_suffix = "_mlm" if mask_text else ""
        text_ids = batch[f"{txt_key}_ids{input_suffix}"]
        output_suffix = "_lm" if do_lm else input_suffix
        text_labels = batch[f"{txt_key}_labels{output_suffix}"]
        text_masks = batch[f"{txt_key}_masks"]
        # Fusion-stream inputs: the sequence with every token replaced by <mask>.
        fusion_ids = batch[f"{txt_key}_all_masks_ids"]
        if self.config.get('only_use_cls_for_flm', False):
            fusion_ids = fusion_ids[:, :1]
        flm_masks = batch[f"{txt_key}_flm_masks"].unsqueeze(dim=1)
        text_embeds_f = self.fusion_token_embedding(input_ids=fusion_ids)
        # if hasattr(self, 'dim_expand_flag_bottom') and self.dim_expand_flag_bottom:
        #     text_embeds_f = self.query_to_fusion_dim(text_embeds_f)
        text_embeds = self.text_transformer.embeddings(input_ids=text_ids)
        text_embeds_r = self.text_transformer2.embeddings(input_ids=text_ids)
        device = text_embeds.device
        input_shape = text_masks.size()
        extend_text_masks = self.text_transformer.get_extended_attention_mask(
            text_masks, input_shape, device)
        if (not self.training and self.config['full_att_mask_for_eval']) or self.config['full_att_mask']:
            # Full (bidirectional) attention over non-pad tokens.
            extend_text_masks = torch.ones_like(
                text_masks)[:, None, :, None] * text_masks[:, None, None, :]
        # 1 where both query and key are real non-CLS tokens; CLS row/column
        # excluded so its attention pattern is never overwritten below.
        nonpad_area = (text_masks.unsqueeze(
            2) * text_masks.unsqueeze(1)).unsqueeze(1)
        nonpad_area[:, :, 0] = 0
        nonpad_area[:, :, :, 0] = 0
        if self.causal_mask:
            # Give the CLS query the last row's (full) causal mask and block
            # all other queries from attending to position 0.
            extend_text_masks = torch.cat(
                (extend_text_masks[:, :, -1:], extend_text_masks[:, :, 1:]), dim=2)
            extend_text_masks[:, :, 1:, 0] = -10000.
        if (not self.training and self.config['full_att_mask_for_eval']) or self.config['full_att_mask']:
            extend_text_masks_r = extend_text_masks
        elif self.training and self.config["random_flm_mask"]:
            # Random FLM masks for the non-pad interior; the reconstruction
            # stream gets the transposed pattern.
            extend_text_masks = flm_masks * nonpad_area + \
                (1-nonpad_area) * extend_text_masks
            extend_text_masks_r = flm_masks.transpose(
                2, 3) * nonpad_area + (1-nonpad_area) * flm_masks
        else:
            extend_text_masks_r = extend_text_masks.transpose(
                2, 3) * nonpad_area + (1-nonpad_area) * extend_text_masks
        if (not self.training and self.config['full_att_mask_for_eval']) or self.config['full_att_mask']:
            bi_attention_mask = torch.cat(
                (extend_text_masks, extend_text_masks_r), dim=-1)
        else:
            if self.config['span_corruption_rate'] > 0:
                bi_attention_mask = get_corr_bi_attention_mask(
                    extend_text_masks, extend_text_masks_r, self.config['span_corruption_rate'])
            else:
                # Default: concatenated masks with each token's own position
                # blanked out in both halves (no identity leakage).
                bi_attention_mask = self.get_bi_attention_mask(
                    extend_text_masks, extend_text_masks_r)
        if self.config.get('only_use_cls_for_flm', False):
            bi_attention_mask = bi_attention_mask[:, :, :1, :]
        num_bottom_layer = self.config['num_bottom_layer']
        assert self.config['num_reconstructor_bottom_layer'] <= num_bottom_layer
        for i in range(num_bottom_layer):
            text_embeds = self.text_transformer.encoder.layer[i](
                text_embeds, attention_mask=extend_text_masks)[0]
            text_embeds_r = self.text_transformer2.encoder.layer[i](
                text_embeds_r, attention_mask=extend_text_masks_r)[0]
            t_num_layers = num_bottom_layer - \
                self.config['num_reconstructor_bottom_layer']
            if i >= t_num_layers:
                # Fusion stream cross-attends (no self-attention) to both
                # directional streams from the last reconstructor layers.
                bi_contexts = torch.cat((text_embeds, text_embeds_r), dim=1)
                text_embeds_f = self.fusion_layers_bottom[i-t_num_layers](
                    text_embeds_f, bi_contexts, attention_mask=None,
                    encoder_attention_mask=bi_attention_mask,
                    disable_self_attention=True)[0]
        if self.config['num_reconstructor_bottom_layer'] > 0:
            text_only_embeds = text_embeds_f
        else:
            text_only_embeds = text_embeds + text_embeds_r
        if text_only:
            return {"text_embeds": text_only_embeds}
        if return_intermediate:
            ret = {
                "text_embeds": text_only_embeds,
                "image_embeds": image_only_embeds,
                "text_mask": text_masks,
            }
            return ret
        text_embeds = self.cross_modal_text_transform(text_embeds)
        text_embeds_r = self.cross_modal_text_transform_r(text_embeds_r)
        text_embeds_f = self.cross_modal_text_transform_f(text_embeds_f)
        text_embeds = text_embeds + \
            self.token_type_embeddings(torch.zeros_like(text_masks))
        text_embeds_r = text_embeds_r + \
            self.token_type_embeddings_flm(torch.zeros_like(text_masks))
        text_embeds_f = text_embeds_f + \
            self.token_type_embeddings_flm(torch.ones_like(fusion_ids))
        image_embeds = self.cross_modal_image_transform(image_only_embeds)
        image_masks = torch.ones((image_embeds.size(0), image_embeds.size(
            1)), dtype=torch.long, device=image_embeds.device)
        # NOTE(review): unlike infer_one_stream, this mask is not converted to
        # the additive (1-m)*-10000 form; since it is all ones the additive
        # form would be all zeros anyway — confirm downstream layers add it.
        extend_image_masks = image_masks.reshape(
            image_masks.size(0), 1, 1, image_masks.size(1))
        if keep_image_token_embed:
            image_embeds = image_embeds + self.token_type_embeddings(
                torch.full_like(image_masks, 1))
        else:
            image_embeds = image_embeds + self.token_type_embeddings(
                torch.full_like(image_masks, image_token_type_idx))
        x, y = text_embeds, image_embeds
        x_r, x_f = text_embeds_r, text_embeds_f
        num_top_layer = self.config['num_top_layer']
        for i in range(num_top_layer):
            x = self.cross_modal_text_layers[i](
                x, y,
                attention_mask=extend_text_masks,
                encoder_attention_mask=extend_image_masks)[0]
            x_r = self.cross_modal_text_layers_r[i](
                x_r, y,
                attention_mask=extend_text_masks_r,
                encoder_attention_mask=extend_image_masks)[0]
            t_only_num_layer = num_top_layer - \
                self.config['num_reconstructor_top_layer']
            if i >= t_only_num_layer:
                bi_contexts = torch.cat([x, x_r], dim=1)
                x_f = self.fusion_layers_top[i-t_only_num_layer](
                    x_f, bi_contexts,
                    attention_mask=None,
                    encoder_attention_mask=bi_attention_mask,
                    disable_self_attention=True)[0]
        text_feats, image_feats = x_f, y
        text_feats1 = x
        text_feats2 = x_r
        cls_feats = self.cross_modal_text_pooler_f(x_f)
        ret = {
            "text_embeds": text_only_embeds,
            "image_embeds": image_only_embeds,
            "text_feats": text_feats,
            "text_feats1": text_feats1,
            "text_feats2": text_feats2,
            "cls_feats": cls_feats,
            "text_labels": text_labels,
            "text_ids": text_ids,
            "text_masks": text_masks
        }
        return ret
def get_bi_attention_mask(self, mask, mask_r):
N = mask.shape[-1]
bi_mask = torch.cat([mask, mask_r], dim=-1)
bi_mask[:, :, torch.arange(1, N), torch.arange(1, N)] = -10000.
bi_mask[:, :, torch.arange(1, N), N + torch.arange(1, N)] = -10000.
return bi_mask
def forward(self, batch):
ret = dict()
if len(self.current_tasks) == 0:
ret.update(self.infer(batch))
return ret
# Masked Language Modeling
if "mlm" in self.current_tasks:
ss_backbone = True
enable_causal_mask = False
ret.update(objectives.compute_mlm(
self, batch, ss_backbone, enable_causal_mask))
# Language Modeling
if "ar" in self.current_tasks:
ret.update(objectives.compute_lm(self, batch))
# Language Modeling
if "flm" in self.current_tasks:
ret.update(objectives.compute_lm(self, batch, lm_type='flm'))
# Image Text Matching
if "itm" in self.current_tasks:
enable_causal_mask = False
ss_backbone = True
ret.update(objectives.compute_itm(
self, batch, ss_backbone, enable_causal_mask))
# Visual Question Answering
if "vqa" in self.current_tasks:
ret.update(objectives.compute_vqa(self, batch))
# Natural Language for Visual Reasoning 2
if "nlvr2" in self.current_tasks:
ret.update(objectives.compute_nlvr2(self, batch))
# Image Retrieval and Text Retrieval
if "irtr" in self.current_tasks:
ret.update(objectives.compute_irtr(self, batch))
return ret
def training_step(self, batch, batch_idx):
if self.config['debug']:
print('train step: ', self.current_epoch,
self.global_step, 'batch_idx: ', batch_idx)
if batch['is_sep_mlm']:
total_loss = 0
for _batch in batch['batch']:
total_loss += self.sub_training_step(_batch, batch_idx)
total_loss = total_loss / len(batch['batch'])
return total_loss
else:
return self.sub_training_step(batch, batch_idx)
def sub_training_step(self, batch, batch_idx):
meter_utils.set_task(self)
output = self(batch)
loss_weights = self.hparams.config["loss_names"]
# pdb.set_trace()
total_loss = sum([loss_weights[k.split('_')[0]] *
v for k, v in output.items() if "loss" in k])
if self.config['debug']:
# import pdb
# pdb.set_trace()
print(' ', [(k, v) for k, v in output.items() if "loss" in k])
print(' total_loss: {}'.format(total_loss))
return total_loss
    def training_epoch_end(self, outs):
        """Aggregate and log epoch-level training metrics via the shared meter utils."""
        meter_utils.epoch_wrapup(self)
def validation_step(self, batch, batch_idx):
if self.config['debug']:
print('val step: ', self.current_epoch,
self.global_step, 'batch_idx: ', batch_idx)
meter_utils.set_task(self)
output = self(batch)
    def validation_epoch_end(self, outs):
        """Aggregate and log epoch-level validation metrics via the shared meter utils."""
        meter_utils.epoch_wrapup(self)
def test_step(self, batch, batch_idx):
meter_utils.set_task(self)
if not self.hparams.config['skip_test_step']:
output = self(batch)
ret = dict()
if self.hparams.config["loss_names"]["vqa"] > 0:
ret.update(objectives.vqa_test_step(self, batch, output))
return ret
    def test_epoch_end(self, outs):
        """Write test-time artifacts (VQA submission file) and wrap up metrics."""
        # Derive a run name from the checkpoint filename (strip the ".ckpt" suffix).
        model_name = self.hparams.config["load_path"].split("/")[-1][:-5]
        if self.hparams.config["loss_names"]["vqa"] > 0:
            # Gather per-step VQA predictions and dump the submission file.
            objectives.vqa_test_wrapup(
                outs, model_name, self.config['exp_path'])
        meter_utils.epoch_wrapup(self)
    def configure_optimizers(self):
        """Delegate optimizer/LR-scheduler construction to ``meter_utils.set_schedule``."""
        return meter_utils.set_schedule(self)
| 33,610 | 41.871173 | 122 | py |
FLM | FLM-master/flm/modules/heads.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
from transformers.models.bert.modeling_bert import BertPredictionHeadTransform
class Pooler(nn.Module):
    """BERT-style pooler: project the first ([CLS]) token and squash with tanh."""

    def __init__(self, hidden_size):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # Pool by taking the representation of the first token only.
        cls_token = hidden_states[:, 0]
        return self.activation(self.dense(cls_token))
class ITMHead(nn.Module):
    """Two-way (match / no-match) linear classifier for image-text matching."""

    def __init__(self, hidden_size):
        super().__init__()
        self.fc = nn.Linear(hidden_size, 2)

    def forward(self, x):
        return self.fc(x)
class MLMHead(nn.Module):
    """Masked-LM head: BERT prediction transform followed by a vocab projection.

    When *weight* is supplied the decoder weight is tied to it (typically the
    input word-embedding matrix); the output bias stays a separate parameter.
    """

    def __init__(self, config, weight=None):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        self.decoder = nn.Linear(
            config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        if weight is not None:
            # Weight tying with the embedding table.
            self.decoder.weight = weight

    def forward(self, x):
        return self.decoder(self.transform(x)) + self.bias
| 1,280 | 27.466667 | 78 | py |
FLM | FLM-master/flm/datasets/base_dataset.py | import random
import torch
import io
import pyarrow as pa
import os
import pdb
from PIL import Image
from ..transforms import keys_to_transforms
import pdb
import copy
class BaseDataset(torch.utils.data.Dataset):
    """Arrow-backed image/text dataset shared by the task-specific datasets.

    Reads one or more ``{data_dir}/{name}.arrow`` tables, flattens
    (image, caption) pairs into ``index_mapper`` and provides tokenized text,
    padded images and the FLM/MLM collate logic.
    """
    def __init__(
        self,
        data_dir: str,
        transform_keys: list,
        image_size: int,
        names: list,
        text_column_name: str = "",
        remove_duplicate=True,
        max_text_len=40,
        max_dataset_len=-1,
        draw_false_image=0,
        draw_false_text=0,
        image_only=False,
        tokenizer=None,
        disable_sep_mlm=True,
        text_preprocessor=None,
    ):
        """
        data_dir : where dataset file *.arrow lives; existence should be guaranteed via DataModule.prepare_data
        transform_keys : keys for generating augmented views of images
        text_column_name : pyarrow table column name that has list of strings as elements
        """
        assert len(transform_keys) >= 1
        super().__init__()
        self.transforms = keys_to_transforms(transform_keys, size=image_size)
        # CLIP transforms need RGBA decoding (see get_raw_image).
        self.clip_transform = False
        for transform_key in transform_keys:
            if 'clip' in transform_key:
                self.clip_transform = True
                break
        self.text_column_name = text_column_name
        self.names = names
        self.max_text_len = max_text_len
        self.draw_false_image = draw_false_image
        self.draw_false_text = draw_false_text
        self.image_only = image_only
        self.data_dir = data_dir
        self.disable_sep_mlm = disable_sep_mlm
        self.text_preprocessor = text_preprocessor
        # NOTE(review): the `tokenizer` argument is accepted but never stored;
        # `self.tokenizer` (used by get_text/get_flm_batch) appears to be
        # attached externally by the datamodule — confirm.
        if len(names) != 0:
            # Memory-map every arrow shard and concatenate into one table.
            tables = [
                pa.ipc.RecordBatchFileReader(
                    pa.memory_map(f"{data_dir}/{name}.arrow", "r")
                ).read_all()
                for name in names
                if os.path.isfile(f"{data_dir}/{name}.arrow")
            ]
            self.table_names = list()
            for i, name in enumerate(names):
                self.table_names += [name] * len(tables[i])
            if max_dataset_len != -1:
                self.table = pa.concat_tables(tables, promote=True)[
                    :max_dataset_len]
                print(' truncate the dataset with length: {}'.format(max_dataset_len))
            else:
                self.table = pa.concat_tables(tables, promote=True)
            if text_column_name != "":
                self.text_column_name = text_column_name
                self.all_texts = self.table[text_column_name].to_pandas(
                ).tolist()
                # all_texts is a list (per image) of caption lists.
                if type(self.all_texts[0][0]) == str:
                    if type(self.all_texts[0]) == str:
                        self.all_texts = [
                            [self.text_preprocessor(text)] for text in self.all_texts]
                    else:
                        self.all_texts = (
                            [list(set([self.text_preprocessor(text) for text in texts]))
                             for texts in self.all_texts]
                            if remove_duplicate
                            else self.all_texts
                        )
                else:  # snli
                    self.all_texts = (
                        [[t[1].strip() for t in texts]
                         for texts in self.all_texts]
                    )
            else:
                self.all_texts = list()
            # index_mapper: flat sample index -> (image index, caption index).
            self.index_mapper = dict()
            if text_column_name != "" and not self.image_only:
                j = 0
                for i, texts in enumerate(self.all_texts):
                    for _j in range(len(texts)):
                        self.index_mapper[j] = (i, _j)
                        j += 1
            else:
                for i in range(len(self.table)):
                    self.index_mapper[i] = (i, None)
            # print(' Dataset length', len(self.index_mapper))
        else:
            self.index_mapper = dict()
            self.all_texts = list()
    @property
    def corpus(self):
        """Flat list of every caption across all images."""
        return [text for texts in self.all_texts for text in texts]
    def __len__(self):
        return len(self.index_mapper)
    def get_raw_image(self, index, image_key="image"):
        """Decode the PIL image stored in the arrow table for sample *index*."""
        index, caption_index = self.index_mapper[index]
        image_bytes = io.BytesIO(self.table[image_key][index].as_py())
        image_bytes.seek(0)
        if self.clip_transform:
            return Image.open(image_bytes).convert("RGBA")
        else:
            return Image.open(image_bytes).convert("RGB")
    def get_image(self, index, image_key="image"):
        """Return all augmented views of the image plus its index bookkeeping."""
        image = self.get_raw_image(index, image_key=image_key)
        image_tensor = [tr(image) for tr in self.transforms]
        return {
            "image": image_tensor,
            "img_index": self.index_mapper[index][0],
            "cap_index": self.index_mapper[index][1],
            "raw_index": index,
        }
    def get_false_image(self, rep, image_key="image"):
        """get false images for image-text matching loss"""
        random_index = random.randint(0, len(self.index_mapper) - 1)
        image = self.get_raw_image(random_index, image_key=image_key)
        image_tensor = [tr(image) for tr in self.transforms]
        return {f"false_image_{rep}": image_tensor}
    def get_text(self, raw_index):
        """Tokenize the caption of sample *raw_index* (padded to max_text_len)."""
        index, caption_index = self.index_mapper[raw_index]
        text = self.all_texts[index][caption_index]
        encoding = self.tokenizer(
            text,
            padding="max_length",
            truncation=True,
            max_length=self.max_text_len,
            return_special_tokens_mask=True,
        )
        return {
            "text": (text, encoding),
            "img_index": index,
            "cap_index": caption_index,
            "raw_index": raw_index,
        }
    def get_false_text(self, rep):
        """get false text for image-text matching loss"""
        random_index = random.randint(0, len(self.index_mapper) - 1)
        index, caption_index = self.index_mapper[random_index]
        text = self.all_texts[index][caption_index]
        encoding = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_text_len,
            return_special_tokens_mask=True,
        )
        return {f"false_text_{rep}": (text, encoding)}
    def get_suite(self, index):
        """Assemble one training example (image, text, ITM negatives),
        retrying with a random index whenever decoding fails."""
        result = None
        while result is None:
            try:
                ret = dict()
                ret.update(self.get_image(index))
                if not self.image_only:
                    txt = self.get_text(index)
                    # "replica" marks captions beyond the first for this image.
                    ret.update(
                        {"replica": True if txt["cap_index"] > 0 else False})
                    ret.update(txt)
                for i in range(self.draw_false_image):
                    ret.update(self.get_false_image(i))
                for i in range(self.draw_false_text):
                    ret.update(self.get_false_text(i))
                result = True
            except Exception as e:
                print(
                    f"Error while read file idx {index} in {self.names[0]} -> {e}")
                index = random.randint(0, len(self.index_mapper) - 1)
        return ret
    def collate(self, batch, mlm_collator):
        """Batch collate: zero-pad images to the batch max size, run the
        MLM/prefix-LM/FLM collators over every text field, and build the
        ``<key>_ids/_labels/_masks`` tensors consumed by the model."""
        batch_size = len(batch)
        keys = set([key for b in batch for key in b.keys()])
        raw_dict_batch = {
            k: [dic[k] if k in dic else None for dic in batch] for k in keys}
        img_keys = [k for k in list(raw_dict_batch.keys()) if "image" in k]
        img_sizes = list()
        for img_key in img_keys:
            img = raw_dict_batch[img_key]
            img_sizes += [ii.shape for i in img if i is not None for ii in i]
        for size in img_sizes:
            assert (
                len(size) == 3
            ), f"Collate error, an image should be in shape of (3, H, W), instead of given {size}"
        if len(img_keys) != 0:
            max_height = max([i[1] for i in img_sizes])
            max_width = max([i[2] for i in img_sizes])
        # Copy each view into a zero tensor of the batch-max spatial size.
        for img_key in img_keys:
            img = raw_dict_batch[img_key]
            view_size = len(img[0])
            new_images = [
                torch.zeros(batch_size, 3, max_height, max_width)
                for _ in range(view_size)
            ]
            for bi in range(batch_size):
                orig_batch = img[bi]
                for vi in range(view_size):
                    if orig_batch is None:
                        new_images[vi][bi] = None
                    else:
                        orig = img[bi][vi]
                        new_images[vi][bi, :, : orig.shape[1],
                                       : orig.shape[2]] = orig
            raw_dict_batch[img_key] = new_images
        txt_keys = [k for k in list(raw_dict_batch.keys()) if "text" in k]
        if len(txt_keys) != 0:
            texts = [[d[0] for d in raw_dict_batch[txt_key]]
                     for txt_key in txt_keys]
            encodings = [[d[1] for d in raw_dict_batch[txt_key]]
                         for txt_key in txt_keys]
            flatten_encodings = [e for encoding in encodings for e in encoding]
            flatten_mlms = mlm_collator['mlm_collator'](flatten_encodings)
            # The MLM collator may return a list of masked variants; each one
            # then becomes its own sub-batch unless disable_sep_mlm is set.
            is_sep_mlm = type(
                flatten_mlms) == list and not self.disable_sep_mlm
            flatten_mlms_all = flatten_mlms if type(
                flatten_mlms) == list else [flatten_mlms]
            dict_batch_sep_mlm = {'batch': []}
            for flatten_mlms in flatten_mlms_all:
                dict_batch = copy.deepcopy(raw_dict_batch)
                for i, txt_key in enumerate(txt_keys):
                    texts, encodings = (
                        [d[0] for d in dict_batch[txt_key]],
                        [d[1] for d in dict_batch[txt_key]],
                    )
                    # Slice out this text key's rows from the flattened batch.
                    mlm_ids, mlm_labels = (
                        flatten_mlms["input_ids"][batch_size *
                                                  (i): batch_size * (i + 1)],
                        flatten_mlms["labels"][batch_size *
                                               (i): batch_size * (i + 1)],
                    )
                    input_ids = torch.zeros_like(mlm_ids)
                    attention_mask = torch.zeros_like(mlm_ids)
                    for _i, encoding in enumerate(encodings):
                        _input_ids, _attention_mask = (
                            torch.tensor(encoding["input_ids"]),
                            torch.tensor(encoding["attention_mask"]),
                        )
                        input_ids[_i, : len(_input_ids)] = _input_ids
                        attention_mask[_i, : len(
                            _attention_mask)] = _attention_mask
                    # LM labels are the inputs shifted one position left.
                    lm_labels = input_ids[:, 1:]
                    if 'prefixLM_collator' in mlm_collator:
                        plm_att_mask, prefix_lm_labels = mlm_collator['prefixLM_collator'](
                            attention_mask, input_ids)
                        lm_labels = prefix_lm_labels[:, 1:]
                        dict_batch[f"{txt_key}_prefixlm_masks"] = plm_att_mask
                    dict_batch[txt_key] = texts
                    dict_batch[f"{txt_key}_ids"] = input_ids
                    dict_batch[f"{txt_key}_labels"] = torch.full_like(
                        input_ids, -100)
                    dict_batch[f"{txt_key}_ids_mlm"] = mlm_ids
                    dict_batch[f"{txt_key}_labels_mlm"] = mlm_labels
                    dict_batch[f"{txt_key}_labels_lm"] = lm_labels
                    dict_batch[f"{txt_key}_masks"] = attention_mask
                    dict_batch.update(self.get_flm_batch(
                        attention_mask, input_ids, mlm_collator, txt_key))
                dict_batch_sep_mlm['batch'].append(dict_batch)
                if not is_sep_mlm:
                    dict_batch['is_sep_mlm'] = False
                    return dict_batch
            if is_sep_mlm:
                dict_batch_sep_mlm['is_sep_mlm'] = True
                return dict_batch_sep_mlm
        return raw_dict_batch
    def get_flm_batch(self, attention_mask, input_ids, mlm_collator, txt_key):
        """Build FLM-specific inputs: a fully-``<mask>``-ed id sequence (the
        first and final real tokens are kept) plus random FLM masks/labels."""
        dict_batch = {}
        all_mask_ids = attention_mask * \
            self.tokenizer.convert_tokens_to_ids('<mask>')
        text_len = attention_mask.sum(1)
        # Keep the BOS token and the last real (eos) token un-masked.
        all_mask_ids[:, 0] = input_ids[:, 0]
        all_mask_ids[torch.arange(len(
            text_len)), text_len - 1] = input_ids[torch.arange(len(text_len)), text_len - 1]
        dict_batch[f"{txt_key}_all_masks_ids"] = all_mask_ids
        flm_random_ids, flm_masks, flm_label = mlm_collator['flm_collator'](
            attention_mask)
        dict_batch[f"{txt_key}_flm_mask_ids"] = flm_random_ids
        dict_batch[f"{txt_key}_flm_masks"] = flm_masks
        dict_batch[f"{txt_key}_flm_labels"] = flm_label
        return dict_batch
| 12,994 | 38.618902 | 111 | py |
FLM | FLM-master/flm/utils/utils.py | import torch
import torch.nn as nn
from flm.modules import heads, objectives, meter_utils
@torch.no_grad()
def adapt_vocab_size(state_dict, new_vocab_size):
    """Grow every vocabulary-sized tensor in *state_dict* to *new_vocab_size*.

    Touches the word-embedding tables and the decoder weight / output bias of
    each LM head; everything else is left alone.  Returns the (mutated)
    state dict.
    """
    embedding_markers = ('embeddings.word_embeddings.weight',
                         'fusion_token_embedding.word_embeddings.weight')
    head_prefixes = ('mlm_score', 'lm_score', 'lm_score_r', 'lm_score_f')
    for name in state_dict.keys():
        if any(marker in name for marker in embedding_markers):
            expand_vocab(name, state_dict, new_vocab_size)
        for prefix in head_prefixes:
            if prefix + '.decoder.weight' in name or prefix + '.bias' in name:
                expand_vocab(name, state_dict, new_vocab_size)
    return state_dict
def expand_vocab(name, state_dict, new_vocab_size):
    """Replace ``state_dict[name]`` with a copy grown to *new_vocab_size* rows.

    No-op when the row count already matches.
    """
    tensor = state_dict[name]
    old_size = tensor.shape[0]
    if old_size == new_vocab_size:
        return
    state_dict[name] = expand_tensor(tensor, new_vocab_size)
    print(' replace vocab size of {} from {} to {}'.format(
        name, old_size, new_vocab_size))
def expand_tensor(value, new_vocab_size):
    """Return a tensor with *new_vocab_size* rows whose leading rows copy *value*.

    Handles both 1-D (bias) and 2-D (embedding/weight) inputs; the newly
    appended rows are drawn from N(0, 0.02).
    """
    old_vocab_size = value.shape[0]
    assert new_vocab_size > old_vocab_size
    if value.ndim == 1:
        expanded = torch.zeros(new_vocab_size)
    else:
        expanded = torch.zeros(new_vocab_size, value.shape[1])
    expanded.data.normal_(mean=0.0, std=0.02)
    expanded[:old_vocab_size] = value
    return expanded
| 2,054 | 36.363636 | 114 | py |
FLM | FLM-master/flm/utils/whole_word_masking.py | import random
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
import torch
from torch.nn.utils.rnn import pad_sequence
# from ..file_utils import PaddingStrategy
# from ..modeling_utils import PreTrainedModel
from transformers.tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase
from transformers import (
DataCollatorForLanguageModeling)
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
    """
    Data collator used for language modeling.
    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for masked language modeling

    Adapted from HuggingFace's ``DataCollatorForWholeWordMask``: WordPiece
    continuation pieces (``##``-prefixed) are masked together with the word
    they belong to.
    """
    def __call__(
        self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
    ) -> Dict[str, torch.Tensor]:
        """Collate *examples* and apply whole-word masking; returns masked
        ``input_ids`` plus ``labels`` (-100 everywhere except masked tokens)."""
        if isinstance(examples[0], (dict, BatchEncoding)):
            input_ids = [e["input_ids"] for e in examples]
        else:
            input_ids = examples
            examples = [{"input_ids": e} for e in examples]
        batch_input = _collate_batch(input_ids, self.tokenizer)
        mask_labels = []
        for e in examples:
            ref_tokens = []
            for id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(id)
                # NOTE(review): ids of '<bos>'/'<eos>' are forced back to those
                # literal token strings — presumably _convert_id_to_token does
                # not round-trip these added specials; confirm.
                if id == self.tokenizer.convert_tokens_to_ids('<bos>'):
                    token = '<bos>'
                if id == self.tokenizer.convert_tokens_to_ids('<eos>'):
                    token = '<eos>'
                ref_tokens.append(token)
            # For Chinese tokens, we need extra inf to mark sub-word, e.g [喜,欢]-> [喜,##欢]
            if "chinese_ref" in e:
                ref_pos = tolist(e["chinese_ref"])
                len_seq = len(e["input_ids"])
                for i in range(len_seq):
                    if i in ref_pos:
                        ref_tokens[i] = "##" + ref_tokens[i]
            mask_labels.append(self._whole_word_mask(ref_tokens))
        batch_mask = _collate_batch(mask_labels, self.tokenizer)
        inputs, labels = self.mask_tokens(batch_input, batch_mask)
        return {"input_ids": inputs, "labels": labels}
    def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
        """
        Get 0/1 labels for masked tokens with whole word mask proxy
        """
        # Group each word's start piece with its "##" continuation pieces so
        # a word is always masked as a unit.
        cand_indexes = []
        for (i, token) in enumerate(input_tokens):
            if token == "[CLS]" or token == "[SEP]":
                continue
            if len(cand_indexes) >= 1 and token.startswith("##"):
                cand_indexes[-1].append(i)
            else:
                cand_indexes.append([i])
        random.shuffle(cand_indexes)
        num_to_predict = min(max_predictions, max(
            1, int(round(len(input_tokens) * self.mlm_probability))))
        masked_lms = []
        covered_indexes = set()
        for index_set in cand_indexes:
            if len(masked_lms) >= num_to_predict:
                break
            # If adding a whole-word mask would exceed the maximum number of
            # predictions, then just skip this candidate.
            if len(masked_lms) + len(index_set) > num_to_predict:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                covered_indexes.add(index)
                masked_lms.append(index)
        assert len(covered_indexes) == len(masked_lms)
        mask_labels = [
            1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
        return mask_labels
    def mask_tokens(self, inputs: torch.Tensor, mask_labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
        'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.
        """
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
            )
        labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
        probability_matrix = mask_labels
        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        # Never mask special or padding tokens.
        probability_matrix.masked_fill_(torch.tensor(
            special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)
        masked_indices = probability_matrix.bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens
        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(
            labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(
            self.tokenizer.mask_token)
        # 10% of the time, we replace masked input tokens with random word
        # (0.5 of the remaining 20% of masked positions).
        indices_random = torch.bernoulli(torch.full(
            labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(
            len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]
        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels
def _collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
# Check if padding is necessary.
length_of_first = examples[0].size(0)
are_tensors_same_length = all(
x.size(0) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return torch.stack(examples, dim=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(x.size(0) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * \
pad_to_multiple_of
result = examples[0].new_full(
[len(examples), max_length], tokenizer.pad_token_id)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0]:] = example
return result
def tolist(x: Union[List[Any], torch.Tensor]):
    """Return `x` as a plain Python list, converting from a tensor when needed."""
    if isinstance(x, torch.Tensor):
        return x.tolist()
    return x
| 7,709 | 40.902174 | 165 | py |
FLM | FLM-master/flm/transforms/transform.py | from .utils import (
inception_normalize,
imagenet_normalize,
MinMaxResize,
)
from PIL import Image
from torchvision import transforms
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from .randaug import RandAugment
def pixelbert_transform(size=800):
    """Pixel-BERT style preprocessing: min/max resize, tensorize, inception-normalize."""
    longer = int((1333 / 800) * size)
    steps = [
        MinMaxResize(shorter=size, longer=longer),
        transforms.ToTensor(),
        inception_normalize,
    ]
    return transforms.Compose(steps)
def pixelbert_transform_randaug(size=800):
    """Pixel-BERT preprocessing with RandAugment(2, 9) prepended."""
    longer = int((1333 / 800) * size)
    pipeline = transforms.Compose(
        [
            MinMaxResize(shorter=size, longer=longer),
            transforms.ToTensor(),
            inception_normalize,
        ]
    )
    pipeline.transforms.insert(0, RandAugment(2, 9))
    return pipeline
def imagenet_transform(size=800):
    """Resize + center-crop preprocessing with ImageNet normalization."""
    steps = [
        Resize(size, interpolation=Image.BICUBIC),
        CenterCrop(size),
        transforms.ToTensor(),
        imagenet_normalize,
    ]
    return transforms.Compose(steps)
def imagenet_transform_randaug(size=800):
    """ImageNet-normalized preprocessing with RandAugment(2, 9) prepended."""
    pipeline = transforms.Compose(
        [
            Resize(size, interpolation=Image.BICUBIC),
            CenterCrop(size),
            transforms.ToTensor(),
            imagenet_normalize,
        ]
    )
    pipeline.transforms.insert(0, RandAugment(2, 9))
    return pipeline
def vit_transform(size=800):
    """Resize + center-crop preprocessing with inception normalization (ViT)."""
    steps = [
        Resize(size, interpolation=Image.BICUBIC),
        CenterCrop(size),
        transforms.ToTensor(),
        inception_normalize,
    ]
    return transforms.Compose(steps)
def vit_transform_randaug(size=800):
    """ViT preprocessing with RandAugment(2, 9) prepended."""
    pipeline = transforms.Compose(
        [
            Resize(size, interpolation=Image.BICUBIC),
            CenterCrop(size),
            transforms.ToTensor(),
            inception_normalize,
        ]
    )
    pipeline.transforms.insert(0, RandAugment(2, 9))
    return pipeline
def clip_transform(size):
    """CLIP preprocessing: resize, crop, RGB-convert, tensorize, CLIP-normalize."""
    steps = [
        Resize(size, interpolation=Image.BICUBIC),
        CenterCrop(size),
        lambda image: image.convert("RGB"),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073),
                  (0.26862954, 0.26130258, 0.27577711)),
    ]
    return Compose(steps)
def clip_transform_randaug(size):
    """CLIP preprocessing with RandAugment: the image is converted to RGB,
    augmented, converted to RGBA, then fed to the CLIP pipeline."""
    pipeline = Compose([
        Resize(size, interpolation=Image.BICUBIC),
        CenterCrop(size),
        lambda image: image.convert("RGB"),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073),
                  (0.26862954, 0.26130258, 0.27577711)),
    ])
    # Equivalent to three insert(0, ...) calls in RGBA/RandAugment/RGB order.
    head = [lambda image: image.convert('RGB'),
            RandAugment(2, 9),
            lambda image: image.convert('RGBA')]
    pipeline.transforms[0:0] = head
    return pipeline
def mae_transform_randaug(size):
    """MAE training preprocessing: RGB-convert + RandAugment + RGBA-convert,
    then random resized crop, flip, and ImageNet normalization."""
    pipeline = Compose([
        transforms.RandomResizedCrop(size, scale=(
            0.2, 1.0), interpolation=3),  # 3 is bicubic
        transforms.RandomHorizontalFlip(),
        lambda image: image.convert("RGB"),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
                             0.229, 0.224, 0.225])
    ])
    # Equivalent to three insert(0, ...) calls in RGBA/RandAugment/RGB order.
    head = [lambda image: image.convert('RGB'),
            RandAugment(2, 9),
            lambda image: image.convert('RGBA')]
    pipeline.transforms[0:0] = head
    return pipeline
def mae_transform(size):
    """MAE evaluation preprocessing with ImageNet normalization."""
    steps = [
        Resize(size, interpolation=Image.BICUBIC),
        CenterCrop(size),
        lambda image: image.convert("RGB"),
        ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
                             0.229, 0.224, 0.225])
    ]
    return Compose(steps)
| 3,695 | 26.176471 | 83 | py |
FLM | FLM-master/flm/transforms/utils.py | from torchvision import transforms
from PIL import Image
class MinMaxResize:
    """Resize so the shorter side equals `shorter`, cap the longer side at
    `longer`, then snap both sides down to multiples of 32."""

    def __init__(self, shorter=800, longer=1333):
        self.min = shorter
        self.max = longer

    def __call__(self, x):
        w, h = x.size
        scale = self.min / min(w, h)
        newh, neww = (self.min, scale * w) if h < w else (scale * h, self.min)
        longest = max(newh, neww)
        if longest > self.max:
            shrink = self.max / longest
            newh = newh * shrink
            neww = neww * shrink
        # Round to nearest integer, then floor to a multiple of 32.
        newh, neww = int(newh + 0.5), int(neww + 0.5)
        newh, neww = (newh // 32) * 32, (neww // 32) * 32
        return x.resize((neww, newh), resample=Image.BICUBIC)
class UnNormalize(object):
    """Invert torchvision's Normalize in place: t = t * std + mean, per channel."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        Returns:
            Tensor: Normalized image.
        """
        # The normalize code -> t.sub_(m).div_(s); this undoes it channel-wise.
        for channel, m, s in zip(tensor, self.mean, self.std):
            channel.mul_(s).add_(m)
        return tensor
# This is simple maximum entropy normalization performed in Inception paper:
# maps [0, 1] inputs to [-1, 1].
inception_normalize = transforms.Compose(
    [transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
# ViT uses simple non-biased inception normalization; this undoes it.
# https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py#L132
inception_unnormalize = transforms.Compose(
    [UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
# ImageNet normalize: standard ImageNet channel statistics.
imagenet_normalize = transforms.Compose(
    [transforms.Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225])]
)
| 1,818 | 27.873016 | 98 | py |
FLM | FLM-master/flm/transforms/randaug.py | # code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL
import PIL.ImageOps
import PIL.ImageEnhance
import PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v):  # [-0.3, 0.3]
    """Shear horizontally by magnitude v, with a random sign flip."""
    assert -0.3 <= v <= 0.3
    sign = -1.0 if random.random() > 0.5 else 1.0
    return img.transform(img.size, PIL.Image.AFFINE, (1, sign * v, 0, 0, 1, 0))
def ShearY(img, v):  # [-0.3, 0.3]
    """Shear vertically by magnitude v, with a random sign flip."""
    assert -0.3 <= v <= 0.3
    sign = -1.0 if random.random() > 0.5 else 1.0
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, sign * v, 1, 0))
def TranslateX(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate horizontally by fraction v of the width, with a random sign."""
    assert -0.45 <= v <= 0.45
    sign = -1.0 if random.random() > 0.5 else 1.0
    offset = sign * v * img.size[0]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, offset, 0, 1, 0))
def TranslateXabs(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate horizontally by v pixels, with a random sign."""
    assert 0 <= v
    sign = -1.0 if random.random() > 0.5 else 1.0
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, sign * v, 0, 1, 0))
def TranslateY(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate vertically by fraction v of the height, with a random sign."""
    assert -0.45 <= v <= 0.45
    sign = -1.0 if random.random() > 0.5 else 1.0
    offset = sign * v * img.size[1]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, offset))
def TranslateYabs(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate vertically by v pixels, with a random sign."""
    assert 0 <= v
    sign = -1.0 if random.random() > 0.5 else 1.0
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, sign * v))
def Rotate(img, v):  # [-30, 30]
    """Rotate by v degrees, with a random sign."""
    assert -30 <= v <= 30
    sign = -1 if random.random() > 0.5 else 1
    return img.rotate(sign * v)
def AutoContrast(img, _):
    """Maximize image contrast; the magnitude argument is ignored."""
    return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
    """Invert (negate) the image; the magnitude argument is ignored."""
    return PIL.ImageOps.invert(img)
def Equalize(img, _):
    """Equalize the image histogram; the magnitude argument is ignored."""
    return PIL.ImageOps.equalize(img)
def Flip(img, _):  # not from the paper
    """Mirror the image left-right; the magnitude argument is ignored."""
    return PIL.ImageOps.mirror(img)
def Solarize(img, v):  # [0, 256]
    """Invert all pixel values above threshold v."""
    assert 0 <= v <= 256
    return PIL.ImageOps.solarize(img, v)
def SolarizeAdd(img, addition=0, threshold=128):
    """Add `addition` to every pixel (clipped to [0, 255]), then solarize at `threshold`.

    Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    `int` gives the same platform-default integer dtype here.
    """
    img_np = np.array(img).astype(int)
    img_np = img_np + addition
    img_np = np.clip(img_np, 0, 255)
    img_np = img_np.astype(np.uint8)
    img = Image.fromarray(img_np)
    return PIL.ImageOps.solarize(img, threshold)
def Posterize(img, v):  # [4, 8]
    """Reduce the image to int(v) bits per channel (at least 1)."""
    bits = max(1, int(v))
    return PIL.ImageOps.posterize(img, bits)
def Contrast(img, v):  # [0.1,1.9]
    """Adjust contrast by factor v (1.0 leaves the image unchanged)."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Contrast(img)
    return enhancer.enhance(v)
def Color(img, v):  # [0.1,1.9]
    """Adjust color saturation by factor v (1.0 leaves the image unchanged)."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Color(img)
    return enhancer.enhance(v)
def Brightness(img, v):  # [0.1,1.9]
    """Adjust brightness by factor v (1.0 leaves the image unchanged)."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Brightness(img)
    return enhancer.enhance(v)
def Sharpness(img, v):  # [0.1,1.9]
    """Adjust sharpness by factor v (1.0 leaves the image unchanged)."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Sharpness(img)
    return enhancer.enhance(v)
def Cutout(img, v):  # [0, 60] => percentage: [0, 0.2]
    """Cut out a square whose side is fraction v of the image width."""
    assert 0.0 <= v <= 0.2
    if v <= 0.0:
        return img
    return CutoutAbs(img, v * img.size[0])
def CutoutAbs(img, v):  # [0, 60] => percentage: [0, 0.2]
    """Paint a gray v x v square at a random position onto a copy of the image."""
    if v < 0:
        return img
    w, h = img.size
    # Random center; the box is clamped to the image bounds.
    cx = np.random.uniform(w)
    cy = np.random.uniform(h)
    x0 = int(max(0, cx - v / 2.0))
    y0 = int(max(0, cy - v / 2.0))
    box = (x0, y0, min(w, x0 + v), min(h, y0 + v))
    patched = img.copy()
    PIL.ImageDraw.Draw(patched).rectangle(box, (125, 123, 114))
    return patched
def SamplePairing(imgs):  # [0, 0.4]
    """Return an op that blends its input with a random image from `imgs`."""
    def f(img1, v):
        choice = np.random.choice(len(imgs))
        partner = PIL.Image.fromarray(imgs[choice])
        return PIL.Image.blend(img1, partner, v)
    return f
def Identity(img, v):
    """No-op augmentation: return the image unchanged."""
    return img
def augment_list():
    """Return the (op, min_magnitude, max_magnitude) triples used by RandAugment.

    Magnitude ranges follow the EfficientNet autoaugment policy:
    https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
    (Invert and CutoutAbs are intentionally disabled.)
    """
    return [
        (AutoContrast, 0, 1),
        (Equalize, 0, 1),
        (Rotate, 0, 30),
        (Posterize, 0, 4),
        (Solarize, 0, 256),
        (SolarizeAdd, 0, 110),
        (Color, 0.1, 1.9),
        (Contrast, 0.1, 1.9),
        (Brightness, 0.1, 1.9),
        (Sharpness, 0.1, 1.9),
        (ShearX, 0.0, 0.3),
        (ShearY, 0.0, 0.3),
        (TranslateXabs, 0.0, 100),
        (TranslateYabs, 0.0, 100),
    ]
class Lighting(object):
    """Lighting noise(AlexNet - style PCA - based noise)"""

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)

    def __call__(self, img):
        # alphastd == 0 disables the perturbation entirely.
        if self.alphastd == 0:
            return img
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        # Scale each eigenvector by its alpha draw and eigenvalue, then sum
        # to get one RGB offset that is broadcast over the whole image.
        scaled = self.eigvec.type_as(img).clone()
        scaled = scaled.mul(alpha.view(1, 3).expand(3, 3))
        scaled = scaled.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = scaled.sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
    """
    Zero out one random `length` x `length` square of a CHW tensor, in place.
    Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
    """

    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        h, w = img.size(1), img.size(2)
        # Random square center; the box is clipped to the image bounds.
        cy, cx = np.random.randint(h), np.random.randint(w)
        half = self.length // 2
        top, bottom = np.clip(cy - half, 0, h), np.clip(cy + half, 0, h)
        left, right = np.clip(cx - half, 0, w), np.clip(cx + half, 0, w)
        mask = np.ones((h, w), np.float32)
        mask[top:bottom, left:right] = 0.0
        img *= torch.from_numpy(mask).expand_as(img)
        return img
class RandAugment:
    """Apply `n` randomly chosen augmentation ops at shared magnitude `m`."""

    def __init__(self, n, m):
        self.n = n
        self.m = m  # magnitude in [0, 30]
        self.augment_list = augment_list()

    def __call__(self, img):
        chosen = random.choices(self.augment_list, k=self.n)
        scale = float(self.m) / 30
        for op, lo, hi in chosen:
            # Map the shared magnitude linearly into the op's own range.
            img = op(img, scale * float(hi - lo) + lo)
        return img
| 7,008 | 24.673993 | 134 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/DWT_IDWT_layer.py | """
Custom PyTorch layers implementing DWT and IDWT for 1D, 2D and 3D tensors; boundary extension is not handled.
Exact reconstruction is only possible when the image has even numbers of rows and columns and the reconstruction low-pass filter has length 2; otherwise errors appear at the borders.
"""
import numpy as np
import math
from torch.nn import Module
from DWT_IDWT_Functions import *
import pywt
__all__ = ['DWT_1D', 'IDWT_1D', 'DWT_2D', 'IDWT_2D', 'DWT_3D', 'IDWT_3D', 'DWT_2D_tiny']
class DWT_1D(Module):
    """
    input: the 1D data to be decomposed -- (N, C, Length)
    output: lfc -- (N, C, Length/2)
            hfc -- (N, C, Length/2)
    """
    def __init__(self, wavename):
        """
        1D discrete wavelet transform (DWT) for sequence decomposition.
        :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
        """
        super(DWT_1D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        # Reconstruction filters are used as analysis rows of the transform matrix.
        self.band_low = wavelet.rec_lo
        self.band_high = wavelet.rec_hi
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)
    def get_matrix(self):
        """
        Generate the transform matrices: \mathcal{L}, \mathcal{H}
        :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
        """
        L1 = self.input_height
        L = math.floor(L1 / 2)
        matrix_h = np.zeros( ( L, L1 + self.band_length - 2 ) )
        matrix_g = np.zeros( ( L1 - L, L1 + self.band_length - 2 ) )
        end = None if self.band_length_half == 1 else (-self.band_length_half+1)
        # Banded low-pass matrix: the filter slides two columns per row (stride 2).
        index = 0
        for i in range(L):
            for j in range(self.band_length):
                matrix_h[i, index+j] = self.band_low[j]
            index += 2
        # Same banded layout with the high-pass filter.
        index = 0
        for i in range(L1 - L):
            for j in range(self.band_length):
                matrix_g[i, index+j] = self.band_high[j]
            index += 2
        # Trim the extra columns introduced by the filter length.
        matrix_h = matrix_h[:,(self.band_length_half-1):end]
        matrix_g = matrix_g[:,(self.band_length_half-1):end]
        if torch.cuda.is_available():
            self.matrix_low = torch.Tensor(matrix_h).cuda()
            self.matrix_high = torch.Tensor(matrix_g).cuda()
        else:
            self.matrix_low = torch.Tensor(matrix_h)
            self.matrix_high = torch.Tensor(matrix_g)
    def forward(self, input):
        """
        input_low_frequency_component = \mathcal{L} * input
        input_high_frequency_component = \mathcal{H} * input
        :param input: the data to be decomposed
        :return: the low-frequency and high-frequency components of the input data
        """
        assert len(input.size()) == 3
        # Matrices are rebuilt on every call because the signal length may vary.
        self.input_height = input.size()[-1]
        self.get_matrix()
        return DWTFunction_1D.apply(input, self.matrix_low, self.matrix_high)
class IDWT_1D(Module):
    """
    input: lfc -- (N, C, Length/2)
           hfc -- (N, C, Length/2)
    output: the original data -- (N, C, Length)
    """
    def __init__(self, wavename):
        """
        1D inverse DWT (IDWT) for sequence reconstruction.
        :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
        """
        super(IDWT_1D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        # Synthesis uses the time-reversed decomposition filters.
        self.band_low = wavelet.dec_lo
        self.band_high = wavelet.dec_hi
        self.band_low.reverse()
        self.band_high.reverse()
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)
    def get_matrix(self):
        """
        Generate the transform matrices: \mathcal{L}, \mathcal{H}
        :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
        """
        L1 = self.input_height
        L = math.floor(L1 / 2)
        matrix_h = np.zeros( ( L, L1 + self.band_length - 2 ) )
        matrix_g = np.zeros( ( L1 - L, L1 + self.band_length - 2 ) )
        end = None if self.band_length_half == 1 else (-self.band_length_half+1)
        # Banded low-pass matrix: the filter slides two columns per row (stride 2).
        index = 0
        for i in range(L):
            for j in range(self.band_length):
                matrix_h[i, index+j] = self.band_low[j]
            index += 2
        # Same banded layout with the high-pass filter.
        index = 0
        for i in range(L1 - L):
            for j in range(self.band_length):
                matrix_g[i, index+j] = self.band_high[j]
            index += 2
        # Trim the extra columns introduced by the filter length.
        matrix_h = matrix_h[:,(self.band_length_half-1):end]
        matrix_g = matrix_g[:,(self.band_length_half-1):end]
        if torch.cuda.is_available():
            self.matrix_low = torch.Tensor(matrix_h).cuda()
            self.matrix_high = torch.Tensor(matrix_g).cuda()
        else:
            self.matrix_low = torch.Tensor(matrix_h)
            self.matrix_high = torch.Tensor(matrix_g)
    def forward(self, L, H):
        """
        :param L: the low-frequency component of the original data
        :param H: the high-frequency component of the original data
        :return: the original data
        """
        assert len(L.size()) == len(H.size()) == 3
        # The reconstructed length is the sum of both component lengths.
        self.input_height = L.size()[-1] + H.size()[-1]
        self.get_matrix()
        return IDWTFunction_1D.apply(L, H, self.matrix_low, self.matrix_high)
class DWT_2D_tiny(Module):
    """
    input: the 2D data to be decomposed -- (N, C, H, W)
    output -- lfc: (N, C, H/2, W/2)
              #hfc_lh: (N, C, H/2, W/2)
              #hfc_hl: (N, C, H/2, W/2)
              #hfc_hh: (N, C, H/2, W/2)
    DWT_2D_tiny only outputs the low-frequency component, which is used in WaveCNet;
    the all four components could be get using DWT_2D, which is used in WaveUNet.
    """
    def __init__(self, wavename):
        """
        2D discrete wavelet transform (DWT) for 2D image decomposition
        :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
        """
        super(DWT_2D_tiny, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        # Reconstruction filters are used as analysis rows of the transform matrix.
        self.band_low = wavelet.rec_lo
        self.band_high = wavelet.rec_hi
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)
    def get_matrix(self):
        """
        Generate the transform matrices: \mathcal{L}, \mathcal{H}
        :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
        """
        # Prototype matrices are built for the longer axis, then sliced per axis.
        L1 = np.max((self.input_height, self.input_width))
        L = math.floor(L1 / 2)
        matrix_h = np.zeros( ( L, L1 + self.band_length - 2 ) )
        matrix_g = np.zeros( ( L1 - L, L1 + self.band_length - 2 ) )
        end = None if self.band_length_half == 1 else (-self.band_length_half+1)
        # Banded low-pass matrix: the filter slides two columns per row (stride 2).
        index = 0
        for i in range(L):
            for j in range(self.band_length):
                matrix_h[i, index+j] = self.band_low[j]
            index += 2
        matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
        matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
        # Same banded layout with the high-pass filter.
        index = 0
        for i in range(L1 - L):
            for j in range(self.band_length):
                matrix_g[i, index+j] = self.band_high[j]
            index += 2
        matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)),0:(self.input_height + self.band_length - 2)]
        matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)),0:(self.input_width + self.band_length - 2)]
        # Trim padding columns; the width-axis matrices are transposed because
        # they right-multiply the input.
        matrix_h_0 = matrix_h_0[:,(self.band_length_half-1):end]
        matrix_h_1 = matrix_h_1[:,(self.band_length_half-1):end]
        matrix_h_1 = np.transpose(matrix_h_1)
        matrix_g_0 = matrix_g_0[:,(self.band_length_half-1):end]
        matrix_g_1 = matrix_g_1[:,(self.band_length_half-1):end]
        matrix_g_1 = np.transpose(matrix_g_1)
        if torch.cuda.is_available():
            self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
            self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
            self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
            self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
        else:
            self.matrix_low_0 = torch.Tensor(matrix_h_0)
            self.matrix_low_1 = torch.Tensor(matrix_h_1)
            self.matrix_high_0 = torch.Tensor(matrix_g_0)
            self.matrix_high_1 = torch.Tensor(matrix_g_1)
    def forward(self, input):
        """
        input_lfc = \mathcal{L} * input * \mathcal{L}^T
        #input_hfc_lh = \mathcal{H} * input * \mathcal{L}^T
        #input_hfc_hl = \mathcal{L} * input * \mathcal{H}^T
        #input_hfc_hh = \mathcal{H} * input * \mathcal{H}^T
        :param input: the 2D data to be decomposed
        :return: the low-frequency component of the input 2D data
        """
        assert len(input.size()) == 4
        # Matrices are rebuilt on every call because the image size may vary.
        self.input_height = input.size()[-2]
        self.input_width = input.size()[-1]
        self.get_matrix()
        return DWTFunction_2D_tiny.apply(input, self.matrix_low_0, self.matrix_low_1, self.matrix_high_0, self.matrix_high_1)
class DWT_2D(Module):
    """
    input: the 2D data to be decomposed -- (N, C, H, W)
    output -- lfc: (N, C, H/2, W/2)
              hfc_lh: (N, C, H/2, W/2)
              hfc_hl: (N, C, H/2, W/2)
              hfc_hh: (N, C, H/2, W/2)
    """
    def __init__(self, wavename):
        """
        2D discrete wavelet transform (DWT) for 2D image decomposition
        :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
        """
        super(DWT_2D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        # Reconstruction filters are used as analysis rows of the transform matrix.
        self.band_low = wavelet.rec_lo
        self.band_high = wavelet.rec_hi
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)
    def get_matrix(self):
        """
        Generate the transform matrices: \mathcal{L}, \mathcal{H}
        :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
        """
        # Prototype matrices are built for the longer axis, then sliced per axis.
        L1 = np.max((self.input_height, self.input_width))
        L = math.floor(L1 / 2)
        matrix_h = np.zeros( ( L, L1 + self.band_length - 2 ) )
        matrix_g = np.zeros( ( L1 - L, L1 + self.band_length - 2 ) )
        end = None if self.band_length_half == 1 else (-self.band_length_half+1)
        # Banded low-pass matrix: the filter slides two columns per row (stride 2).
        index = 0
        for i in range(L):
            for j in range(self.band_length):
                matrix_h[i, index+j] = self.band_low[j]
            index += 2
        matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
        matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
        # Same banded layout with the high-pass filter.
        index = 0
        for i in range(L1 - L):
            for j in range(self.band_length):
                matrix_g[i, index+j] = self.band_high[j]
            index += 2
        matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)),0:(self.input_height + self.band_length - 2)]
        matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)),0:(self.input_width + self.band_length - 2)]
        # Trim padding columns; the width-axis matrices are transposed because
        # they right-multiply the input.
        matrix_h_0 = matrix_h_0[:,(self.band_length_half-1):end]
        matrix_h_1 = matrix_h_1[:,(self.band_length_half-1):end]
        matrix_h_1 = np.transpose(matrix_h_1)
        matrix_g_0 = matrix_g_0[:,(self.band_length_half-1):end]
        matrix_g_1 = matrix_g_1[:,(self.band_length_half-1):end]
        matrix_g_1 = np.transpose(matrix_g_1)
        if torch.cuda.is_available():
            self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
            self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
            self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
            self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
        else:
            self.matrix_low_0 = torch.Tensor(matrix_h_0)
            self.matrix_low_1 = torch.Tensor(matrix_h_1)
            self.matrix_high_0 = torch.Tensor(matrix_g_0)
            self.matrix_high_1 = torch.Tensor(matrix_g_1)
    def forward(self, input):
        """
        input_lfc = \mathcal{L} * input * \mathcal{L}^T
        input_hfc_lh = \mathcal{H} * input * \mathcal{L}^T
        input_hfc_hl = \mathcal{L} * input * \mathcal{H}^T
        input_hfc_hh = \mathcal{H} * input * \mathcal{H}^T
        :param input: the 2D data to be decomposed
        :return: the low-frequency and high-frequency components of the input 2D data
        """
        assert len(input.size()) == 4
        # Matrices are rebuilt on every call because the image size may vary.
        self.input_height = input.size()[-2]
        self.input_width = input.size()[-1]
        self.get_matrix()
        return DWTFunction_2D.apply(input, self.matrix_low_0, self.matrix_low_1, self.matrix_high_0, self.matrix_high_1)
class IDWT_2D(Module):
    """
    input:  lfc -- (N, C, H/2, W/2)
            hfc_lh -- (N, C, H/2, W/2)
            hfc_hl -- (N, C, H/2, W/2)
            hfc_hh -- (N, C, H/2, W/2)
    output: the original 2D data -- (N, C, H, W)
    """
    def __init__(self, wavename):
        """
        2D inverse DWT (IDWT) for 2D image reconstruction
        :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
        """
        super(IDWT_2D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        # Synthesis uses the time-reversed decomposition filters.
        self.band_low = wavelet.dec_lo
        self.band_low.reverse()
        self.band_high = wavelet.dec_hi
        self.band_high.reverse()
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)
    def get_matrix(self):
        """
        Generate the transform matrices: \mathcal{L}, \mathcal{H}
        :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
        """
        # Prototype matrices are built for the longer axis, then sliced per axis.
        L1 = np.max((self.input_height, self.input_width))
        L = math.floor(L1 / 2)
        matrix_h = np.zeros( ( L, L1 + self.band_length - 2 ) )
        matrix_g = np.zeros( ( L1 - L, L1 + self.band_length - 2 ) )
        end = None if self.band_length_half == 1 else (-self.band_length_half+1)
        # Banded low-pass matrix: the filter slides two columns per row (stride 2).
        index = 0
        for i in range(L):
            for j in range(self.band_length):
                matrix_h[i, index+j] = self.band_low[j]
            index += 2
        matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
        matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
        # Same banded layout with the high-pass filter.
        index = 0
        for i in range(L1 - L):
            for j in range(self.band_length):
                matrix_g[i, index+j] = self.band_high[j]
            index += 2
        matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)),0:(self.input_height + self.band_length - 2)]
        matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)),0:(self.input_width + self.band_length - 2)]
        # Trim padding columns; the width-axis matrices are transposed because
        # they right-multiply the input.
        matrix_h_0 = matrix_h_0[:,(self.band_length_half-1):end]
        matrix_h_1 = matrix_h_1[:,(self.band_length_half-1):end]
        matrix_h_1 = np.transpose(matrix_h_1)
        matrix_g_0 = matrix_g_0[:,(self.band_length_half-1):end]
        matrix_g_1 = matrix_g_1[:,(self.band_length_half-1):end]
        matrix_g_1 = np.transpose(matrix_g_1)
        if torch.cuda.is_available():
            self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
            self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
            self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
            self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
        else:
            self.matrix_low_0 = torch.Tensor(matrix_h_0)
            self.matrix_low_1 = torch.Tensor(matrix_h_1)
            self.matrix_high_0 = torch.Tensor(matrix_g_0)
            self.matrix_high_1 = torch.Tensor(matrix_g_1)
    def forward(self, LL, LH, HL, HH):
        """
        recontructing the original 2D data
        the original 2D data = \mathcal{L}^T * lfc * \mathcal{L}
                             + \mathcal{H}^T * hfc_lh * \mathcal{L}
                             + \mathcal{L}^T * hfc_hl * \mathcal{H}
                             + \mathcal{H}^T * hfc_hh * \mathcal{H}
        :param LL: the low-frequency component
        :param LH: the high-frequency component, hfc_lh
        :param HL: the high-frequency component, hfc_hl
        :param HH: the high-frequency component, hfc_hh
        :return: the original 2D data
        """
        assert len(LL.size()) == len(LH.size()) == len(HL.size()) == len(HH.size()) == 4
        # The reconstructed size is the sum of the component sizes per axis.
        self.input_height = LL.size()[-2] + HH.size()[-2]
        self.input_width = LL.size()[-1] + HH.size()[-1]
        self.get_matrix()
        return IDWTFunction_2D.apply(LL, LH, HL, HH, self.matrix_low_0, self.matrix_low_1, self.matrix_high_0, self.matrix_high_1)
class DWT_3D(Module):
    """
    input: the 3D data to be decomposed -- (N, C, D, H, W)
    output: lfc -- (N, C, D/2, H/2, W/2)
            hfc_llh -- (N, C, D/2, H/2, W/2)
            hfc_lhl -- (N, C, D/2, H/2, W/2)
            hfc_lhh -- (N, C, D/2, H/2, W/2)
            hfc_hll -- (N, C, D/2, H/2, W/2)
            hfc_hlh -- (N, C, D/2, H/2, W/2)
            hfc_hhl -- (N, C, D/2, H/2, W/2)
            hfc_hhh -- (N, C, D/2, H/2, W/2)
    """
    def __init__(self, wavename):
        """
        3D discrete wavelet transform (DWT) for 3D data decomposition
        :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
        """
        super(DWT_3D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        # Reconstruction filters are used as analysis rows of the transform matrix.
        self.band_low = wavelet.rec_lo
        self.band_high = wavelet.rec_hi
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)
    def get_matrix(self):
        """
        Generate the banded analysis matrices \mathcal{L} (low-pass) and
        \mathcal{H} (high-pass) for the depth, height, and width axes.
        :return: sets self.matrix_low_{0,1,2} and self.matrix_high_{0,1,2}
        """
        # Bug fix: the prototype matrices must be sized for the LONGEST of the
        # three transformed axes; the original took only max(height, width),
        # which truncated matrix_*_2 whenever depth exceeded both spatial sizes.
        L1 = np.max((self.input_height, self.input_width, self.input_depth))
        L = math.floor(L1 / 2)
        matrix_h = np.zeros((L, L1 + self.band_length - 2))
        matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
        end = None if self.band_length_half == 1 else (-self.band_length_half + 1)
        # Banded low-pass matrix: the filter slides two columns per row (stride 2).
        index = 0
        for i in range(L):
            for j in range(self.band_length):
                matrix_h[i, index + j] = self.band_low[j]
            index += 2
        matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
        matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
        matrix_h_2 = matrix_h[0:(math.floor(self.input_depth / 2)), 0:(self.input_depth + self.band_length - 2)]
        # Same banded layout with the high-pass filter.
        index = 0
        for i in range(L1 - L):
            for j in range(self.band_length):
                matrix_g[i, index + j] = self.band_high[j]
            index += 2
        matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
        matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
        matrix_g_2 = matrix_g[0:(self.input_depth - math.floor(self.input_depth / 2)), 0:(self.input_depth + self.band_length - 2)]
        # Trim padding columns; the width-axis matrices are transposed because
        # they right-multiply the input.
        matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
        matrix_h_1 = np.transpose(matrix_h_1[:, (self.band_length_half - 1):end])
        matrix_h_2 = matrix_h_2[:, (self.band_length_half - 1):end]
        matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
        matrix_g_1 = np.transpose(matrix_g_1[:, (self.band_length_half - 1):end])
        matrix_g_2 = matrix_g_2[:, (self.band_length_half - 1):end]
        if torch.cuda.is_available():
            self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
            self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
            self.matrix_low_2 = torch.Tensor(matrix_h_2).cuda()
            self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
            self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
            self.matrix_high_2 = torch.Tensor(matrix_g_2).cuda()
        else:
            self.matrix_low_0 = torch.Tensor(matrix_h_0)
            self.matrix_low_1 = torch.Tensor(matrix_h_1)
            self.matrix_low_2 = torch.Tensor(matrix_h_2)
            self.matrix_high_0 = torch.Tensor(matrix_g_0)
            self.matrix_high_1 = torch.Tensor(matrix_g_1)
            self.matrix_high_2 = torch.Tensor(matrix_g_2)
    def forward(self, input):
        """
        :param input: the 3D data to be decomposed, shape (N, C, D, H, W)
        :return: the eight components of the input data, one low-frequency and seven high-frequency components
        """
        assert len(input.size()) == 5
        # Matrices are rebuilt on every call because the volume size may vary.
        self.input_depth = input.size()[-3]
        self.input_height = input.size()[-2]
        self.input_width = input.size()[-1]
        self.get_matrix()
        return DWTFunction_3D.apply(input, self.matrix_low_0, self.matrix_low_1, self.matrix_low_2,
                                    self.matrix_high_0, self.matrix_high_1, self.matrix_high_2)
class IDWT_3D(Module):
"""
input: lfc -- (N, C, D/2, H/2, W/2)
hfc_llh -- (N, C, D/2, H/2, W/2)
hfc_lhl -- (N, C, D/2, H/2, W/2)
hfc_lhh -- (N, C, D/2, H/2, W/2)
hfc_hll -- (N, C, D/2, H/2, W/2)
hfc_hlh -- (N, C, D/2, H/2, W/2)
hfc_hhl -- (N, C, D/2, H/2, W/2)
hfc_hhh -- (N, C, D/2, H/2, W/2)
output: the original 3D data -- (N, C, D, H, W)
"""
def __init__(self, wavename):
"""
3D inverse DWT (IDWT) for 3D data reconstruction
:param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
"""
super(IDWT_3D, self).__init__()
wavelet = pywt.Wavelet(wavename)
self.band_low = wavelet.dec_lo
self.band_high = wavelet.dec_hi
self.band_low.reverse()
self.band_high.reverse()
assert len(self.band_low) == len(self.band_high)
self.band_length = len(self.band_low)
assert self.band_length % 2 == 0
self.band_length_half = math.floor(self.band_length / 2)
def get_matrix(self):
"""
生成变换矩阵
generating the matrices: \mathcal{L}, \mathcal{H}
:return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
"""
L1 = np.max((self.input_height, self.input_width))
L = math.floor(L1 / 2)
matrix_h = np.zeros( ( L, L1 + self.band_length - 2 ) )
matrix_g = np.zeros( ( L1 - L, L1 + self.band_length - 2 ) )
end = None if self.band_length_half == 1 else (-self.band_length_half+1)
index = 0
for i in range(L):
for j in range(self.band_length):
matrix_h[i, index+j] = self.band_low[j]
index += 2
matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
matrix_h_2 = matrix_h[0:(math.floor(self.input_depth / 2)), 0:(self.input_depth + self.band_length - 2)]
index = 0
for i in range(L1 - L):
for j in range(self.band_length):
matrix_g[i, index+j] = self.band_high[j]
index += 2
matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)),0:(self.input_height + self.band_length - 2)]
matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)),0:(self.input_width + self.band_length - 2)]
matrix_g_2 = matrix_g[0:(self.input_depth - math.floor(self.input_depth / 2)),0:(self.input_depth + self.band_length - 2)]
matrix_h_0 = matrix_h_0[:,(self.band_length_half-1):end]
matrix_h_1 = matrix_h_1[:,(self.band_length_half-1):end]
matrix_h_1 = np.transpose(matrix_h_1)
matrix_h_2 = matrix_h_2[:,(self.band_length_half-1):end]
matrix_g_0 = matrix_g_0[:,(self.band_length_half-1):end]
matrix_g_1 = matrix_g_1[:,(self.band_length_half-1):end]
matrix_g_1 = np.transpose(matrix_g_1)
matrix_g_2 = matrix_g_2[:,(self.band_length_half-1):end]
if torch.cuda.is_available():
self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
self.matrix_low_2 = torch.Tensor(matrix_h_2).cuda()
self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
self.matrix_high_2 = torch.Tensor(matrix_g_2).cuda()
else:
self.matrix_low_0 = torch.Tensor(matrix_h_0)
self.matrix_low_1 = torch.Tensor(matrix_h_1)
self.matrix_low_2 = torch.Tensor(matrix_h_2)
self.matrix_high_0 = torch.Tensor(matrix_g_0)
self.matrix_high_1 = torch.Tensor(matrix_g_1)
self.matrix_high_2 = torch.Tensor(matrix_g_2)
    def forward(self, LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH):
        """
        Reconstruct the original 3D signal from its eight 3D-DWT sub-bands.

        :param LLL: the low-frequency component (lfc)
        :param LLH: the high-frequency component, hfc_llh
        :param LHL: the high-frequency component, hfc_lhl
        :param LHH: the high-frequency component, hfc_lhh
        :param HLL: the high-frequency component, hfc_hll
        :param HLH: the high-frequency component, hfc_hlh
        :param HHL: the high-frequency component, hfc_hhl
        :param HHH: the high-frequency component, hfc_hhh
        :return: the reconstructed 3D input data
        """
        # Every sub-band must be a 5-D tensor — presumably (N, C, D, H, W); TODO confirm.
        assert len(LLL.size()) == len(LLH.size()) == len(LHL.size()) == len(LHH.size()) == 5
        assert len(HLL.size()) == len(HLH.size()) == len(HHL.size()) == len(HHH.size()) == 5
        # Output size along each spatial axis = low-band size + high-band size
        # (this also covers odd-length inputs, where the two band sizes differ).
        self.input_depth = LLL.size()[-3] + HHH.size()[-3]
        self.input_height = LLL.size()[-2] + HHH.size()[-2]
        self.input_width = LLL.size()[-1] + HHH.size()[-1]
        # Rebuild the transform matrices for the (possibly changed) input size.
        self.get_matrix()
        return IDWTFunction_3D.apply(LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH,
                                     self.matrix_low_0, self.matrix_low_1, self.matrix_low_2,
                                     self.matrix_high_0, self.matrix_high_1, self.matrix_high_2)
if __name__ == '__main__':
    # Ad-hoc experiments (timing / gradcheck); the large triple-quoted blocks
    # below are disabled test drivers kept for reference.
    from datetime import datetime
    from torch.autograd import gradcheck
    # Reconstruction (rec_*) filters of the biorthogonal 1.1 wavelet.
    wavelet = pywt.Wavelet('bior1.1')
    h = wavelet.rec_lo
    g = wavelet.rec_hi
    # Decomposition (dec_*) filters, reversed in place.
    # NOTE(review): the reversal presumably matches the filter orientation the
    # DWT/IDWT matrix construction expects — confirm against DWT_1D/2D/3D.
    h_ = wavelet.dec_lo
    g_ = wavelet.dec_hi
    h_.reverse()
    g_.reverse()
"""
image_full_name = '/home/li-qiufu/Pictures/standard_test_images/lena_color_512.tif'
image = cv2.imread(image_full_name, flags = 1)
image = image[0:512,0:512,:]
print(image.shape)
height, width, channel = image.shape
#image = image.reshape((1,height,width))
t0 = datetime.now()
for index in range(100):
m0 = DWT_2D(band_low = h, band_high = g)
image_tensor = torch.Tensor(image)
image_tensor.unsqueeze_(dim = 0)
print('image_re shape: {}'.format(image_tensor.size()))
image_tensor.transpose_(1,3)
print('image_re shape: {}'.format(image_tensor.size()))
image_tensor.transpose_(2,3)
print('image_re shape: {}'.format(image_tensor.size()))
image_tensor.requires_grad = False
LL, LH, HL, HH = m0(image_tensor)
matrix_low_0 = torch.Tensor(m0.matrix_low_0)
matrix_low_1 = torch.Tensor(m0.matrix_low_1)
matrix_high_0 = torch.Tensor(m0.matrix_high_0)
matrix_high_1 = torch.Tensor(m0.matrix_high_1)
#image_tensor.requires_grad = True
#input = (image_tensor.double(), matrix_low_0.double(), matrix_low_1.double(), matrix_high_0.double(), matrix_high_1.double())
#test = gradcheck(DWTFunction_2D.apply, input)
#print(test)
#print(LL.requires_grad)
#print(LH.requires_grad)
#print(HL.requires_grad)
#print(HH.requires_grad)
#LL.requires_grad = True
#input = (LL.double(), LH.double(), HL.double(), HH.double(), matrix_low_0.double(), matrix_low_1.double(), matrix_high_0.double(), matrix_high_1.double())
#test = gradcheck(IDWTFunction_2D.apply, input)
#print(test)
m1 = IDWT_2D(band_low = h_, band_high = g_)
image_re = m1(LL,LH,HL,HH)
t1 = datetime.now()
image_re.transpose_(2,3)
image_re.transpose_(1,3)
image_re_np = image_re.detach().numpy()
print('image_re shape: {}'.format(image_re_np.shape))
image_zero = image - image_re_np[0]
print(np.max(image_zero), np.min(image_zero))
print(image_zero[:,8])
print('taking {} secondes'.format(t1 - t0))
cv2.imshow('reconstruction', image_re_np[0]/255)
cv2.imshow('image_zero', image_zero/255)
cv2.waitKey(0)
"""
"""
image_full_name = '/home/liqiufu/Pictures/standard_test_images/lena_color_512.tif'
image = cv2.imread(image_full_name, flags = 1)
image = image[0:512,0:512,:]
print(image.shape)
image_3d = np.concatenate((image, image, image, image, image, image), axis = 2)
print(image_3d.shape)
image_tensor = torch.Tensor(image_3d)
#image_tensor = image_tensor.transpose(dim0 = 2, dim1 = 1)
#image_tensor = image_tensor.transpose(dim0 = 1, dim1 = 0)
image_tensor.unsqueeze_(dim = 0)
image_tensor.unsqueeze_(dim = 0)
t0 = datetime.now()
for index in range(10):
m0 = DWT_3D(wavename = 'haar')
print('image_re shape: {}'.format(image_tensor.size()))
image_tensor.requires_grad = False
LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = m0(image_tensor)
matrix_low_0 = torch.Tensor(m0.matrix_low_0)
matrix_low_1 = torch.Tensor(m0.matrix_low_1)
matrix_low_2 = torch.Tensor(m0.matrix_low_2)
matrix_high_0 = torch.Tensor(m0.matrix_high_0)
matrix_high_1 = torch.Tensor(m0.matrix_high_1)
matrix_high_2 = torch.Tensor(m0.matrix_high_2)
#image_tensor.requires_grad = True
#input = (image_tensor.double(), matrix_low_0.double(), matrix_low_1.double(), matrix_low_2.double(),
# matrix_high_0.double(), matrix_high_1.double(), matrix_high_2.double())
#test = gradcheck(DWTFunction_3D.apply, input)
#print('testing dwt3d -- {}'.format(test))
#LLL.requires_grad = True
#input = (LLL.double(), LLH.double(), LHL.double(), LHH.double(),
# HLL.double(), HLH.double(), HHL.double(), HHH.double(),
# matrix_low_0.double(), matrix_low_1.double(), matrix_low_2.double(),
# matrix_high_0.double(), matrix_high_1.double(), matrix_high_2.double())
#test = gradcheck(IDWTFunction_3D.apply, input)
#print('testing idwt3d -- {}'.format(test))
m1 = IDWT_3D(wavename = 'haar')
image_re = m1(LLL,LLH,LHL,LHH,HLL,HLH,HHL,HHH)
t1 = datetime.now()
image_re.squeeze_(dim = 0)
image_re.squeeze_(dim = 0)
#image_re.transpose_(0,1)
#image_re.transpose_(1,2)
image_re_np = image_re.detach().numpy()
print('image_re shape: {}'.format(image_re_np.shape))
image_zero = image - image_re_np[:,:,0:3]
print(np.max(image_zero), np.min(image_zero))
#print(image_zero[:,8,0])
print('taking {} secondes'.format(t1 - t0))
cv2.imshow('reconstruction', image_re_np[:,:,0:3]/255)
cv2.imshow('image_zero', image_zero/255)
cv2.waitKey(0)
"""
"""
import matplotlib.pyplot as plt
import numpy as np
vector_np = np.array(list(range(1280)))#.reshape((128,1))
print(vector_np.shape)
t0 = datetime.now()
for index in range(100):
vector = torch.Tensor(vector_np)
vector.unsqueeze_(dim = 0)
vector.unsqueeze_(dim = 0)
m0 = DWT_1D(band_low = h, band_high = g)
L, H = m0(vector)
#matrix_low = torch.Tensor(m0.matrix_low)
#matrix_high = torch.Tensor(m0.matrix_high)
#vector.requires_grad = True
#input = (vector.double(), matrix_low.double(), matrix_high.double())
#test = gradcheck(DWTFunction_1D.apply, input)
#print('testing 1D-DWT: {}'.format(test))
#print(L.requires_grad)
#print(H.requires_grad)
#L.requires_grad = True
#H.requires_grad = True
#input = (L.double(), H.double(), matrix_low.double(), matrix_high.double())
#test = gradcheck(IDWTFunction_1D.apply, input)
#print('testing 1D-IDWT: {}'.format(test))
m1 = IDWT_1D(band_low = h_, band_high = g_)
vector_re = m1(L, H)
t1 = datetime.now()
vector_re_np = vector_re.detach().numpy()
print('image_re shape: {}'.format(vector_re_np.shape))
vector_zero = vector_np - vector_re_np.reshape(vector_np.shape)
print(np.max(vector_zero), np.min(vector_zero))
print(vector_zero[:8])
print('taking {} secondes'.format(t1 - t0))
"""
| 33,457 | 43.081686 | 163 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/preactresnet.py | '''Pre-activation ResNet in PyTorch.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
# Module-wide normalisation configuration: every block below builds its norm
# layers from these three switches, so the whole network can be flipped from
# BatchNorm to InstanceNorm in one place.
track_running_stats=True
affine=True
normal_func = nn.BatchNorm2d
# Alternative configuration (InstanceNorm, no running statistics):
# track_running_stats=False
# affine=True
# normal_func = nn.InstanceNorm2d
if not track_running_stats:
    print('BN track False')
class PreActBlock(nn.Module):
    '''Pre-activation version of the BasicBlock.

    Layout: norm -> act -> 3x3 conv, norm -> act -> 3x3 conv, plus a shortcut.
    The 1x1 projection shortcut branches off the *pre-activated* input when
    the spatial size or channel count changes.
    '''
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, activation='ReLU', softplus_beta=1):
        super(PreActBlock, self).__init__()
        # `normal_func`, `track_running_stats` and `affine` are module-level
        # switches selecting the normalisation layer (BatchNorm/InstanceNorm).
        self.bn1 = normal_func(in_planes, track_running_stats=track_running_stats, affine=affine)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = normal_func(planes, track_running_stats=track_running_stats, affine=affine)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        # Projection shortcut only when the output shape differs from the input.
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
            )
        # Activation selection; the print is a construction-time trace.
        # NOTE(review): an unrecognised `activation` silently leaves self.relu
        # unset and only fails later inside forward() — consider raising here.
        if activation == 'ReLU':
            self.relu = nn.ReLU(inplace=True)
            print('ReLU')
        elif activation == 'Softplus':
            self.relu = nn.Softplus(beta=softplus_beta, threshold=20)
            print('Softplus')
        elif activation == 'GELU':
            self.relu = nn.GELU()
            print('GELU')
        elif activation == 'ELU':
            self.relu = nn.ELU(alpha=1.0, inplace=True)
            print('ELU')
        elif activation == 'LeakyReLU':
            self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
            print('LeakyReLU')
        elif activation == 'SELU':
            self.relu = nn.SELU(inplace=True)
            print('SELU')
        elif activation == 'CELU':
            self.relu = nn.CELU(alpha=1.2, inplace=True)
            print('CELU')
        elif activation == 'Tanh':
            self.relu = nn.Tanh()
            print('Tanh')
    def forward(self, x):
        # Pre-activation: norm + activation come first and feed both branches;
        # the identity shortcut (no projection) uses the raw input instead.
        out = self.relu(self.bn1(x))
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(self.relu(self.bn2(out)))
        out += shortcut
        return out
class PreActBottleneck(nn.Module):
    '''Pre-activation version of the original Bottleneck module.

    norm -> ReLU -> 1x1 conv (reduce), norm -> ReLU -> 3x3 conv (strided),
    norm -> ReLU -> 1x1 conv (expand to ``expansion * planes`` channels).
    A 1x1 projection shortcut is added whenever the output shape differs
    from the input shape.
    '''
    expansion = 4
    def __init__(self, in_planes, planes, stride=1, activation='ReLU', softplus_beta=1):
        super(PreActBottleneck, self).__init__()
        width = planes
        out_channels = self.expansion * planes
        self.bn1 = normal_func(in_planes, track_running_stats=track_running_stats, affine=affine)
        self.conv1 = nn.Conv2d(in_planes, width, kernel_size=1, bias=False)
        self.bn2 = normal_func(width, track_running_stats=track_running_stats, affine=affine)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn3 = normal_func(width, track_running_stats=track_running_stats, affine=affine)
        self.conv3 = nn.Conv2d(width, out_channels, kernel_size=1, bias=False)
        needs_projection = (stride != 1) or (in_planes != out_channels)
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_channels, kernel_size=1, stride=stride, bias=False)
            )
    def forward(self, x):
        # Shared pre-activation feeds both the residual and the shortcut path.
        preact = F.relu(self.bn1(x))
        residual = self.shortcut(preact) if hasattr(self, 'shortcut') else x
        y = self.conv1(preact)
        y = self.conv2(F.relu(self.bn2(y)))
        y = self.conv3(F.relu(self.bn3(y)))
        y += residual
        return y
class PreActResNet(nn.Module):
    """Pre-activation ResNet for 32x32 images (CIFAR-style 3x3 stem).

    Optional feature handling before the classifier:
      * normalize_only_FN — L2-normalise the pooled features only;
      * normalize — L2-normalise features, rescale by `scale`, and row-normalise
        the (bias-free) classifier weights on every forward pass, so logits are
        scaled cosine similarities.
    """
    def __init__(self, block, num_blocks, num_classes=10, normalize = False, normalize_only_FN = False, scale = 15, activation='ReLU', softplus_beta=1):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        self.normalize = normalize
        self.normalize_only_FN = normalize_only_FN
        self.scale = scale
        self.activation = activation
        self.softplus_beta = softplus_beta
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        # Four stages; spatial resolution halves at stages 2-4.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.bn = normal_func(512 * block.expansion, track_running_stats=track_running_stats, affine=affine)
        if self.normalize:
            # Bias-free classifier: its rows are L2-normalised in forward().
            self.linear = nn.Linear(512*block.expansion, num_classes, bias=False)
        else:
            self.linear = nn.Linear(512*block.expansion, num_classes)
        # Activation selection; the print is a construction-time trace.
        # NOTE(review): an unrecognised name leaves self.relu unset and fails
        # later in forward().
        if activation == 'ReLU':
            self.relu = nn.ReLU(inplace=True)
            print('ReLU')
        elif activation == 'Softplus':
            self.relu = nn.Softplus(beta=softplus_beta, threshold=20)
            print('Softplus')
        elif activation == 'GELU':
            self.relu = nn.GELU()
            print('GELU')
        elif activation == 'ELU':
            self.relu = nn.ELU(alpha=1.0, inplace=True)
            print('ELU')
        elif activation == 'LeakyReLU':
            self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
            print('LeakyReLU')
        elif activation == 'SELU':
            self.relu = nn.SELU(inplace=True)
            print('SELU')
        elif activation == 'CELU':
            self.relu = nn.CELU(alpha=1.2, inplace=True)
            print('CELU')
        elif activation == 'Tanh':
            self.relu = nn.Tanh()
            print('Tanh')
        print('Use activation of ' + activation)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride,
                        activation=self.activation, softplus_beta=self.softplus_beta))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Final pre-activation before pooling.
        out = self.relu(self.bn(out))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        if self.normalize_only_FN:
            out = F.normalize(out, p=2, dim=1)
        if self.normalize:
            # Hypersphere classifier: unit-norm features (times scale) against
            # unit-norm weight rows; the weights are renormalised in place on
            # every forward pass.
            out = F.normalize(out, p=2, dim=1) * self.scale
            for _, module in self.linear.named_modules():
                if isinstance(module, nn.Linear):
                    module.weight.data = F.normalize(module.weight, p=2, dim=1)
        return self.linear(out)
def PreActResNet18(num_classes=10, normalize = False, normalize_only_FN = False, scale = 15, activation='ReLU', softplus_beta=1):
    """Build an 18-layer pre-activation ResNet ([2, 2, 2, 2] basic blocks)."""
    return PreActResNet(
        PreActBlock,
        [2, 2, 2, 2],
        num_classes=num_classes,
        normalize=normalize,
        normalize_only_FN=normalize_only_FN,
        scale=scale,
        activation=activation,
        softplus_beta=softplus_beta,
    )
def PreActResNet34():
    # 34-layer variant: BasicBlock, [3, 4, 6, 3] blocks per stage.
    return PreActResNet(PreActBlock, [3,4,6,3])
def PreActResNet50():
    # 50-layer variant: Bottleneck, [3, 4, 6, 3] blocks per stage.
    return PreActResNet(PreActBottleneck, [3,4,6,3])
def PreActResNet101():
    # 101-layer variant: Bottleneck, [3, 4, 23, 3] blocks per stage.
    return PreActResNet(PreActBottleneck, [3,4,23,3])
def PreActResNet152():
    # 152-layer variant: Bottleneck, [3, 8, 36, 3] blocks per stage.
    return PreActResNet(PreActBottleneck, [3,8,36,3])
def test():
    """Smoke test: push one random CIFAR-sized batch through PreActResNet18."""
    model = PreActResNet18()
    logits = model(torch.randn(1, 3, 32, 32))
    print(logits.size())
# test()
| 7,760 | 37.044118 | 152 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/utils.py | import numpy as np
from collections import namedtuple
import torch
from torch import nn
import torchvision
from torch.optim.optimizer import Optimizer, required
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
################################################################
## Components from https://github.com/davidcpage/cifar10-fast ##
################################################################
#####################
## data preprocessing
#####################
cifar10_mean = (0.4914, 0.4822, 0.4465) # equals np.mean(train_set.train_data, axis=(0,1,2))/255
cifar10_std = (0.2471, 0.2435, 0.2616) # equals np.std(train_set.train_data, axis=(0,1,2))/255

def normalise(x, mean=cifar10_mean, std=cifar10_std):
    """Return a float32 copy of `x` standardised channel-wise.

    `x` holds pixel values in [0, 255]; `mean`/`std` are given in [0, 1]
    units, hence the 255 scaling. The input array is not modified.
    """
    x = np.array(x, np.float32)
    mean = np.array(mean, np.float32)
    std = np.array(std, np.float32)
    x -= mean * 255
    x *= 1.0 / (255 * std)
    return x
def pad(x, border=4):
    """Reflect-pad an NHWC batch by `border` pixels on each spatial side."""
    spatial = (border, border)
    return np.pad(x, [(0, 0), spatial, spatial, (0, 0)], mode='reflect')
def transpose(x, source='NHWC', target='NCHW'):
    """Reorder the axes of `x` from the `source` layout to the `target` layout."""
    order = tuple(source.index(axis) for axis in target)
    return x.transpose(order)
#####################
## data augmentation
#####################
class Crop(namedtuple('Crop', ('h', 'w'))):
    """Random-crop augmentation: take an (h, w) window from a CHW array."""

    def __call__(self, x, x0, y0):
        # Window whose top-left corner is (y0, x0).
        return x[:, y0:y0 + self.h, x0:x0 + self.w]

    def options(self, x_shape):
        channels, height, width = x_shape
        return {'x0': range(width + 1 - self.w), 'y0': range(height + 1 - self.h)}

    def output_shape(self, x_shape):
        channels, _, _ = x_shape
        return (channels, self.h, self.w)
class FlipLR(namedtuple('FlipLR', ())):
    """Horizontal-flip augmentation for CHW arrays."""

    def __call__(self, x, choice):
        if not choice:
            return x
        # Copy so the flipped view is contiguous and independent of `x`.
        return x[:, :, ::-1].copy()

    def options(self, x_shape):
        return {'choice': [True, False]}
class Cutout(namedtuple('Cutout', ('h', 'w'))):
    """Cutout augmentation: zero an (h, w) window of a CHW array (on a copy)."""

    def __call__(self, x, x0, y0):
        patched = x.copy()
        patched[:, y0:y0 + self.h, x0:x0 + self.w].fill(0.0)
        return patched

    def options(self, x_shape):
        channels, height, width = x_shape
        return {'x0': range(width + 1 - self.w), 'y0': range(height + 1 - self.h)}
class Transform():
    """Dataset wrapper that applies per-item random augmentations.

    Call set_random_choices() once per epoch to draw one set of augmentation
    parameters for every item; __getitem__ then applies those parameters
    deterministically.
    """

    def __init__(self, dataset, transforms):
        self.dataset = dataset
        self.transforms = transforms
        self.choices = None

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        data, labels = self.dataset[index]
        # Apply each transform with the parameters pre-drawn for this index.
        for choices, transform in zip(self.choices, self.transforms):
            kwargs = {name: values[index] for (name, values) in choices.items()}
            data = transform(data, **kwargs)
        return data, labels

    def set_random_choices(self):
        self.choices = []
        x_shape = self.dataset[0][0].shape
        count = len(self)
        for transform in self.transforms:
            options = transform.options(x_shape)
            # Track the shape change so later transforms see the right size.
            if hasattr(transform, 'output_shape'):
                x_shape = transform.output_shape(x_shape)
            self.choices.append(
                {name: np.random.choice(values, size=count) for (name, values) in options.items()}
            )
#####################
## dataset
#####################
def cifar10(root):
    """Download (if needed) CIFAR-10 under `root` and return the raw arrays.

    Returns a dict with 'train' and 'test' splits; each holds the image array
    under 'data' and the integer labels under 'labels' (as stored by
    torchvision's CIFAR10 dataset).
    """
    train_set = torchvision.datasets.CIFAR10(root=root, train=True, download=True)
    test_set = torchvision.datasets.CIFAR10(root=root, train=False, download=True)
    return {
        'train': {'data': train_set.data, 'labels': train_set.targets},
        'test': {'data': test_set.data, 'labels': test_set.targets}
    }
#####################
## data loading
#####################
class Batches():
    """Thin DataLoader wrapper yielding {'input', 'target'} dicts on `device`.

    Inputs are cast to half precision and targets to int64. When
    `set_random_choices` is true, fresh augmentation parameters are drawn for
    the wrapped Transform dataset at the start of every epoch.
    """

    def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle, drop_last=drop_last
        )

    def __iter__(self):
        if self.set_random_choices:
            self.dataset.set_random_choices()
        def _batches():
            for inputs, targets in self.dataloader:
                yield {'input': inputs.to(device).half(), 'target': targets.to(device).long()}
        return _batches()

    def __len__(self):
        return len(self.dataloader)
#####################
## new optimizer
#####################
class SGD_GCC(Optimizer):
    """SGD with momentum and Gradient Centralization on Conv layers only.

    Gradient Centralization (Yong et al., ECCV 2020) subtracts, from each
    convolutional weight gradient, its mean over all axes except the
    output-channel axis. Parameters with fewer than 4 dimensions (biases,
    BN/FC weights) are left untouched.

    Fixed: the deprecated ``Tensor.add_(scalar, tensor)`` /
    ``Tensor.add(scalar, tensor)`` positional-alpha overloads (removed in
    modern PyTorch) are replaced with the ``alpha=`` keyword form.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD_GCC, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGD_GCC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(p.data, alpha=weight_decay)
                # GC operation for Conv layers (ndim > 3): subtract the mean
                # over all axes except dim 0 (the output channels).
                if len(list(d_p.size())) > 3:
                    d_p.add_(-d_p.mean(dim=tuple(range(1, len(list(d_p.size())))), keepdim=True))
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.data.add_(d_p, alpha=-group['lr'])
        return loss
class SGD_GC(Optimizer):
    """SGD with momentum and Gradient Centralization on Conv AND FC layers.

    Same as SGD_GCC, except centralization is applied to every parameter with
    more than one dimension (so fully-connected weights are centralized too);
    1-D parameters (biases, norm scales) are left untouched.

    Fixed: the deprecated ``Tensor.add_(scalar, tensor)`` /
    ``Tensor.add(scalar, tensor)`` positional-alpha overloads (removed in
    modern PyTorch) are replaced with the ``alpha=`` keyword form.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD_GC, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGD_GC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(p.data, alpha=weight_decay)
                # GC operation for Conv layers and FC layers (ndim > 1).
                if len(list(d_p.size())) > 1:
                    d_p.add_(-d_p.mean(dim=tuple(range(1, len(list(d_p.size())))), keepdim=True))
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.data.add_(d_p, alpha=-group['lr'])
        return loss
| 9,281 | 34.7 | 122 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/eval_cifar.py | import argparse
import copy
import logging
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from preactresnet import PreActResNet18
from wideresnet import WideResNet
from wideresnet_wavelet import WideResNetWavelet
from utils_plus import (upper_limit, lower_limit, std, clamp, get_loaders,
attack_pgd, evaluate_pgd, evaluate_standard, evaluate_fgsm, evaluate_mim, evaluate_new_fgsm)
from autoattack import AutoAttack
# installing AutoAttack by: pip install git+https://github.com/fra31/auto-attack
# CIFAR-10 per-channel statistics used for input normalisation.
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
# (3, 1, 1) tensors so they broadcast over NCHW image batches; note these are
# created on the GPU at import time.
mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
std = torch.tensor(cifar10_std).view(3,1,1).cuda()
def normalize(X):
    # Channel-wise standard-score normalisation of an NCHW batch.
    return (X - mu)/std
def get_args():
    """Build and parse the command-line arguments for evaluation."""
    parser = argparse.ArgumentParser()
    specs = [
        ('--batch-size', dict(default=128, type=int)),
        ('--data-dir', dict(default='../cifar-data', type=str)),
        ('--epsilon', dict(default=8, type=int)),
        ('--out-dir', dict(default='train_fgsm_output', type=str, help='Output directory')),
        ('--seed', dict(default=0, type=int, help='Random seed')),
    ]
    for flag, options in specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
def main():
    """Evaluate a trained model: clean, FGSM, MIM, PGD (CE/CW) and AutoAttack.

    Loads the checkpoint ``model_best.pth`` from ``--out-dir``, wraps the
    model in DataParallel, and prints the accuracy under each attack.
    """
    args = get_args()
    # Seed every RNG for reproducible attack evaluation.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        format='[%(asctime)s] - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=logging.DEBUG,
        handlers=[
            logging.StreamHandler()
        ])
    logger.info(args)
    _, test_loader, test_loader_nonorm = get_loaders(args.data_dir, args.batch_size)
    best_state_dict = torch.load(os.path.join(args.out_dir, 'model_best.pth'))
    # Evaluation model; alternative architectures kept for reference.
    #model_test = PreActResNet18().cuda()
    model_test = WideResNetWavelet(34, 10, widen_factor=10, dropRate=0.0)
    #model_test = WideResNet(34, 10, widen_factor=10, dropRate=0.0)
    model_test = nn.DataParallel(model_test).cuda()
    # Checkpoints may store the weights either directly or under 'state_dict'.
    if 'state_dict' in best_state_dict.keys():
        model_test.load_state_dict(best_state_dict['state_dict'])
    else:
        model_test.load_state_dict(best_state_dict)
    model_test.float()
    model_test.eval()
    ### Evaluate clean acc ###
    _, test_acc = evaluate_standard(test_loader, model_test)
    print('Clean acc: ', test_acc)
    _, fgsm_acc = evaluate_fgsm(test_loader, model_test)
    print('FGSM acc: ', fgsm_acc)
    _, mim_acc = evaluate_mim(test_loader, model_test)
    print('MIM acc: ', mim_acc)
    #_, fgsm_new_acc = evaluate_pgd(test_loader, model_test, attack_iters=1, restarts=1, step=1, use_CWloss=False)
    #print('FGSM new acc: ', fgsm_new_acc)
    ### Evaluate PGD (CE loss) acc ###
    # Fixed: the messages previously claimed "PGD-10 (10 restarts)" although
    # the calls below use 20 iterations and 20 restarts.
    _, pgd_acc_CE = evaluate_pgd(test_loader, model_test, attack_iters=20, restarts=20, step=2, use_CWloss=False)
    print('PGD-20 (20 restarts, step 2, CE loss) acc: ', pgd_acc_CE)
    ### Evaluate PGD (CW loss) acc ###
    _, pgd_acc_CW = evaluate_pgd(test_loader, model_test, attack_iters=20, restarts=20, step=2, use_CWloss=True)
    print('PGD-20 (20 restarts, step 2, CW loss) acc: ', pgd_acc_CW)
    ### Evaluate AutoAttack ###
    # AutoAttack expects inputs in [0, 1]; gather the unnormalised test set
    # and wrap the model so normalisation happens inside the forward call.
    l = [x for (x, y) in test_loader_nonorm]
    x_test = torch.cat(l, 0)
    l = [y for (x, y) in test_loader_nonorm]
    y_test = torch.cat(l, 0)
    class normalize_model():
        def __init__(self, model):
            self.model_test = model
        def __call__(self, x):
            x_norm = normalize(x)
            return self.model_test(x_norm)
    new_model = normalize_model(model_test)
    epsilon = 8 / 255.
    adversary = AutoAttack(new_model, norm='Linf', eps=epsilon, version='standard')
    X_adv = adversary.run_standard_evaluation(x_test, y_test, bs=128)
if __name__ == "__main__":
    main()
| 3,856 | 33.747748 | 114 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/wideresnet_wavelet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from DWT_IDWT_layer import *
class Downsample_v1(nn.Module):
    """2-D DWT downsampling that stacks all four sub-bands along channels."""

    def __init__(self, wavename = 'haar'):
        super(Downsample_v1, self).__init__()
        self.dwt = DWT_2D(wavename = wavename)

    def forward(self, input):
        low_low, low_high, high_low, high_high = self.dwt(input)
        return torch.cat((low_low, low_high, high_low, high_high), dim = 1)
class Downsample_v2(nn.Module):
    """2-D DWT downsampling that averages the four sub-bands."""

    def __init__(self, wavename = 'haar'):
        super(Downsample_v2, self).__init__()
        self.dwt = DWT_2D(wavename = wavename)

    def forward(self, input):
        low_low, low_high, high_low, high_high = self.dwt(input)
        return (low_low + low_high + high_low + high_high) / 4
class Downsample_v3(nn.Module):
    """2-D DWT downsampling that keeps only the low-frequency (LL) band."""

    def __init__(self, wavename = 'haar'):
        super(Downsample_v3, self).__init__()
        self.dwt = DWT_2D(wavename = wavename)

    def forward(self, input):
        low_low, _, _, _ = self.dwt(input)
        return low_low
class UpSampling_v1(nn.Module):
    """Inverse 2-D DWT: reassemble one tensor from its four sub-bands."""

    def __init__(self, wavename = 'haar'):
        super(UpSampling_v1, self).__init__()
        self.idwt = IDWT_2D(wavename = wavename)

    def forward(self, LL, LH, HL, HH):
        # Delegate straight to the inverse transform layer.
        return self.idwt(LL, LH, HL, HH)
class BasicBlock(nn.Module):
    """WideResNet basic block: BN -> act -> conv, BN -> act -> conv, + shortcut.

    When the input and output channel counts differ, a 1x1 convolution
    projects the pre-activated input onto the residual branch.

    Fixes over the original:
      * both arms of the ``if stride == 1`` tests built *identical* layers
        (dead duplication) — each layer is now built once;
      * the shortcut no longer constructs a projection conv that is
        immediately discarded when shapes already match (this also stops
        wasting RNG draws on unused weights);
      * an unknown `activation` name now fails fast instead of leaving
        relu1/relu2 unset and crashing later inside forward().
    """

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0, activation='ReLU', softplus_beta=1):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        # Activation selection; the single-letter print is a construction trace.
        if activation == 'ReLU':
            self.relu1 = nn.ReLU(inplace=True)
            self.relu2 = nn.ReLU(inplace=True)
            print('R')
        elif activation == 'Softplus':
            self.relu1 = nn.Softplus(beta=softplus_beta, threshold=20)
            self.relu2 = nn.Softplus(beta=softplus_beta, threshold=20)
            print('S')
        elif activation == 'GELU':
            self.relu1 = nn.GELU()
            self.relu2 = nn.GELU()
            print('G')
        elif activation == 'ELU':
            self.relu1 = nn.ELU(alpha=1.0, inplace=True)
            self.relu2 = nn.ELU(alpha=1.0, inplace=True)
            print('E')
        else:
            raise ValueError('Unsupported activation: {}'.format(activation))
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 projection shortcut, only built when channel counts differ.
        self.convShortcut = None if self.equalInOut else nn.Conv2d(
            in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)

    def forward(self, x):
        # Pre-activation; when shapes differ the activated input also feeds
        # the projection shortcut, otherwise the raw input is the identity.
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A stage of `nb_layers` residual blocks; only the first may downsample.

    The first block maps in_planes -> out_planes with the given stride; every
    subsequent block maps out_planes -> out_planes with stride 1.
    """

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, activation='ReLU', softplus_beta=1):
        super(NetworkBlock, self).__init__()
        self.activation = activation
        self.softplus_beta = softplus_beta
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        blocks = []
        for index in range(int(nb_layers)):
            first = (index == 0)
            blocks.append(block(in_planes if first else out_planes,
                                out_planes,
                                stride if first else 1,
                                dropRate,
                                self.activation,
                                self.softplus_beta))
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)
class WideResNetWavelet(nn.Module):
    """WideResNet-depth-k whose global pooling path uses Haar wavelet operators.

    Follows the standard WideResNet layout (3x3 stem, three NetworkBlock
    stages, BN + activation); the final pooling first averages the four 2-D
    DWT sub-bands (Downsample_v2, halving the spatial size) and then applies
    4x4 average pooling before the linear classifier.
    """
    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0, normalize=False, activation='ReLU', softplus_beta=1):
        super(WideResNetWavelet, self).__init__()
        # Channel widths per stage for widen factor k: [16, 16k, 32k, 64k].
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        self.normalize = normalize
        #self.scale = scale
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 1st sub-block
        # NOTE(review): sub_block1 is never used in forward(); presumably kept
        # for checkpoint-key compatibility — confirm before removing.
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate, activation=activation, softplus_beta=softplus_beta)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        # NOTE(review): upsampling is also unused in forward().
        self.upsampling = UpSampling_v1(wavename = 'haar')
        if activation == 'ReLU':
            self.relu = nn.ReLU(inplace=True)
        elif activation == 'Softplus':
            self.relu = nn.Softplus(beta=softplus_beta, threshold=20)
        elif activation == 'GELU':
            self.relu = nn.GELU()
        elif activation == 'ELU':
            self.relu = nn.ELU(alpha=1.0, inplace=True)
        print('Use activation of ' + activation)
        # Wavelet-based pooling: average of the four DWT sub-bands.
        self.avgpool = Downsample_v2(wavename = 'haar')
        if self.normalize:
            self.fc = nn.Linear(nChannels[3], num_classes, bias = False)
        else:
            self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style init for convs, unit/zero for BN, zero FC bias.
        # (Note: `n` is reused here, shadowing the per-stage layer count.)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear) and not self.normalize:
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # DWT sub-band average halves each spatial dimension
        # (8x8 -> 4x4 for 32x32 inputs) ...
        out = self.avgpool(out)
        # ... then 4x4 average pooling reduces the rest before flattening.
        out = F.avg_pool2d(out, 4)
        out = out.view(-1, self.nChannels)
        if self.normalize:
            # Hypersphere classifier: unit-norm features against unit-norm
            # (bias-free) weight rows, renormalised on every forward pass.
            out = F.normalize(out, p=2, dim=1)
            for _, module in self.fc.named_modules():
                if isinstance(module, nn.Linear):
                    module.weight.data = F.normalize(module.weight, p=2, dim=1)
        return self.fc(out)
| 8,087 | 39.643216 | 141 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/utils_plus.py | #import apex.amp as amp
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
from torch.autograd import Variable
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
std = torch.tensor(cifar10_std).view(3,1,1).cuda()
upper_limit = ((1 - mu)/ std)
lower_limit = ((0 - mu)/ std)
def clamp(X, lower_limit, upper_limit):
    """Element-wise clamp of X into [lower_limit, upper_limit] (broadcasts)."""
    capped = torch.min(X, upper_limit)
    return torch.max(capped, lower_limit)
def get_loaders(dir_, batch_size):
    """Build the three CIFAR-10 DataLoaders used by this project.

    Returns (train_loader, test_loader, test_loader_nonorm); the last loader
    yields test images in [0, 1] without mean/std normalization.
    """
    augment_and_norm = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(cifar10_mean, cifar10_std),
    ])
    norm_only = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(cifar10_mean, cifar10_std),
    ])
    tensor_only = transforms.Compose([
        transforms.ToTensor()
    ])
    train_ds = datasets.CIFAR10(
        dir_, train=True, transform=augment_and_norm, download=True)
    test_ds = datasets.CIFAR10(
        dir_, train=False, transform=norm_only, download=True)
    test_ds_nonorm = datasets.CIFAR10(
        dir_, train=False, transform=tensor_only, download=True)

    def _make_loader(ds, shuffle):
        # Every loader uses pinned memory and 2 workers, matching the
        # original settings.
        return torch.utils.data.DataLoader(
            dataset=ds,
            batch_size=batch_size,
            shuffle=shuffle,
            pin_memory=True,
            num_workers=2,
        )

    return (_make_loader(train_ds, True),
            _make_loader(test_ds, False),
            _make_loader(test_ds_nonorm, False))
def CW_loss(x, y):
    """Carlini-Wagner margin loss, averaged over the batch.

    The negative gap between the true-class logit and the strongest
    competing logit: if the true class is currently the argmax, the
    competitor is the runner-up; otherwise it is the top logit.
    """
    sorted_x, sorted_idx = x.sort(dim=1)
    label_on_top = (sorted_idx[:, -1] == y).float()
    competitor = sorted_x[:, -2] * label_on_top + sorted_x[:, -1] * (1. - label_on_top)
    true_logit = x[np.arange(x.shape[0]), y]
    return (competitor - true_logit).mean()
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, use_CWloss=False):
    """Craft PGD (projected gradient descent) adversarial perturbations.

    Args:
        model: classifier under attack, called as model(X + delta).
        X, y: input batch and labels (CUDA tensors).
        epsilon: per-channel bound, indexed as epsilon[i][0][0] — expected to
            be a (C, 1, 1) tensor such as (8/255)/std (see evaluate_pgd).
        alpha: step size, broadcastable against delta.
        attack_iters: gradient steps per restart.
        restarts: random restarts; the per-example delta with the highest
            final loss across restarts is kept.
        use_CWloss: use the CW margin loss instead of cross-entropy.

    Returns:
        The worst-case perturbation delta (same shape as X).
    """
    max_loss = torch.zeros(y.shape[0]).cuda()
    max_delta = torch.zeros_like(X).cuda()
    for zz in range(restarts):
        # Random start: uniform in the per-channel eps ball, clipped to the
        # valid normalized pixel range.
        delta = torch.zeros_like(X).cuda()
        for i in range(len(epsilon)):
            delta[:, i, :, :].uniform_(-epsilon[i][0][0].item(), epsilon[i][0][0].item())
        delta.data = clamp(delta, lower_limit - X, upper_limit - X)
        delta.requires_grad = True
        for _ in range(attack_iters):
            output = model(X + delta)
            # Only keep updating examples the model still classifies correctly.
            index = torch.where(output.max(1)[1] == y)
            if len(index[0]) == 0:
                break
            if use_CWloss:
                loss = CW_loss(output, y)
            else:
                loss = F.cross_entropy(output, y)
            loss.backward()
            grad = delta.grad.detach()
            d = delta[index[0], :, :, :]
            g = grad[index[0], :, :, :]
            # Signed-gradient ascent step, then project back into the eps ball
            # and the valid pixel range.
            d = clamp(d + alpha * torch.sign(g), -epsilon, epsilon)
            d = clamp(d, lower_limit - X[index[0], :, :, :], upper_limit - X[index[0], :, :, :])
            delta.data[index[0], :, :, :] = d
            delta.grad.zero_()
        # Across restarts, keep the per-example delta with the largest loss.
        all_loss = F.cross_entropy(model(X+delta), y, reduction='none').detach()
        max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
        max_loss = torch.max(max_loss, all_loss)
    return max_delta
def evaluate_pgd(test_loader, model, attack_iters, restarts, step=2, use_CWloss=False):
    """Evaluate `model` under a PGD attack and return (mean loss, accuracy).

    Uses eps = 8/255 and step size `step`/255, both scaled per-channel by the
    dataset std.
    """
    eps = (8 / 255.) / std
    step_size = (step / 255.) / std
    total_loss = 0
    total_correct = 0
    total_seen = 0
    model.eval()
    for X, y in test_loader:
        X, y = X.cuda(), y.cuda()
        pgd_delta = attack_pgd(model, X, y, eps, step_size, attack_iters, restarts, use_CWloss=use_CWloss)
        with torch.no_grad():
            logits = model(X + pgd_delta)
            batch = y.size(0)
            total_loss += F.cross_entropy(logits, y).item() * batch
            total_correct += (logits.max(1)[1] == y).sum().item()
            total_seen += batch
    return total_loss / total_seen, total_correct / total_seen
def evaluate_mim(test_loader, model, num_steps=20, decay_factor=1.0):
    """Evaluate `model` against the MIM (momentum iterative) attack.

    eps = 8/255 and step = 2/255, both scaled per-channel by the dataset std.
    Returns (mean loss at the final attack step, adversarial accuracy).

    Fixes vs. original: the inner channel loop no longer shadows the batch
    index `i` (renamed to `ch`, and the unused batch index was dropped), and
    the leftover debug `print(std)` was removed.
    """
    test_loss = 0
    test_acc = 0
    n = 0
    epsilon = (8.0 / 255.0)/std
    step_size = (2.0 / 255.0)/std
    model.eval()
    with torch.no_grad():
        for X, y in test_loader:
            X, y = X.cuda(), y.cuda()
            X_pgd = Variable(X.data, requires_grad=True)
            # Random start: uniform in the per-channel eps ball, clipped to
            # the valid normalized pixel range.
            delta = torch.zeros_like(X).cuda()
            for ch in range(len(epsilon)):
                delta[:, ch, :, :].uniform_(-epsilon[ch][0][0].item(), epsilon[ch][0][0].item())
            delta.data = clamp(delta, lower_limit - X, upper_limit - X)
            delta.requires_grad = True
            X_pgd = Variable(X_pgd.data + delta, requires_grad=True)
            previous_grad = torch.zeros_like(X.data)
            for _ in range(num_steps):
                opt = torch.optim.SGD([X_pgd], lr=1e-3)
                opt.zero_grad()
                # Gradients must be re-enabled inside the outer no_grad().
                with torch.enable_grad():
                    loss = torch.nn.CrossEntropyLoss()(model(X_pgd),y)
                loss.backward()
                # MIM update: momentum over the L1-normalized gradient.
                grad = X_pgd.grad.data / torch.mean(torch.abs(X_pgd.grad.data), [1,2,3], keepdim=True)
                previous_grad = decay_factor * previous_grad + grad
                X_pgd = Variable(X_pgd.data + step_size * previous_grad.sign(), requires_grad=True)
                # Project back into the eps ball around X.
                eta = clamp(X_pgd.data - X.data, -epsilon, epsilon)
                X_pgd = Variable(X.data + eta, requires_grad=True)
                # NOTE(review): clamping normalized inputs to [-1, 1] does not
                # match the true normalized range (lower_limit/upper_limit) —
                # preserved as-is; confirm intended.
                X_pgd = Variable(torch.clamp(X_pgd, -1.0, 1.0), requires_grad=True)
            test_loss += loss.item() * y.size(0)
            test_acc += (model(X_pgd).max(1)[1] == y).float().sum().item()
            n += y.size(0)
    return test_loss/n, test_acc/n
def evaluate_fgsm(test_loader, model):
    """Evaluate `model` against single-step FGSM (eps = 8/255, scaled
    per-channel by the dataset std) and return (mean loss, accuracy).

    Note: test_loss accumulates the loss at the attack's starting point
    (before the FGSM step); test_acc is measured on the perturbed inputs.
    """
    test_loss = 0
    test_acc = 0
    n = 0
    print(std)
    epsilon = (8.0 / 255.0)/std
    model.eval()
    # The outer no_grad() disables graph building for the bookkeeping below;
    # the attack itself runs inside the enable_grad() block.
    with torch.no_grad():
        for i, (X, y) in enumerate(test_loader):
            X, y = X.cuda(), y.cuda()
            #output = model(X)
            X_fgsm = Variable(X.data, requires_grad=True)
            # The optimizer is only used to zero X_fgsm's gradient.
            opt = torch.optim.SGD([X_fgsm], lr=1e-3)
            opt.zero_grad()
            with torch.enable_grad():
                #loss = F.cross_entropy(model(X_fgsm), y)
                loss = torch.nn.CrossEntropyLoss()(model(X_fgsm),y)
            loss.backward()
            # One signed-gradient step.
            # NOTE(review): the result is clamped to [-1, 1], not to the true
            # normalized range (lower_limit/upper_limit) — confirm intended.
            X_fgsm = Variable(torch.clamp(X_fgsm.data + epsilon * X_fgsm.grad.data.sign(), -1.0, 1.0), requires_grad=True)
            test_loss += loss.item() * y.size(0)
            test_acc += (model(X_fgsm).max(1)[1] == y).float().sum().item()
            n += y.size(0)
    return test_loss/n, test_acc/n
def evaluate_new_fgsm(test_loader, model):
    """Evaluate `model` under random uniform noise within the eps ball.

    NOTE(review): despite the name, no gradient step is ever taken — the
    whole loop runs under no_grad() and loss.backward() is never called —
    so this measures robustness to random noise, not FGSM.
    Returns (mean loss, accuracy) on the noisy inputs.
    """
    test_loss = 0
    test_acc = 0
    n = 0
    model.eval()
    epsilon = (8 / 255.)/std
    with torch.no_grad():
        for i, (X, y) in enumerate(test_loader):
            X, y = X.cuda(), y.cuda()
            # Uniform per-channel noise, clipped to the valid normalized range.
            # (This inner loop reuses the name `i` from enumerate above —
            # harmless here since the outer index is unused, but easy to trip on.)
            delta = torch.zeros_like(X).cuda()
            for i in range(len(epsilon)):
                delta[:, i, :, :].uniform_(-epsilon[i][0][0].item(), epsilon[i][0][0].item())
            delta.data = clamp(delta, lower_limit - X, upper_limit - X)
            delta.requires_grad = True
            output = model(X + delta)
            loss = F.cross_entropy(output, y)
            test_loss += loss.item() * y.size(0)
            test_acc += (output.max(1)[1] == y).sum().item()
            n += y.size(0)
            #delta.grad.zero_()
    return test_loss/n, test_acc/n
def evaluate_standard(test_loader, model):
    """Return (mean cross-entropy loss, accuracy) of `model` on clean data."""
    running_loss = 0
    correct = 0
    seen = 0
    model.eval()
    with torch.no_grad():
        for X, y in test_loader:
            X, y = X.cuda(), y.cuda()
            logits = model(X)
            batch = y.size(0)
            running_loss += F.cross_entropy(logits, y).item() * batch
            correct += (logits.max(1)[1] == y).sum().item()
            seen += batch
    return running_loss / seen, correct / seen
| 8,417 | 36.247788 | 122 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/DWT_IDWT_Functions.py | # Copyright (c) 2019, Adobe Inc. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike
# 4.0 International Public License. To view a copy of this license, visit
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode.
"""
自定义pytorch函数,实现一维、二维、三维张量的DWT和IDWT,未考虑边界延拓
只有当图像行列数都是偶数,且重构滤波器组低频分量长度为2时,才能精确重构,否则在边界处有误差。
"""
import torch
from torch.autograd import Function
class DWTFunction_1D(Function):
    """1-D DWT: analysis by matrix multiplication (input @ matrix^T) for the
    low- and high-pass matrices, with an analytic backward pass.

    Fix vs. original: ctx.saved_variables is a removed/deprecated autograd
    API; use ctx.saved_tensors.
    """
    @staticmethod
    def forward(ctx, input, matrix_Low, matrix_High):
        ctx.save_for_backward(matrix_Low, matrix_High)
        L = torch.matmul(input, matrix_Low.t())
        H = torch.matmul(input, matrix_High.t())
        return L, H
    @staticmethod
    def backward(ctx, grad_L, grad_H):
        matrix_L, matrix_H = ctx.saved_tensors
        grad_input = torch.add(torch.matmul(grad_L, matrix_L), torch.matmul(grad_H, matrix_H))
        # No gradients for the filter matrices.
        return grad_input, None, None
class IDWTFunction_1D(Function):
    """1-D IDWT: synthesis output = L @ matrix_L + H @ matrix_H.

    Fix vs. original: ctx.saved_variables is a removed/deprecated autograd
    API; use ctx.saved_tensors.
    """
    @staticmethod
    def forward(ctx, input_L, input_H, matrix_L, matrix_H):
        ctx.save_for_backward(matrix_L, matrix_H)
        output = torch.add(torch.matmul(input_L, matrix_L), torch.matmul(input_H, matrix_H))
        return output
    @staticmethod
    def backward(ctx, grad_output):
        matrix_L, matrix_H = ctx.saved_tensors
        grad_L = torch.matmul(grad_output, matrix_L.t())
        grad_H = torch.matmul(grad_output, matrix_H.t())
        # No gradients for the filter matrices.
        return grad_L, grad_H, None, None
class DWTFunction_2D(Function):
    """2-D DWT producing the four subbands LL, LH, HL, HH via row/column
    matrix multiplications.

    Fix vs. original: ctx.saved_variables is a removed/deprecated autograd
    API; use ctx.saved_tensors.
    """
    @staticmethod
    def forward(ctx, input, matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
        ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1)
        # Rows first (left-multiply), then columns (right-multiply).
        L = torch.matmul(matrix_Low_0, input)
        H = torch.matmul(matrix_High_0, input)
        LL = torch.matmul(L, matrix_Low_1)
        LH = torch.matmul(L, matrix_High_1)
        HL = torch.matmul(H, matrix_Low_1)
        HH = torch.matmul(H, matrix_High_1)
        return LL, LH, HL, HH
    @staticmethod
    def backward(ctx, grad_LL, grad_LH, grad_HL, grad_HH):
        matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_tensors
        grad_L = torch.add(torch.matmul(grad_LL, matrix_Low_1.t()), torch.matmul(grad_LH, matrix_High_1.t()))
        grad_H = torch.add(torch.matmul(grad_HL, matrix_Low_1.t()), torch.matmul(grad_HH, matrix_High_1.t()))
        grad_input = torch.add(torch.matmul(matrix_Low_0.t(), grad_L), torch.matmul(matrix_High_0.t(), grad_H))
        return grad_input, None, None, None, None
class DWTFunction_2D_tiny(Function):
    """2-D DWT returning only the LL (low-low) subband.

    The high-pass matrices are accepted (and saved) to keep the signature
    identical to DWTFunction_2D, but they are unused.

    Fix vs. original: ctx.saved_variables is a removed/deprecated autograd
    API; use ctx.saved_tensors.
    """
    @staticmethod
    def forward(ctx, input, matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
        ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1)
        L = torch.matmul(matrix_Low_0, input)
        LL = torch.matmul(L, matrix_Low_1)
        return LL
    @staticmethod
    def backward(ctx, grad_LL):
        matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_tensors
        grad_L = torch.matmul(grad_LL, matrix_Low_1.t())
        grad_input = torch.matmul(matrix_Low_0.t(), grad_L)
        return grad_input, None, None, None, None
class IDWTFunction_2D(Function):
    """2-D IDWT: reconstruct an image from the LL, LH, HL, HH subbands.

    Fix vs. original: ctx.saved_variables is a removed/deprecated autograd
    API; use ctx.saved_tensors.
    """
    @staticmethod
    def forward(ctx, input_LL, input_LH, input_HL, input_HH,
                matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
        ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1)
        # Columns first (right-multiply by transposes), then rows.
        L = torch.add(torch.matmul(input_LL, matrix_Low_1.t()), torch.matmul(input_LH, matrix_High_1.t()))
        H = torch.add(torch.matmul(input_HL, matrix_Low_1.t()), torch.matmul(input_HH, matrix_High_1.t()))
        output = torch.add(torch.matmul(matrix_Low_0.t(), L), torch.matmul(matrix_High_0.t(), H))
        return output
    @staticmethod
    def backward(ctx, grad_output):
        matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_tensors
        grad_L = torch.matmul(matrix_Low_0, grad_output)
        grad_H = torch.matmul(matrix_High_0, grad_output)
        grad_LL = torch.matmul(grad_L, matrix_Low_1)
        grad_LH = torch.matmul(grad_L, matrix_High_1)
        grad_HL = torch.matmul(grad_H, matrix_Low_1)
        grad_HH = torch.matmul(grad_H, matrix_High_1)
        return grad_LL, grad_LH, grad_HL, grad_HH, None, None, None, None
class DWTFunction_3D(Function):
    """3-D DWT producing eight subbands (LLL ... HHH) via matrix products
    along each axis, using transpose(2, 3) to expose the third axis.

    Fix vs. original: ctx.saved_variables is a removed/deprecated autograd
    API; use ctx.saved_tensors.
    """
    @staticmethod
    def forward(ctx, input,
                matrix_Low_0, matrix_Low_1, matrix_Low_2,
                matrix_High_0, matrix_High_1, matrix_High_2):
        ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_Low_2,
                              matrix_High_0, matrix_High_1, matrix_High_2)
        L = torch.matmul(matrix_Low_0, input)
        H = torch.matmul(matrix_High_0, input)
        # transpose(2, 3) swaps axes so the next matmul filters the third axis.
        LL = torch.matmul(L, matrix_Low_1).transpose(dim0 = 2, dim1 = 3)
        LH = torch.matmul(L, matrix_High_1).transpose(dim0 = 2, dim1 = 3)
        HL = torch.matmul(H, matrix_Low_1).transpose(dim0 = 2, dim1 = 3)
        HH = torch.matmul(H, matrix_High_1).transpose(dim0 = 2, dim1 = 3)
        LLL = torch.matmul(matrix_Low_2, LL).transpose(dim0 = 2, dim1 = 3)
        LLH = torch.matmul(matrix_Low_2, LH).transpose(dim0 = 2, dim1 = 3)
        LHL = torch.matmul(matrix_Low_2, HL).transpose(dim0 = 2, dim1 = 3)
        LHH = torch.matmul(matrix_Low_2, HH).transpose(dim0 = 2, dim1 = 3)
        HLL = torch.matmul(matrix_High_2, LL).transpose(dim0 = 2, dim1 = 3)
        HLH = torch.matmul(matrix_High_2, LH).transpose(dim0 = 2, dim1 = 3)
        HHL = torch.matmul(matrix_High_2, HL).transpose(dim0 = 2, dim1 = 3)
        HHH = torch.matmul(matrix_High_2, HH).transpose(dim0 = 2, dim1 = 3)
        return LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH
    @staticmethod
    def backward(ctx, grad_LLL, grad_LLH, grad_LHL, grad_LHH,
                 grad_HLL, grad_HLH, grad_HHL, grad_HHH):
        matrix_Low_0, matrix_Low_1, matrix_Low_2, matrix_High_0, matrix_High_1, matrix_High_2 = ctx.saved_tensors
        # Mirror the forward pass: undo the axis-2 filtering, then axis-1, then axis-0.
        grad_LL = torch.add(torch.matmul(matrix_Low_2.t(), grad_LLL.transpose(dim0 = 2, dim1 = 3)), torch.matmul(matrix_High_2.t(), grad_HLL.transpose(dim0 = 2, dim1 = 3))).transpose(dim0 = 2, dim1 = 3)
        grad_LH = torch.add(torch.matmul(matrix_Low_2.t(), grad_LLH.transpose(dim0 = 2, dim1 = 3)), torch.matmul(matrix_High_2.t(), grad_HLH.transpose(dim0 = 2, dim1 = 3))).transpose(dim0 = 2, dim1 = 3)
        grad_HL = torch.add(torch.matmul(matrix_Low_2.t(), grad_LHL.transpose(dim0 = 2, dim1 = 3)), torch.matmul(matrix_High_2.t(), grad_HHL.transpose(dim0 = 2, dim1 = 3))).transpose(dim0 = 2, dim1 = 3)
        grad_HH = torch.add(torch.matmul(matrix_Low_2.t(), grad_LHH.transpose(dim0 = 2, dim1 = 3)), torch.matmul(matrix_High_2.t(), grad_HHH.transpose(dim0 = 2, dim1 = 3))).transpose(dim0 = 2, dim1 = 3)
        grad_L = torch.add(torch.matmul(grad_LL, matrix_Low_1.t()), torch.matmul(grad_LH, matrix_High_1.t()))
        grad_H = torch.add(torch.matmul(grad_HL, matrix_Low_1.t()), torch.matmul(grad_HH, matrix_High_1.t()))
        grad_input = torch.add(torch.matmul(matrix_Low_0.t(), grad_L), torch.matmul(matrix_High_0.t(), grad_H))
        return grad_input, None, None, None, None, None, None, None, None
class IDWTFunction_3D(Function):
    """3-D IDWT: reconstruct a volume from the eight subbands (LLL ... HHH).

    Fix vs. original: ctx.saved_variables is a removed/deprecated autograd
    API; use ctx.saved_tensors.
    """
    @staticmethod
    def forward(ctx, input_LLL, input_LLH, input_LHL, input_LHH,
                input_HLL, input_HLH, input_HHL, input_HHH,
                matrix_Low_0, matrix_Low_1, matrix_Low_2,
                matrix_High_0, matrix_High_1, matrix_High_2):
        ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_Low_2,
                              matrix_High_0, matrix_High_1, matrix_High_2)
        # Undo the axis-2 filtering first (transpose(2, 3) exposes that axis),
        # then axis-1, then axis-0.
        input_LL = torch.add(torch.matmul(matrix_Low_2.t(), input_LLL.transpose(dim0 = 2, dim1 = 3)), torch.matmul(matrix_High_2.t(), input_HLL.transpose(dim0 = 2, dim1 = 3))).transpose(dim0 = 2, dim1 = 3)
        input_LH = torch.add(torch.matmul(matrix_Low_2.t(), input_LLH.transpose(dim0 = 2, dim1 = 3)), torch.matmul(matrix_High_2.t(), input_HLH.transpose(dim0 = 2, dim1 = 3))).transpose(dim0 = 2, dim1 = 3)
        input_HL = torch.add(torch.matmul(matrix_Low_2.t(), input_LHL.transpose(dim0 = 2, dim1 = 3)), torch.matmul(matrix_High_2.t(), input_HHL.transpose(dim0 = 2, dim1 = 3))).transpose(dim0 = 2, dim1 = 3)
        input_HH = torch.add(torch.matmul(matrix_Low_2.t(), input_LHH.transpose(dim0 = 2, dim1 = 3)), torch.matmul(matrix_High_2.t(), input_HHH.transpose(dim0 = 2, dim1 = 3))).transpose(dim0 = 2, dim1 = 3)
        input_L = torch.add(torch.matmul(input_LL, matrix_Low_1.t()), torch.matmul(input_LH, matrix_High_1.t()))
        input_H = torch.add(torch.matmul(input_HL, matrix_Low_1.t()), torch.matmul(input_HH, matrix_High_1.t()))
        output = torch.add(torch.matmul(matrix_Low_0.t(), input_L), torch.matmul(matrix_High_0.t(), input_H))
        return output
    @staticmethod
    def backward(ctx, grad_output):
        matrix_Low_0, matrix_Low_1, matrix_Low_2, matrix_High_0, matrix_High_1, matrix_High_2 = ctx.saved_tensors
        grad_L = torch.matmul(matrix_Low_0, grad_output)
        grad_H = torch.matmul(matrix_High_0, grad_output)
        grad_LL = torch.matmul(grad_L, matrix_Low_1).transpose(dim0 = 2, dim1 = 3)
        grad_LH = torch.matmul(grad_L, matrix_High_1).transpose(dim0 = 2, dim1 = 3)
        grad_HL = torch.matmul(grad_H, matrix_Low_1).transpose(dim0 = 2, dim1 = 3)
        grad_HH = torch.matmul(grad_H, matrix_High_1).transpose(dim0 = 2, dim1 = 3)
        grad_LLL = torch.matmul(matrix_Low_2, grad_LL).transpose(dim0 = 2, dim1 = 3)
        grad_LLH = torch.matmul(matrix_Low_2, grad_LH).transpose(dim0 = 2, dim1 = 3)
        grad_LHL = torch.matmul(matrix_Low_2, grad_HL).transpose(dim0 = 2, dim1 = 3)
        grad_LHH = torch.matmul(matrix_Low_2, grad_HH).transpose(dim0 = 2, dim1 = 3)
        grad_HLL = torch.matmul(matrix_High_2, grad_LL).transpose(dim0 = 2, dim1 = 3)
        grad_HLH = torch.matmul(matrix_High_2, grad_LH).transpose(dim0 = 2, dim1 = 3)
        grad_HHL = torch.matmul(matrix_High_2, grad_HL).transpose(dim0 = 2, dim1 = 3)
        grad_HHH = torch.matmul(matrix_High_2, grad_HH).transpose(dim0 = 2, dim1 = 3)
        return grad_LLL, grad_LLH, grad_LHL, grad_LHH, grad_HLL, grad_HLH, grad_HHL, grad_HHH, None, None, None, None, None, None
| 10,296 | 59.928994 | 205 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/train_cifar.py | import argparse
import logging
import sys
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from wideresnet_wavelet import WideResNetWavelet
from wideresnet import WideResNet
from preactresnet import PreActResNet18, PreActResNet50
from models import *
from utils import *
# Broadcastable (3, 1, 1) channel statistics for NCHW image batches
# (cifar10_mean/cifar10_std come from the star-imported utils module).
mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
std = torch.tensor(cifar10_std).view(3,1,1).cuda()
def normalize(X):
    """Standardize a batch of images with the CIFAR-10 channel statistics."""
    return (X - mu)/std
# Pixel bounds in raw [0, 1] space (plain scalars here, unlike the
# per-channel normalized-space tensors used in utils_plus.py).
upper_limit, lower_limit = 1,0
def clamp(X, lower_limit, upper_limit):
    """Elementwise clip of X into [lower_limit, upper_limit]; bounds broadcast."""
    upper_clipped = torch.min(X, upper_limit)
    return torch.max(upper_clipped, lower_limit)
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy with label smoothing.

    The target distribution puts `1 - smoothing` mass on the true class and
    spreads `smoothing` uniformly over the remaining `classes - 1` classes.
    With smoothing=0 this reduces to standard cross-entropy.
    """
    def __init__(self, classes=10, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim
    def forward(self, pred, target):
        log_probs = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            # Smoothed one-hot target distribution (not part of the graph).
            smooth_targets = torch.full_like(log_probs, self.smoothing / (self.cls - 1))
            smooth_targets.scatter_(1, target.data.unsqueeze(1), self.confidence)
        per_sample = torch.sum(-smooth_targets * log_probs, dim=self.dim)
        return torch.mean(per_sample)
class Batches():
    """Thin DataLoader wrapper that, on iteration, moves each (x, y) batch to
    `device` and yields {'input': float x, 'target': long y} dicts.

    NOTE(review): relies on a module-level `device` that is not defined in
    this file's visible scope — presumably set elsewhere before iteration;
    confirm.
    """
    def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=True):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle, drop_last=drop_last
        )
    def __iter__(self):
        # Re-randomize the dataset's augmentation choices (if supported)
        # once per epoch, before handing out batches.
        if self.set_random_choices:
            self.dataset.set_random_choices()
        return ({'input': x.to(device).float(), 'target': y.to(device).long()} for (x,y) in self.dataloader)
    def __len__(self):
        return len(self.dataloader)
def mixup_data(x, y, alpha=1.0):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    # Mixing coefficient from Beta(alpha, alpha); alpha <= 0 disables mixing.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    perm = torch.randperm(x.size()[0]).cuda()
    mixed_x = lam * x + (1 - lam) * x[perm, :]
    return mixed_x, y, y[perm], lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Convex combination of the criterion evaluated on both mixup targets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
def dlr_loss(x, y):
    """Difference-of-Logits-Ratio loss (Croce & Hein, AutoAttack), batch mean.

    The CW-style margin (true logit minus strongest competitor) is scaled by
    the gap between the top and third-largest logits; 1e-12 avoids division
    by zero.
    """
    sorted_logits, sorted_idx = x.sort(dim=1)
    label_on_top = (sorted_idx[:, -1] == y).float()
    rows = np.arange(x.shape[0])
    margin = x[rows, y] - sorted_logits[:, -2] * label_on_top \
        - sorted_logits[:, -1] * (1. - label_on_top)
    scale = sorted_logits[:, -1] - sorted_logits[:, -3] + 1e-12
    return (-margin / scale).mean()
def CW_loss(x, y):
    """Carlini-Wagner margin loss (mean over the batch): negative gap between
    the true-class logit and its strongest competitor."""
    ordered, order_idx = x.sort(dim=1)
    hit = (order_idx[:, -1] == y).float()
    # Runner-up when the label is the argmax, else the top logit.
    best_other = ordered[:, -2] * hit + ordered[:, -1] * (1. - hit)
    rows = np.arange(x.shape[0])
    return -(x[rows, y] - best_other).mean()
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts,
               norm, mixup=False, y_a=None, y_b=None, lam=None,
               early_stop=False, early_stop_pgd_max=1,
               multitarget=False,
               use_DLRloss=False, use_CWloss=False,
               epoch=0, totalepoch=110, gamma=0.8,
               use_adaptive=False, s_HE=15,
               fast_better=False, BNeval=False):
    """Craft PGD adversarial perturbations with many training-time variants.

    Variants (mutually exclusive loss choices, selected by flags):
    mixup targets, multitarget random-label loss, DLR/CW losses blended with
    cross-entropy (weight beta_ = gamma * epoch / totalepoch), an adaptive
    scaled-logit loss (s_HE), and a gradient-penalty variant (fast_better).
    `norm` selects an l_inf or l_2 ball of radius `epsilon`; `alpha` is the
    step size; `early_stop` freezes examples once fooled `early_stop_pgd_max`
    times; `BNeval` runs the attack with BatchNorm in eval mode.

    Returns:
        (max_delta, iter_count): worst-case perturbation per example across
        restarts, and per-example iteration counts from the last restart.
    """
    max_loss = torch.zeros(y.shape[0]).cuda()
    max_delta = torch.zeros_like(X).cuda()
    if BNeval:
        model.eval()
    for _ in range(restarts):
        # early stop pgd counter for each x
        early_stop_pgd_count = early_stop_pgd_max * torch.ones(y.shape[0], dtype=torch.int32).cuda()
        # initialize perturbation
        delta = torch.zeros_like(X).cuda()
        if norm == "l_inf":
            delta.uniform_(-epsilon, epsilon)
        elif norm == "l_2":
            # Random point in the l_2 ball: normal direction, uniform radius.
            delta.normal_()
            d_flat = delta.view(delta.size(0),-1)
            n = d_flat.norm(p=2,dim=1).view(delta.size(0),1,1,1)
            r = torch.zeros_like(n).uniform_(0, 1)
            delta *= r/n*epsilon
        else:
            raise ValueError
        delta = clamp(delta, lower_limit-X, upper_limit-X)
        delta.requires_grad = True
        iter_count = torch.zeros(y.shape[0])
        # craft adversarial examples
        for _ in range(attack_iters):
            output = model(normalize(X + delta))
            # if use early stop pgd
            if early_stop:
                # calculate mask for early stop pgd
                if_success_fool = (output.max(1)[1] != y).to(dtype=torch.int32)
                early_stop_pgd_count = early_stop_pgd_count - if_success_fool
                index = torch.where(early_stop_pgd_count > 0)[0]
                iter_count[index] = iter_count[index] + 1
            else:
                index = slice(None,None,None)
            if not isinstance(index, slice) and len(index) == 0:
                break
            # Whether use mixup criterion
            if fast_better:
                # Cross-entropy plus a penalty on the input-gradient norm.
                loss_ori = F.cross_entropy(output, y)
                grad_ori = torch.autograd.grad(loss_ori, delta, create_graph=True)[0]
                loss_grad = (alpha / 4.) * (torch.norm(grad_ori.view(grad_ori.shape[0], -1), p=2, dim=1) ** 2)
                loss = loss_ori + loss_grad.mean()
                loss.backward()
                grad = delta.grad.detach()
            elif not mixup:
                if multitarget:
                    # Ascend on the true label, descend on a random other label.
                    random_label = torch.randint(low=0, high=10, size=y.shape).cuda()
                    random_direction = 2*((random_label == y).to(dtype=torch.float32) - 0.5)
                    loss = torch.mean(random_direction * F.cross_entropy(output, random_label, reduction='none'))
                    loss.backward()
                    grad = delta.grad.detach()
                elif use_DLRloss:
                    # Blend CE with DLR, weighted by training progress.
                    beta_ = gamma * epoch / totalepoch
                    loss = (1. - beta_) * F.cross_entropy(output, y) + beta_ * dlr_loss(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
                elif use_CWloss:
                    # Blend CE with CW, weighted by training progress.
                    beta_ = gamma * epoch / totalepoch
                    loss = (1. - beta_) * F.cross_entropy(output, y) + beta_ * CW_loss(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
                else:
                    if use_adaptive:
                        loss = F.cross_entropy(s_HE * output, y)
                    else:
                        loss = F.cross_entropy(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
            else:
                criterion = nn.CrossEntropyLoss()
                loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
                loss.backward()
                grad = delta.grad.detach()
            # Update only the still-active examples, then project back into
            # the norm ball and the valid pixel range.
            d = delta[index, :, :, :]
            g = grad[index, :, :, :]
            x = X[index, :, :, :]
            if norm == "l_inf":
                d = torch.clamp(d + alpha * torch.sign(g), min=-epsilon, max=epsilon)
            elif norm == "l_2":
                g_norm = torch.norm(g.view(g.shape[0],-1),dim=1).view(-1,1,1,1)
                scaled_g = g/(g_norm + 1e-10)
                d = (d + scaled_g*alpha).view(d.size(0),-1).renorm(p=2,dim=0,maxnorm=epsilon).view_as(d)
            d = clamp(d, lower_limit - x, upper_limit - x)
            delta.data[index, :, :, :] = d
            delta.grad.zero_()
        # Across restarts, keep the per-example delta with the largest loss.
        if mixup:
            criterion = nn.CrossEntropyLoss(reduction='none')
            all_loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
        else:
            all_loss = F.cross_entropy(model(normalize(X+delta)), y, reduction='none')
        max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
        max_loss = torch.max(max_loss, all_loss)
    if BNeval:
        model.train()
    return max_delta, iter_count
def get_args():
    """Build and parse the command-line arguments for adversarial training.

    Flags are grouped into: core training setup (model, schedule, attack),
    Group 1 (optimization/regularization tweaks) and Group 2 (loss and
    attack variants). Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='PreActResNet18')
    parser.add_argument('--l1', default=0, type=float)
    parser.add_argument('--data-dir', default='../cifar-data', type=str)
    parser.add_argument('--epochs', default=110, type=int)
    parser.add_argument('--lr-schedule', default='piecewise', choices=['superconverge', 'piecewise', 'linear', 'piecewisesmoothed', 'piecewisezoom', 'onedrop', 'multipledecay', 'cosine', 'cyclic'])
    parser.add_argument('--lr-max', default=0.1, type=float)
    parser.add_argument('--lr-one-drop', default=0.01, type=float)
    parser.add_argument('--lr-drop-epoch', default=100, type=int)
    parser.add_argument('--attack', default='pgd', type=str, choices=['pgd', 'fgsm', 'free', 'none'])
    # Perturbation sizes are given in integer /255 units.
    parser.add_argument('--epsilon', default=8, type=int)
    parser.add_argument('--test_epsilon', default=8, type=int)
    parser.add_argument('--attack-iters', default=10, type=int)
    parser.add_argument('--restarts', default=1, type=int)
    parser.add_argument('--pgd-alpha', default=2, type=float)
    parser.add_argument('--test-pgd-alpha', default=2, type=float)
    parser.add_argument('--fgsm-alpha', default=1.25, type=float)
    parser.add_argument('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2'])
    parser.add_argument('--fgsm-init', default='random', choices=['zero', 'random', 'previous'])
    parser.add_argument('--fname', default='cifar_model', type=str)
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--half', action='store_true')
    parser.add_argument('--width-factor', default=10, type=int)
    parser.add_argument('--resume', default=0, type=int)
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--val', action='store_true')
    parser.add_argument('--chkpt-iters', default=100, type=int)
    parser.add_argument('--mixture', action='store_true') # whether use mixture of clean and adv examples in a mini-batch
    parser.add_argument('--mixture_alpha', type=float)
    parser.add_argument('--l2', default=0, type=float)
    # Group 1
    parser.add_argument('--earlystopPGD', action='store_true') # whether use early stop in PGD
    parser.add_argument('--earlystopPGDepoch1', default=60, type=int)
    parser.add_argument('--earlystopPGDepoch2', default=100, type=int)
    parser.add_argument('--warmup_lr', action='store_true') # whether warm_up lr from 0 to max_lr in the first n epochs
    parser.add_argument('--warmup_lr_epoch', default=15, type=int)
    parser.add_argument('--weight_decay', default=5e-4, type=float)#weight decay
    parser.add_argument('--warmup_eps', action='store_true') # whether warm_up eps from 0 to 8/255 in the first n epochs
    parser.add_argument('--warmup_eps_epoch', default=15, type=int)
    parser.add_argument('--batch-size', default=128, type=int) #batch size
    parser.add_argument('--labelsmooth', action='store_true') # whether use label smoothing
    parser.add_argument('--labelsmoothvalue', default=0.0, type=float)
    parser.add_argument('--lrdecay', default='base', type=str, choices=['intenselr', 'base', 'looselr', 'lineardecay'])
    # Group 2
    parser.add_argument('--use_DLRloss', action='store_true') # whether use DLRloss
    parser.add_argument('--use_CWloss', action='store_true') # whether use CWloss
    parser.add_argument('--use_multitarget', action='store_true') # whether use multitarget
    parser.add_argument('--use_stronger_adv', action='store_true') # whether use mixture of clean and adv examples in a mini-batch
    parser.add_argument('--stronger_index', default=0, type=int)
    parser.add_argument('--use_FNandWN', action='store_true') # whether use FN and WN
    parser.add_argument('--use_adaptive', action='store_true') # whether use s in attack during training
    parser.add_argument('--s_FN', default=15, type=float) # s in FN
    parser.add_argument('--m_FN', default=0.2, type=float) # s in FN
    parser.add_argument('--use_FNonly', action='store_true') # whether use FN only
    parser.add_argument('--fast_better', action='store_true')
    parser.add_argument('--BNeval', action='store_true') # whether use eval mode for BN when crafting adversarial examples
    parser.add_argument('--focalloss', action='store_true') # whether use focalloss
    parser.add_argument('--focallosslambda', default=2., type=float)
    parser.add_argument('--activation', default='ReLU', type=str)
    parser.add_argument('--softplus_beta', default=1., type=float)
    parser.add_argument('--optimizer', default='momentum', choices=['momentum', 'Nesterov', 'SGD_GC', 'SGD_GCC', 'Adam', 'AdamW'])
    parser.add_argument('--mixup', action='store_true')
    parser.add_argument('--mixup-alpha', type=float)
    parser.add_argument('--cutout', action='store_true')
    parser.add_argument('--cutout-len', type=int)
    return parser.parse_args()
def get_auto_fname(args):
    """Derive a descriptive checkpoint-folder name from the training flags.

    Starts from model/schedule/eps/batch-size/lr and appends one suffix per
    active option, in a fixed order. Prints and returns the resulting name.
    """
    parts = [args.model + '_' + args.lr_schedule + '_eps' + str(args.epsilon)
             + '_bs' + str(args.batch_size) + '_maxlr' + str(args.lr_max)]
    # Group 1: optimization / regularization tweaks
    if args.earlystopPGD:
        parts.append('_earlystopPGD' + str(args.earlystopPGDepoch1) + str(args.earlystopPGDepoch2))
    if args.warmup_lr:
        parts.append('_warmuplr' + str(args.warmup_lr_epoch))
    if args.warmup_eps:
        parts.append('_warmupeps' + str(args.warmup_eps_epoch))
    if args.weight_decay != 5e-4:
        parts.append('_wd' + str(args.weight_decay))
    if args.labelsmooth:
        parts.append('_ls' + str(args.labelsmoothvalue))
    # Group 2: loss / attack variants
    if args.use_stronger_adv:
        parts.append('_usestrongeradv#' + str(args.stronger_index))
    if args.use_multitarget:
        parts.append('_usemultitarget')
    if args.use_DLRloss:
        parts.append('_useDLRloss')
    if args.use_CWloss:
        parts.append('_useCWloss')
    if args.use_FNandWN:
        parts.append('_HE' + 's' + str(args.s_FN) + 'm' + str(args.m_FN))
    if args.use_adaptive:
        parts.append('adaptive')
    if args.use_FNonly:
        parts.append('_FNonly')
    if args.fast_better:
        parts.append('_fastbetter')
    if args.activation != 'ReLU':
        parts.append('_' + args.activation)
    if args.activation == 'Softplus':
        parts.append(str(args.softplus_beta))
    if args.lrdecay != 'base':
        parts.append('_' + args.lrdecay)
    if args.BNeval:
        parts.append('_BNeval')
    if args.focalloss:
        parts.append('_focalloss' + str(args.focallosslambda))
    if args.optimizer != 'momentum':
        parts.append('_' + args.optimizer)
    if args.mixup:
        parts.append('_mixup' + str(args.mixup_alpha))
    if args.cutout:
        parts.append('_cutout' + str(args.cutout_len))
    if args.attack != 'pgd':
        parts.append('_' + args.attack)
    names = ''.join(parts)
    print('File name: ', names)
    return names
def main():
args = get_args()
if args.fname == 'auto':
names = get_auto_fname(args)
args.fname = 'trained_models/' + names
else:
args.fname = 'trained_models/' + args.fname
if not os.path.exists(args.fname):
os.makedirs(args.fname)
logger = logging.getLogger(__name__)
logging.basicConfig(
format='[%(asctime)s] - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.DEBUG,
handlers=[
logging.FileHandler(os.path.join(args.fname, 'eval.log' if args.eval else 'output.log')),
logging.StreamHandler()
])
logger.info(args)
# Set seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# Prepare data
transforms = [Crop(32, 32), FlipLR()]
if args.cutout:
transforms.append(Cutout(args.cutout_len, args.cutout_len))
if args.val:
try:
dataset = torch.load("cifar10_validation_split.pth")
except:
print("Couldn't find a dataset with a validation split, did you run "
"generate_validation.py?")
return
val_set = list(zip(transpose(dataset['val']['data']/255.), dataset['val']['labels']))
val_batches = Batches(val_set, args.batch_size, shuffle=False, num_workers=4)
else:
dataset = cifar10(args.data_dir)
train_set = list(zip(transpose(pad(dataset['train']['data'], 4)/255.),
dataset['train']['labels']))
train_set_x = Transform(train_set, transforms)
train_batches = Batches(train_set_x, args.batch_size, shuffle=True, set_random_choices=True, num_workers=4)
test_set = list(zip(transpose(dataset['test']['data']/255.), dataset['test']['labels']))
test_batches = Batches(test_set, args.batch_size, shuffle=False, num_workers=4)
# Set perturbations
epsilon = (args.epsilon / 255.)
test_epsilon = (args.test_epsilon / 255.)
pgd_alpha = (args.pgd_alpha / 255.)
test_pgd_alpha = (args.test_pgd_alpha / 255.)
# Set models
if args.model == 'VGG':
model = VGG('VGG19')
elif args.model == 'ResNet18':
model = ResNet18()
elif args.model == 'GoogLeNet':
model = GoogLeNet()
elif args.model == 'DenseNet121':
model = DenseNet121()
elif args.model == 'DenseNet201':
model = DenseNet201()
elif args.model == 'ResNeXt29':
model = ResNeXt29_2x64d()
elif args.model == 'ResNeXt29L':
model = ResNeXt29_32x4d()
elif args.model == 'MobileNet':
model = MobileNet()
elif args.model == 'MobileNetV2':
model = MobileNetV2()
elif args.model == 'DPN26':
model = DPN26()
elif args.model == 'DPN92':
model = DPN92()
elif args.model == 'ShuffleNetG2':
model = ShuffleNetG2()
elif args.model == 'SENet18':
model = SENet18()
elif args.model == 'ShuffleNetV2':
model = ShuffleNetV2(1)
elif args.model == 'EfficientNetB0':
model = EfficientNetB0()
elif args.model == 'PNASNetA':
model = PNASNetA()
elif args.model == 'RegNetX':
model = RegNetX_200MF()
elif args.model == 'RegNetLX':
model = RegNetX_400MF()
elif args.model == 'PreActResNet50':
model = PreActResNet50()
elif args.model == 'PreActResNet18':
model = PreActResNet18(normalize_only_FN=args.use_FNonly, normalize=args.use_FNandWN, scale=args.s_FN,
activation=args.activation, softplus_beta=args.softplus_beta)
elif args.model == 'WideResNet':
model = WideResNet(34, 10, widen_factor=10, dropRate=0.0, normalize=args.use_FNandWN,
activation=args.activation, softplus_beta=args.softplus_beta)
elif args.model == 'WideResNet_20':
model = WideResNet(34, 10, widen_factor=20, dropRate=0.0, normalize=args.use_FNandWN,
activation=args.activation, softplus_beta=args.softplus_beta)
elif args.model == "WideResNetWavelet":
model = WideResNetWavelet(34, 10, widen_factor=10, dropRate=0.0, normalize=args.use_FNandWN, activation=args.activation, softplus_beta=args.softplus_beta)
else:
raise ValueError("Unknown model")
model = nn.DataParallel(model).cuda()
model.train()
# Set training hyperparameters
if args.l2:
decay, no_decay = [], []
for name,param in model.named_parameters():
if 'bn' not in name and 'bias' not in name:
decay.append(param)
else:
no_decay.append(param)
params = [{'params':decay, 'weight_decay':args.l2},
{'params':no_decay, 'weight_decay': 0 }]
else:
params = model.parameters()
if args.lr_schedule == 'cyclic':
opt = torch.optim.Adam(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
else:
if args.optimizer == 'momentum':
opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
elif args.optimizer == 'Nesterov':
opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay, nesterov=True)
elif args.optimizer == 'SGD_GC':
opt = SGD_GC(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
elif args.optimizer == 'SGD_GCC':
opt = SGD_GCC(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
elif args.optimizer == 'Adam':
opt = torch.optim.Adam(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
elif args.optimizer == 'AdamW':
opt = torch.optim.AdamW(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
# Cross-entropy (mean)
if args.labelsmooth:
criterion = LabelSmoothingLoss(smoothing=args.labelsmoothvalue)
else:
criterion = nn.CrossEntropyLoss()
# If we use freeAT or fastAT with previous init
if args.attack == 'free':
delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
delta.requires_grad = True
elif args.attack == 'fgsm' and args.fgsm_init == 'previous':
delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
delta.requires_grad = True
if args.attack == 'free':
epochs = int(math.ceil(args.epochs / args.attack_iters))
else:
epochs = args.epochs
# Set lr schedule
if args.lr_schedule == 'superconverge':
lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0]
elif args.lr_schedule == 'piecewise':
        def lr_schedule(t, warm_up_lr = args.warmup_lr):
            # Piecewise schedule: optional linear warmup, then flat lr_max for
            # the first 100 epochs, followed by a decay phase selected by
            # args.lrdecay.
            # NOTE(review): falls through and implicitly returns None when
            # args.lrdecay is none of the handled values -- confirm intended.
            if t < 100:
                if warm_up_lr and t < args.warmup_lr_epoch:
                    # Linear warmup from ~0 to lr_max over warmup_lr_epoch epochs.
                    return (t + 1.) / args.warmup_lr_epoch * args.lr_max
                else:
                    return args.lr_max
            if args.lrdecay == 'lineardecay':
                # Linear decay to 0 between epoch 100 and 105, then 0.
                if t < 105:
                    return args.lr_max * 0.02 * (105 - t)
                else:
                    return 0.
            elif args.lrdecay == 'intenselr':
                # 10x drop at epoch 100, 100x from epoch 102.
                if t < 102:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
            elif args.lrdecay == 'looselr':
                # 10x drop at epoch 100, 100x from epoch 150.
                if t < 150:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
            elif args.lrdecay == 'base':
                # Standard two-step decay: 10x at epoch 100, 100x at epoch 105.
                if t < 105:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
elif args.lr_schedule == 'linear':
lr_schedule = lambda t: np.interp([t], [0, args.epochs // 3, args.epochs * 2 // 3, args.epochs], [args.lr_max, args.lr_max, args.lr_max / 10, args.lr_max / 100])[0]
elif args.lr_schedule == 'onedrop':
def lr_schedule(t):
if t < args.lr_drop_epoch:
return args.lr_max
else:
return args.lr_one_drop
elif args.lr_schedule == 'multipledecay':
def lr_schedule(t):
return args.lr_max - (t//(args.epochs//10))*(args.lr_max/10)
elif args.lr_schedule == 'cosine':
def lr_schedule(t):
return args.lr_max * 0.5 * (1 + np.cos(t / args.epochs * np.pi))
elif args.lr_schedule == 'cyclic':
        def lr_schedule(t, stepsize=18, min_lr=1e-5, max_lr=args.lr_max):
            # Triangular cyclic LR: ramps linearly between min_lr and max_lr
            # with a half-cycle of `stepsize` epochs.
            # Scaler: we can adapt this if we do not want the triangular CLR
            scaler = lambda x: 1.
            # Additional function to see where on the cycle we are
            cycle = math.floor(1 + t / (2 * stepsize))
            x = abs(t / stepsize - 2 * cycle + 1)
            # `relative` is 1 at a cycle peak and 0 at the valleys.
            relative = max(0, (1 - x)) * scaler(cycle)
            return min_lr + (max_lr - min_lr) * relative
#### Set stronger adv attacks when decay the lr ####
        def eps_alpha_schedule(t, warm_up_eps = args.warmup_eps, if_use_stronger_adv=args.use_stronger_adv, stronger_index=args.stronger_index): # Schedule number 0
            # Returns the (epsilon, pgd_alpha, restarts) triple for (fractional)
            # epoch t: epsilon is optionally warmed up linearly, and with
            # if_use_stronger_adv the attack strength is bumped after epochs
            # 100 and 105 according to `stronger_index`.
            if stronger_index == 0:
                epsilon_s = [epsilon * 1.5, epsilon * 2]
                pgd_alpha_s = [pgd_alpha, pgd_alpha]
            elif stronger_index == 1:
                epsilon_s = [epsilon * 1.5, epsilon * 2]
                pgd_alpha_s = [pgd_alpha * 1.25, pgd_alpha * 1.5]
            elif stronger_index == 2:
                epsilon_s = [epsilon * 2, epsilon * 2.5]
                pgd_alpha_s = [pgd_alpha * 1.5, pgd_alpha * 2]
            else:
                print('Undefined stronger index')
                # NOTE(review): epsilon_s / pgd_alpha_s stay unbound on this
                # branch; reaching t >= 100 with if_use_stronger_adv would then
                # raise NameError -- confirm this path cannot occur.
            if if_use_stronger_adv:
                if t < 100:
                    if t < args.warmup_eps_epoch and warm_up_eps:
                        # Linear epsilon warmup over warmup_eps_epoch epochs.
                        return (t + 1.) / args.warmup_eps_epoch * epsilon, pgd_alpha, args.restarts
                    else:
                        return epsilon, pgd_alpha, args.restarts
                elif t < 105:
                    return epsilon_s[0], pgd_alpha_s[0], args.restarts
                else:
                    return epsilon_s[1], pgd_alpha_s[1], args.restarts
            else:
                if t < args.warmup_eps_epoch and warm_up_eps:
                    return (t + 1.) / args.warmup_eps_epoch * epsilon, pgd_alpha, args.restarts
                else:
                    return epsilon, pgd_alpha, args.restarts
#### Set the counter for the early stop of PGD ####
def early_stop_counter_schedule(t):
if t < args.earlystopPGDepoch1:
return 1
elif t < args.earlystopPGDepoch2:
return 2
else:
return 3
best_test_robust_acc = 0
best_val_robust_acc = 0
if args.resume:
start_epoch = args.resume
model.load_state_dict(torch.load(os.path.join(args.fname, f'model_{start_epoch-1}.pth')))
opt.load_state_dict(torch.load(os.path.join(args.fname, f'opt_{start_epoch-1}.pth')))
logger.info(f'Resuming at epoch {start_epoch}')
best_test_robust_acc = torch.load(os.path.join(args.fname, f'model_best.pth'))['test_robust_acc']
if args.val:
best_val_robust_acc = torch.load(os.path.join(args.fname, f'model_val.pth'))['val_robust_acc']
else:
start_epoch = 0
if args.eval:
if not args.resume:
logger.info("No model loaded to evaluate, specify with --resume FNAME")
return
logger.info("[Evaluation mode]")
# logger.info('Epoch \t Train Time \t Test Time \t LR \t Train Loss \t Train Grad \t Train Acc \t Train Robust Loss \t Train Robust Acc || \t Test Loss \t Test Acc \t Test Robust Loss \t Test Robust Acc')
logger.info('Epoch \t Train Acc \t Train Robust Acc \t Test Acc \t Test Robust Acc')
# Records per epoch for savetxt
train_loss_record = []
train_acc_record = []
train_robust_loss_record = []
train_robust_acc_record = []
train_grad_record = []
test_loss_record = []
test_acc_record = []
test_robust_loss_record = []
test_robust_acc_record = []
test_grad_record = []
for epoch in range(start_epoch, epochs):
model.train()
start_time = time.time()
train_loss = 0
train_acc = 0
train_robust_loss = 0
train_robust_acc = 0
train_n = 0
train_grad = 0
record_iter = torch.tensor([])
for i, batch in enumerate(train_batches):
if args.eval:
break
X, y = batch['input'], batch['target']
onehot_target_withmargin_HE = args.m_FN * args.s_FN * torch.nn.functional.one_hot(y, num_classes=10)
if args.mixup:
X, y_a, y_b, lam = mixup_data(X, y, args.mixup_alpha)
X, y_a, y_b = map(Variable, (X, y_a, y_b))
epoch_now = epoch + (i + 1) / len(train_batches)
lr = lr_schedule(epoch_now)
opt.param_groups[0].update(lr=lr)
if args.attack == 'pgd':
# Random initialization
epsilon_sche, pgd_alpha_sche, restarts_sche = eps_alpha_schedule(epoch_now)
early_counter_max = early_stop_counter_schedule(epoch_now)
if args.mixup:
delta, iter_counts = attack_pgd(model, X, y, epsilon_sche, pgd_alpha_sche, args.attack_iters, restarts_sche, args.norm,
early_stop=args.earlystopPGD, early_stop_pgd_max=early_counter_max,
mixup=True, y_a=y_a, y_b=y_b, lam=lam)
else:
delta, iter_counts = attack_pgd(model, X, y, epsilon_sche, pgd_alpha_sche, args.attack_iters, restarts_sche, args.norm,
early_stop=args.earlystopPGD, early_stop_pgd_max=early_counter_max, multitarget=args.use_multitarget,
use_DLRloss=args.use_DLRloss, use_CWloss=args.use_CWloss,
epoch=epoch_now, totalepoch=args.epochs, gamma=0.8,
use_adaptive=args.use_adaptive, s_HE=args.s_FN,
fast_better=args.fast_better, BNeval=args.BNeval)
record_iter = torch.cat((record_iter, iter_counts))
delta = delta.detach()
elif args.attack == 'fgsm':
delta,_ = attack_pgd(model, X, y, epsilon, args.fgsm_alpha*epsilon, 1, 1, args.norm, fast_better=args.fast_better)
delta = delta.detach()
# Standard training
elif args.attack == 'none':
delta = torch.zeros_like(X)
adv_input = normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))
adv_input.requires_grad = True
robust_output = model(adv_input)
# Training losses
if args.mixup:
clean_input = normalize(X)
clean_input.requires_grad = True
output = model(clean_input)
robust_loss = mixup_criterion(criterion, robust_output, y_a, y_b, lam)
elif args.mixture:
clean_input = normalize(X)
clean_input.requires_grad = True
output = model(clean_input)
robust_loss = args.mixture_alpha * criterion(robust_output, y) + (1-args.mixture_alpha) * criterion(output, y)
else:
clean_input = normalize(X)
clean_input.requires_grad = True
output = model(clean_input)
if args.focalloss:
criterion_nonreduct = nn.CrossEntropyLoss(reduction='none')
robust_confidence = F.softmax(robust_output, dim=1)[:, y].detach()
robust_loss = (criterion_nonreduct(robust_output, y) * ((1. - robust_confidence) ** args.focallosslambda)).mean()
elif args.use_DLRloss:
beta_ = 0.8 * epoch_now / args.epochs
robust_loss = (1. - beta_) * F.cross_entropy(robust_output, y) + beta_ * dlr_loss(robust_output, y)
elif args.use_CWloss:
beta_ = 0.8 * epoch_now / args.epochs
robust_loss = (1. - beta_) * F.cross_entropy(robust_output, y) + beta_ * CW_loss(robust_output, y)
elif args.use_FNandWN:
#print('use FN and WN with margin')
robust_loss = criterion(args.s_FN * robust_output - onehot_target_withmargin_HE, y)
else:
robust_loss = criterion(robust_output, y)
if args.l1:
for name,param in model.named_parameters():
if 'bn' not in name and 'bias' not in name:
robust_loss += args.l1*param.abs().sum()
opt.zero_grad()
robust_loss.backward()
opt.step()
clean_input = normalize(X)
clean_input.requires_grad = True
output = model(clean_input)
if args.mixup:
loss = mixup_criterion(criterion, output, y_a, y_b, lam)
else:
loss = criterion(output, y)
# Get the gradient norm values
input_grads = torch.autograd.grad(loss, clean_input, create_graph=False)[0]
# Record the statstic values
train_robust_loss += robust_loss.item() * y.size(0)
train_robust_acc += (robust_output.max(1)[1] == y).sum().item()
train_loss += loss.item() * y.size(0)
train_acc += (output.max(1)[1] == y).sum().item()
train_n += y.size(0)
train_grad += input_grads.abs().sum()
train_time = time.time()
if args.earlystopPGD:
print('Iter mean: ', record_iter.mean().item(), ' Iter std: ', record_iter.std().item())
#print('Learning rate: ', lr)
#print('Eps: ', epsilon_sche)
# Evaluate on test data
model.eval()
test_loss = 0
test_acc = 0
test_robust_loss = 0
test_robust_acc = 0
test_n = 0
test_grad = 0
for i, batch in enumerate(test_batches):
X, y = batch['input'], batch['target']
# Random initialization
if args.attack == 'none':
delta = torch.zeros_like(X)
else:
delta, _ = attack_pgd(model, X, y, test_epsilon, test_pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=False)
delta = delta.detach()
adv_input = normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))
adv_input.requires_grad = True
robust_output = model(adv_input)
robust_loss = criterion(robust_output, y)
clean_input = normalize(X)
clean_input.requires_grad = True
output = model(clean_input)
loss = criterion(output, y)
# Get the gradient norm values
input_grads = torch.autograd.grad(loss, clean_input, create_graph=False)[0]
test_robust_loss += robust_loss.item() * y.size(0)
test_robust_acc += (robust_output.max(1)[1] == y).sum().item()
test_loss += loss.item() * y.size(0)
test_acc += (output.max(1)[1] == y).sum().item()
test_n += y.size(0)
test_grad += input_grads.abs().sum()
test_time = time.time()
if args.val:
val_loss = 0
val_acc = 0
val_robust_loss = 0
val_robust_acc = 0
val_n = 0
for i, batch in enumerate(val_batches):
X, y = batch['input'], batch['target']
# Random initialization
if args.attack == 'none':
delta = torch.zeros_like(X)
elif args.attack == 'pgd':
delta, _ = attack_pgd(model, X, y, test_epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=False)
elif args.attack == 'fgsm':
delta,_ = attack_pgd(model, X, y, epsilon, args.fgsm_alpha*epsilon, 1, 1, args.norm, fast_better=args.fast_better)
delta = delta.detach()
robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit)))
robust_loss = criterion(robust_output, y)
output = model(normalize(X))
loss = criterion(output, y)
val_robust_loss += robust_loss.item() * y.size(0)
val_robust_acc += (robust_output.max(1)[1] == y).sum().item()
val_loss += loss.item() * y.size(0)
val_acc += (output.max(1)[1] == y).sum().item()
val_n += y.size(0)
if not args.eval:
# logger.info('%d \t %.1f \t %.1f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f %.4f \t %.4f \t %.4f',
# epoch, train_time - start_time, test_time - train_time, lr,
# train_loss/train_n, train_grad/train_n, train_acc/train_n, train_robust_loss/train_n, train_robust_acc/train_n,
# test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
logger.info('%d \t %.4f \t %.4f \t %.4f \t %.4f',
epoch, train_acc/train_n, train_robust_acc/train_n, test_acc/test_n, test_robust_acc/test_n)
# Save results
train_loss_record.append(train_loss/train_n)
train_acc_record.append(train_acc/train_n)
train_robust_loss_record.append(train_robust_loss/train_n)
train_robust_acc_record.append(train_robust_acc/train_n)
train_grad_record.append(train_grad/train_n)
np.savetxt(args.fname+'/train_loss_record.txt', np.array(train_loss_record))
np.savetxt(args.fname+'/train_acc_record.txt', np.array(train_acc_record))
np.savetxt(args.fname+'/train_robust_loss_record.txt', np.array(train_robust_loss_record))
np.savetxt(args.fname+'/train_robust_acc_record.txt', np.array(train_robust_acc_record))
np.savetxt(args.fname+'/train_grad_record.txt', np.array(train_grad_record))
test_loss_record.append(test_loss/train_n)
test_acc_record.append(test_acc/train_n)
test_robust_loss_record.append(test_robust_loss/train_n)
test_robust_acc_record.append(test_robust_acc/train_n)
test_grad_record.append(test_grad/train_n)
np.savetxt(args.fname+'/test_loss_record.txt', np.array(test_loss_record))
np.savetxt(args.fname+'/test_acc_record.txt', np.array(test_acc_record))
np.savetxt(args.fname+'/test_robust_loss_record.txt', np.array(test_robust_loss_record))
np.savetxt(args.fname+'/test_robust_acc_record.txt', np.array(test_robust_acc_record))
np.savetxt(args.fname+'/test_grad_record.txt', np.array(test_grad_record))
if args.val:
logger.info('validation %.4f \t %.4f \t %.4f \t %.4f',
val_loss/val_n, val_acc/val_n, val_robust_loss/val_n, val_robust_acc/val_n)
if val_robust_acc/val_n > best_val_robust_acc:
torch.save({
'state_dict':model.state_dict(),
'test_robust_acc':test_robust_acc/test_n,
'test_robust_loss':test_robust_loss/test_n,
'test_loss':test_loss/test_n,
'test_acc':test_acc/test_n,
'val_robust_acc':val_robust_acc/val_n,
'val_robust_loss':val_robust_loss/val_n,
'val_loss':val_loss/val_n,
'val_acc':val_acc/val_n,
}, os.path.join(args.fname, f'model_val.pth'))
best_val_robust_acc = val_robust_acc/val_n
# save checkpoint
if epoch > 99 or (epoch+1) % args.chkpt_iters == 0 or epoch+1 == epochs:
torch.save(model.state_dict(), os.path.join(args.fname, f'model_{epoch}.pth'))
torch.save(opt.state_dict(), os.path.join(args.fname, f'opt_{epoch}.pth'))
# save best
if test_robust_acc/test_n > best_test_robust_acc:
torch.save({
'state_dict':model.state_dict(),
'test_robust_acc':test_robust_acc/test_n,
'test_robust_loss':test_robust_loss/test_n,
'test_loss':test_loss/test_n,
'test_acc':test_acc/test_n,
}, os.path.join(args.fname, f'model_best.pth'))
best_test_robust_acc = test_robust_acc/test_n
else:
logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f',
epoch, train_time - start_time, test_time - train_time, -1,
-1, -1, -1, -1,
test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
return
if __name__ == "__main__":
main()
| 40,405 | 41.177453 | 208 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/wideresnet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-activation wide-ResNet basic block: BN-act-conv3x3, BN-act-conv3x3,
    with a 1x1 projection shortcut when the channel count / stride changes."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0, activation='ReLU', softplus_beta=1):
        super(BasicBlock, self).__init__()
        # Parameterized modules are created in the same order as before so a
        # seeded initialization stays reproducible.
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        # Select the activation pair and a one-letter debug tag.
        if activation == 'ReLU':
            make_act, tag = (lambda: nn.ReLU(inplace=True)), 'R'
        elif activation == 'Softplus':
            make_act, tag = (lambda: nn.Softplus(beta=softplus_beta, threshold=20)), 'S'
        elif activation == 'GELU':
            make_act, tag = (lambda: nn.GELU()), 'G'
        elif activation == 'ELU':
            make_act, tag = (lambda: nn.ELU(alpha=1.0, inplace=True)), 'E'
        else:
            make_act = None  # unknown activation: keep the original lenient behavior
        if make_act is not None:
            self.relu1 = make_act()
            self.relu2 = make_act()
            print(tag)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                               padding=0, bias=False) or None
    def forward(self, x):
        # Pre-activation: when shapes differ, the activated input feeds both
        # the residual branch and the projection shortcut.
        if self.equalInOut:
            out = self.relu1(self.bn1(x))
            branch_in = out
        else:
            x = self.relu1(self.bn1(x))
            branch_in = x
        out = self.relu2(self.bn2(self.conv1(branch_in)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        identity = x if self.equalInOut else self.convShortcut(x)
        return torch.add(identity, out)
class NetworkBlock(nn.Module):
    """A stage of `nb_layers` blocks; only the first block may change the
    channel count or apply the stage stride, the rest run at out_planes / 1."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, activation='ReLU', softplus_beta=1):
        super(NetworkBlock, self).__init__()
        self.activation = activation
        self.softplus_beta = softplus_beta
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        stage = []
        for idx in range(int(nb_layers)):
            first = (idx == 0)
            stage.append(block(in_planes if first else out_planes,
                               out_planes,
                               stride if first else 1,
                               dropRate,
                               self.activation, self.softplus_beta))
        return nn.Sequential(*stage)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-widen_factor) for 32x32 inputs.

    When `normalize` is True the penultimate features and the (bias-free)
    classifier weights are L2-normalized in forward() -- presumably the
    feature/weight-normalization variant used with hypersphere-style losses;
    confirm against the training script.
    """
    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0, normalize=False, activation='ReLU', softplus_beta=1):
        super(WideResNet, self).__init__()
        # Channel widths of the stem and the three residual stages.
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert ((depth - 4) % 6 == 0)
        # Blocks per stage: 2 convs per block, 3 stages, plus 4 fixed layers.
        n = (depth - 4) / 6
        block = BasicBlock
        self.normalize = normalize
        #self.scale = scale
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 1st sub-block
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate, activation=activation, softplus_beta=softplus_beta)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        if activation == 'ReLU':
            self.relu = nn.ReLU(inplace=True)
        elif activation == 'Softplus':
            self.relu = nn.Softplus(beta=softplus_beta, threshold=20)
        elif activation == 'GELU':
            self.relu = nn.GELU()
        elif activation == 'ELU':
            self.relu = nn.ELU(alpha=1.0, inplace=True)
        print('Use activation of ' + activation)
        if self.normalize:
            # Bias-free classifier so normalized features give cosine logits.
            self.fc = nn.Linear(nChannels[3], num_classes, bias = False)
        else:
            self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style init for convs (note: `n` is reused here as the fan-out);
        # BN to identity; zero linear bias when present.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear) and not self.normalize:
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        if self.normalize:
            # L2-normalize features and (in-place) the classifier weights.
            out = F.normalize(out, p=2, dim=1)
            for _, module in self.fc.named_modules():
                if isinstance(module, nn.Linear):
                    module.weight.data = F.normalize(module.weight, p=2, dim=1)
        return self.fc(out) | 5,747 | 43.90625 | 141 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/shufflenetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle used after grouped convolutions (ShuffleNet)."""
    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups
    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> swap group/channel dims -> [N,C,H,W]
        n, c, h, w = x.shape
        g = self.groups
        grouped = x.view(n, g, c // g, h, w)
        return grouped.transpose(1, 2).reshape(n, c, h, w)
class SplitBlock(nn.Module):
    """Split a tensor along the channel axis at a given ratio (ShuffleNetV2)."""
    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio
    def forward(self, x):
        split_at = int(x.size(1) * self.ratio)
        return x[:, :split_at], x[:, split_at:]
class BasicBlock(nn.Module):
    """ShuffleNetV2 basic unit: split the channels, transform one half with
    1x1 -> depthwise 3x3 -> 1x1 convolutions, concatenate with the untouched
    half and channel-shuffle the result."""
    def __init__(self, in_channels, split_ratio=0.5):
        super(BasicBlock, self).__init__()
        # Module creation order kept stable for seeded-init reproducibility.
        self.split = SplitBlock(split_ratio)
        branch_ch = int(in_channels * split_ratio)
        # 1x1 pointwise
        self.conv1 = nn.Conv2d(branch_ch, branch_ch, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(branch_ch)
        # 3x3 depthwise (groups == channels)
        self.conv2 = nn.Conv2d(branch_ch, branch_ch, kernel_size=3, stride=1,
                               padding=1, groups=branch_ch, bias=False)
        self.bn2 = nn.BatchNorm2d(branch_ch)
        # 1x1 pointwise
        self.conv3 = nn.Conv2d(branch_ch, branch_ch, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(branch_ch)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        passthrough, branch = self.split(x)
        y = F.relu(self.bn1(self.conv1(branch)))
        y = self.bn2(self.conv2(y))  # no activation after the depthwise conv
        y = F.relu(self.bn3(self.conv3(y)))
        return self.shuffle(torch.cat([passthrough, y], 1))
class DownBlock(nn.Module):
    """ShuffleNetV2 downsampling unit: two stride-2 branches (each producing
    out_channels//2 channels) run in parallel, are concatenated, then
    channel-shuffled."""
    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        half = out_channels // 2
        # Left branch: depthwise 3x3 stride 2, then 1x1 pointwise.
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=3,
                               stride=2, padding=1, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, half, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(half)
        # Right branch: 1x1, depthwise 3x3 stride 2, 1x1.
        self.conv3 = nn.Conv2d(in_channels, half, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(half)
        self.conv4 = nn.Conv2d(half, half, kernel_size=3, stride=2,
                               padding=1, groups=half, bias=False)
        self.bn4 = nn.BatchNorm2d(half)
        self.conv5 = nn.Conv2d(half, half, kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(half)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        left = self.bn1(self.conv1(x))
        left = F.relu(self.bn2(self.conv2(left)))
        right = F.relu(self.bn3(self.conv3(x)))
        right = self.bn4(self.conv4(right))
        right = F.relu(self.bn5(self.conv5(right)))
        return self.shuffle(torch.cat([left, right], 1))
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 for small (presumably 32x32) inputs: stem conv, three
    shuffle stages, a final 1x1 conv, 4x4 average pooling and a 10-way linear
    classifier. `net_size` selects a width configuration from `configs`."""
    def __init__(self, net_size):
        super(ShuffleNetV2, self).__init__()
        cfg = configs[net_size]
        out_channels = cfg['out_channels']
        num_blocks = cfg['num_blocks']
        self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        self.linear = nn.Linear(out_channels[3], 10)
    def _make_layer(self, out_channels, num_blocks):
        # One strided DownBlock followed by `num_blocks` BasicBlocks.
        stage = [DownBlock(self.in_channels, out_channels)]
        for _ in range(num_blocks):
            stage.append(BasicBlock(out_channels))
            self.in_channels = out_channels
        return nn.Sequential(*stage)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        # The ImageNet stem max-pool is deliberately commented out upstream.
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
# Per-size hyperparameters for ShuffleNetV2, keyed by the width multiplier
# (0.5x/1x/1.5x/2x). 'out_channels' gives the widths of the three shuffle
# stages plus the final 1x1 conv; 'num_blocks' gives the number of
# BasicBlocks that follow the DownBlock in each stage.
configs = {
    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },
    1: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2: {
        'out_channels': (224, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
def test():
    """Smoke test: forward a random batch through the 0.5x model and print
    the logits shape."""
    model = ShuffleNetV2(net_size=0.5)
    batch = torch.randn(3, 3, 32, 32)
    print(model(batch).shape)
# test()
| 5,530 | 32.932515 | 107 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/regnet.py | '''RegNet in PyTorch.
Paper: "Designing Network Design Spaces".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SE(nn.Module):
    '''Squeeze-and-Excitation block.'''
    def __init__(self, in_planes, se_planes):
        super(SE, self).__init__()
        # Two 1x1 convs implement the squeeze/excite bottleneck MLP.
        self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True)
    def forward(self, x):
        # Squeeze: global average pool; excite: bottleneck + sigmoid gate.
        gate = F.adaptive_avg_pool2d(x, (1, 1))
        gate = F.relu(self.se1(gate))
        gate = self.se2(gate).sigmoid()
        return x * gate
class Block(nn.Module):
    """RegNet bottleneck residual block: 1x1 reduce, 3x3 grouped conv
    (optionally followed by SE), 1x1 expand, with a projection shortcut when
    the spatial size or width changes."""
    def __init__(self, w_in, w_out, stride, group_width, bottleneck_ratio, se_ratio):
        super(Block, self).__init__()
        w_b = int(round(w_out * bottleneck_ratio))
        # 1x1 reduction
        self.conv1 = nn.Conv2d(w_in, w_b, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(w_b)
        # 3x3 grouped conv; group_width fixes the channels per group
        num_groups = w_b // group_width
        self.conv2 = nn.Conv2d(w_b, w_b, kernel_size=3, stride=stride,
                               padding=1, groups=num_groups, bias=False)
        self.bn2 = nn.BatchNorm2d(w_b)
        # optional squeeze-and-excitation
        self.with_se = se_ratio > 0
        if self.with_se:
            w_se = int(round(w_in * se_ratio))
            self.se = SE(w_b, w_se)
        # 1x1 expansion
        self.conv3 = nn.Conv2d(w_b, w_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(w_out)
        if stride != 1 or w_in != w_out:
            # projection shortcut to match the residual shape
            self.shortcut = nn.Sequential(
                nn.Conv2d(w_in, w_out,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(w_out)
            )
        else:
            self.shortcut = nn.Sequential()
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        if self.with_se:
            out = self.se(out)
        out = self.bn3(self.conv3(out))
        out = out + self.shortcut(x)
        return F.relu(out)
class RegNet(nn.Module):
    """RegNet backbone for small inputs: a 3x3 stem followed by four stages
    of bottleneck Blocks, global average pooling and a linear classifier.
    Per-stage widths/depths/strides come from the `cfg` dict."""
    def __init__(self, cfg, num_classes=10):
        super(RegNet, self).__init__()
        self.cfg = cfg
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(0)
        self.layer2 = self._make_layer(1)
        self.layer3 = self._make_layer(2)
        self.layer4 = self._make_layer(3)
        self.linear = nn.Linear(self.cfg['widths'][-1], num_classes)
    def _make_layer(self, idx):
        # Stage `idx`: the first block applies the stage stride, the rest 1.
        cfg = self.cfg
        width = cfg['widths'][idx]
        blocks = []
        for i in range(cfg['depths'][idx]):
            stride = cfg['strides'][idx] if i == 0 else 1
            blocks.append(Block(self.in_planes, width, stride,
                                cfg['group_width'], cfg['bottleneck_ratio'],
                                cfg['se_ratio']))
            self.in_planes = width
        return nn.Sequential(*blocks)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = out.view(out.size(0), -1)
        return self.linear(out)
def RegNetX_200MF():
    """RegNetX-200MF configuration (no squeeze-and-excitation)."""
    return RegNet({
        'depths': [1, 1, 4, 7],
        'widths': [24, 56, 152, 368],
        'strides': [1, 1, 2, 2],
        'group_width': 8,
        'bottleneck_ratio': 1,
        'se_ratio': 0,
    })
def RegNetX_400MF():
    """RegNetX-400MF configuration (no squeeze-and-excitation)."""
    return RegNet({
        'depths': [1, 2, 7, 12],
        'widths': [32, 64, 160, 384],
        'strides': [1, 1, 2, 2],
        'group_width': 16,
        'bottleneck_ratio': 1,
        'se_ratio': 0,
    })
def RegNetY_400MF():
    """RegNetY-400MF configuration (RegNetX-400MF plus SE, ratio 0.25)."""
    return RegNet({
        'depths': [1, 2, 7, 12],
        'widths': [32, 64, 160, 384],
        'strides': [1, 1, 2, 2],
        'group_width': 16,
        'bottleneck_ratio': 1,
        'se_ratio': 0.25,
    })
def test():
    """Smoke test: print the model and the logits shape for a random batch."""
    net = RegNetX_200MF()
    print(net)
    inputs = torch.randn(2, 3, 32, 32)
    print(net(inputs).shape)
if __name__ == '__main__':
test()
| 4,548 | 28.160256 | 106 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/efficientnet.py | '''EfficientNet in PyTorch.
Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
    """Swish / SiLU activation: x * sigmoid(x)."""
    return x.sigmoid() * x
def drop_connect(x, drop_ratio):
    """Stochastic depth ("drop connect"): zero whole examples of the batch
    with probability `drop_ratio` and rescale survivors by 1/keep so the
    expected activation is unchanged. NOTE: mutates `x` in place and returns it."""
    keep_prob = 1.0 - drop_ratio
    # One Bernoulli draw per example, broadcast over (C, H, W).
    mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
    mask.bernoulli_(keep_prob)
    return x.div_(keep_prob).mul_(mask)
class SE(nn.Module):
    '''Squeeze-and-Excitation block with Swish.'''
    def __init__(self, in_channels, se_channels):
        super(SE, self).__init__()
        self.se1 = nn.Conv2d(in_channels, se_channels,
                             kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_channels, in_channels,
                             kernel_size=1, bias=True)
    def forward(self, x):
        # Squeeze to 1x1, excite through a swish bottleneck, sigmoid-gate x.
        w = F.adaptive_avg_pool2d(x, (1, 1))
        w = swish(self.se1(w))
        w = self.se2(w).sigmoid()
        return x * w
class Block(nn.Module):
    '''expansion + depthwise + pointwise + squeeze-excitation'''
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio=1,
                 se_ratio=0.,
                 drop_rate=0.):
        super(Block, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio
        # Expansion
        channels = expand_ratio * in_channels
        self.conv1 = nn.Conv2d(in_channels,
                               channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        # Depthwise conv
        # NOTE(review): the "same" padding below assumes kernel_size is 3 or 5.
        self.conv2 = nn.Conv2d(channels,
                               channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(1 if kernel_size == 3 else 2),
                               groups=channels,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        # SE layers
        se_channels = int(in_channels * se_ratio)
        self.se = SE(channels, se_channels)
        # Output
        self.conv3 = nn.Conv2d(channels,
                               out_channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        # Skip connection if in and out shapes are the same (MV-V2 style)
        self.has_skip = (stride == 1) and (in_channels == out_channels)
    def forward(self, x):
        # The 1x1 expansion is skipped entirely when expand_ratio == 1.
        out = x if self.expand_ratio == 1 else swish(self.bn1(self.conv1(x)))
        out = swish(self.bn2(self.conv2(out)))
        out = self.se(out)
        out = self.bn3(self.conv3(out))
        if self.has_skip:
            if self.training and self.drop_rate > 0:
                # Stochastic depth on the residual branch (training only).
                out = drop_connect(out, self.drop_rate)
            out = out + x
        return out
class EfficientNet(nn.Module):
    """EfficientNet backbone built from MBConv `Block`s according to `cfg`,
    with a stride-1 3x3 stem, global pooling, dropout and a linear classifier.

    cfg keys: per-stage lists 'expansion', 'out_channels', 'num_blocks',
    'kernel_size', 'stride', plus scalars 'dropout_rate' and
    'drop_connect_rate'.
    """
    def __init__(self, cfg, num_classes=10):
        super(EfficientNet, self).__init__()
        self.cfg = cfg
        self.conv1 = nn.Conv2d(3,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_channels=32)
        self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)
    def _make_layers(self, in_channels):
        layers = []
        cfg = [self.cfg[k] for k in ['expansion', 'out_channels', 'num_blocks', 'kernel_size',
                                     'stride']]
        b = 0
        blocks = sum(self.cfg['num_blocks'])
        for expansion, out_channels, num_blocks, kernel_size, stride in zip(*cfg):
            # Only the first block of each stage may be strided.
            strides = [stride] + [1] * (num_blocks - 1)
            for stride in strides:
                # Drop-connect rate ramps linearly with block depth.
                # BUG FIX: `b` was never incremented, so drop_rate stayed 0 and
                # drop_connect was dead code; increment per block as in the
                # reference EfficientNet implementation.
                drop_rate = self.cfg['drop_connect_rate'] * b / blocks
                b += 1
                layers.append(
                    Block(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          expansion,
                          se_ratio=0.25,
                          drop_rate=drop_rate))
                in_channels = out_channels
        return nn.Sequential(*layers)
    def forward(self, x):
        out = swish(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        dropout_rate = self.cfg['dropout_rate']
        if self.training and dropout_rate > 0:
            # Classifier dropout only while training.
            out = F.dropout(out, p=dropout_rate)
        out = self.linear(out)
        return out
def EfficientNetB0():
    """Standard EfficientNet-B0 stage configuration."""
    cfg = dict(
        num_blocks=[1, 2, 2, 3, 3, 4, 1],
        expansion=[1, 6, 6, 6, 6, 6, 6],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        kernel_size=[3, 3, 5, 3, 5, 5, 3],
        stride=[1, 2, 2, 2, 1, 2, 1],
        dropout_rate=0.2,
        drop_connect_rate=0.2,
    )
    return EfficientNet(cfg)
def test():
    """Smoke test: forward a random batch through B0 and print the shape."""
    model = EfficientNetB0()
    batch = torch.randn(2, 3, 32, 32)
    print(model(batch).shape)
if __name__ == '__main__':
test()
| 5,719 | 31.5 | 106 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/pnasnet.py | '''PNASNet in PyTorch.
Paper: Progressive Neural Architecture Search
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SepConv(nn.Module):
    '''Separable Convolution.'''
    def __init__(self, in_planes, out_planes, kernel_size, stride):
        super(SepConv, self).__init__()
        # Grouped conv with groups=in_planes (depthwise-style) followed by BN;
        # (kernel_size - 1) // 2 gives "same" padding for odd kernels.
        self.conv1 = nn.Conv2d(in_planes, out_planes,
                               kernel_size, stride,
                               padding=(kernel_size - 1) // 2,
                               bias=False, groups=in_planes)
        self.bn1 = nn.BatchNorm2d(out_planes)
    def forward(self, x):
        out = self.conv1(x)
        return self.bn1(out)
class CellA(nn.Module):
    """PNASNet cell A: a 7x7 separable-conv branch summed with a max-pool
    branch (the pooled branch gets a 1x1 conv + BN when downsampling)."""
    def __init__(self, in_planes, out_planes, stride=1):
        super(CellA, self).__init__()
        self.stride = stride
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        if stride == 2:
            # Widen the pooled branch to out_planes when downsampling.
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)
    def forward(self, x):
        branch_a = self.sep_conv1(x)
        branch_b = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride == 2:
            branch_b = self.bn1(self.conv1(branch_b))
        return F.relu(branch_a + branch_b)
class CellB(nn.Module):
    """PNASNet cell B: left half sums a 7x7 and a 3x3 separable conv; right
    half sums a max-pool branch and a 5x5 separable conv; the halves are
    concatenated and reduced back to out_planes with a 1x1 conv."""
    def __init__(self, in_planes, out_planes, stride=1):
        super(CellB, self).__init__()
        self.stride = stride
        # Left branch
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3, stride=stride)
        # Right branch
        self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5, stride=stride)
        if stride == 2:
            # Widen the pooled branch when downsampling.
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)
        # 1x1 conv reduces the concatenated halves back to out_planes.
        self.conv2 = nn.Conv2d(2 * out_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
    def forward(self, x):
        left = F.relu(self.sep_conv1(x) + self.sep_conv2(x))
        pooled = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride == 2:
            pooled = self.bn1(self.conv1(pooled))
        right = F.relu(pooled + self.sep_conv3(x))
        merged = torch.cat([left, right], 1)
        return F.relu(self.bn2(self.conv2(merged)))
class PNASNet(nn.Module):
    """PNASNet backbone: three cell stages separated by two downsampling cells.

    Bug fix: the constructor's `num_cells` argument was previously ignored
    (every stage hard-coded `num_cells=6`). It now controls the number of
    cells per non-downsampling stage; the existing callers (PNASNetA/B) pass
    6, so their topology is unchanged.
    """
    def __init__(self, cell_type, num_cells, num_planes):
        super(PNASNet, self).__init__()
        self.in_planes = num_planes
        self.cell_type = cell_type

        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_planes)

        self.layer1 = self._make_layer(num_planes, num_cells=num_cells)
        self.layer2 = self._downsample(num_planes*2)
        self.layer3 = self._make_layer(num_planes*2, num_cells=num_cells)
        self.layer4 = self._downsample(num_planes*4)
        self.layer5 = self._make_layer(num_planes*4, num_cells=num_cells)

        self.linear = nn.Linear(num_planes*4, 10)

    def _make_layer(self, planes, num_cells):
        """Stack `num_cells` stride-1 cells at the given width."""
        layers = []
        for _ in range(num_cells):
            layers.append(self.cell_type(self.in_planes, planes, stride=1))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def _downsample(self, planes):
        """Single stride-2 cell that changes the width to `planes`."""
        layer = self.cell_type(self.in_planes, planes, stride=2)
        self.in_planes = planes
        return layer

    def forward(self, x):
        """Return 10-way logits; assumes 32x32 input (two downsamples -> 8x8 before pooling)."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        out = F.avg_pool2d(out, 8)
        out = self.linear(out.view(out.size(0), -1))
        return out
def PNASNetA():
    """PNASNet built from CellA: 6 cells per stage, 44 base planes."""
    return PNASNet(CellA, num_cells=6, num_planes=44)
def PNASNetB():
    """PNASNet built from CellB: 6 cells per stage, 32 base planes."""
    return PNASNet(CellB, num_cells=6, num_planes=32)
def test():
    """Smoke test: forward a random CIFAR-sized input through PNASNetB."""
    net = PNASNetB()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y)
# test()
| 4,258 | 32.801587 | 105 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/resnet.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two 3x3 conv+BN layers with an (optionally projected) identity shortcut."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Project the shortcut when the spatial size or channel width changes.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        self.shortcut = nn.Sequential()
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        return F.relu(branch + self.shortcut(x))
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand (x4) bottleneck with residual shortcut."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion *
                               planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        # Project the shortcut when the spatial size or channel width changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = F.relu(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        return F.relu(branch + self.shortcut(x))
class ResNet(nn.Module):
    """Four-stage ResNet for 32x32 inputs, parameterized by block type and depth."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack num_blocks blocks; only the first one may downsample."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        feat = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feat = stage(feat)
        # 32x32 input with three stride-2 stages leaves a 4x4 map.
        feat = F.avg_pool2d(feat, 4)
        flat = feat.view(feat.size(0), -1)
        return self.linear(flat)
def ResNet18():
    """ResNet-18: BasicBlock with (2, 2, 2, 2) blocks per stage."""
    return ResNet(BasicBlock, [2, 2, 2, 2])
def ResNet34():
    """ResNet-34: BasicBlock with (3, 4, 6, 3) blocks per stage."""
    return ResNet(BasicBlock, [3, 4, 6, 3])
def ResNet50():
    """ResNet-50: Bottleneck with (3, 4, 6, 3) blocks per stage."""
    return ResNet(Bottleneck, [3, 4, 6, 3])
def ResNet101():
    """ResNet-101: Bottleneck with (3, 4, 23, 3) blocks per stage."""
    return ResNet(Bottleneck, [3, 4, 23, 3])
def ResNet152():
    """ResNet-152: Bottleneck with (3, 8, 36, 3) blocks per stage."""
    return ResNet(Bottleneck, [3, 8, 36, 3])
def test():
    """Smoke test: forward a random CIFAR-sized input through ResNet-18."""
    net = ResNet18()
    y = net(torch.randn(1, 3, 32, 32))
    print(y.size())
# test()
| 4,218 | 30.721805 | 83 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/mobilenetv2.py | '''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    """Inverted residual: 1x1 expand -> 3x3 depthwise -> 1x1 linear project."""
    def __init__(self, in_planes, out_planes, expansion, stride):
        super(Block, self).__init__()
        self.stride = stride
        hidden = expansion * in_planes
        self.conv1 = nn.Conv2d(in_planes, hidden, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(hidden)
        # Depthwise conv (groups == channels) carries the spatial stride.
        self.conv2 = nn.Conv2d(hidden, hidden, kernel_size=3, stride=stride, padding=1, groups=hidden, bias=False)
        self.bn2 = nn.BatchNorm2d(hidden)
        self.conv3 = nn.Conv2d(hidden, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        # Residual only at stride 1; project if channel widths differ.
        self.shortcut = nn.Sequential()
        if stride == 1 and in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        expanded = F.relu(self.bn1(self.conv1(x)))
        spatial = F.relu(self.bn2(self.conv2(expanded)))
        projected = self.bn3(self.conv3(spatial))
        if self.stride == 1:
            return projected + self.shortcut(x)
        return projected
class MobileNetV2(nn.Module):
    """MobileNetV2 adapted for CIFAR-10 (stem and stage-2 strides reduced to 1)."""
    # Per-stage spec: (expansion, out_planes, num_blocks, stride).
    cfg = [(1, 16, 1, 1),
           (6, 24, 2, 1),  # NOTE: change stride 2 -> 1 for CIFAR10
           (6, 32, 3, 2),
           (6, 64, 4, 2),
           (6, 96, 3, 1),
           (6, 160, 3, 2),
           (6, 320, 1, 1)]

    def __init__(self, num_classes=10):
        super(MobileNetV2, self).__init__()
        # NOTE: change conv1 stride 2 -> 1 for CIFAR10
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(1280)
        self.linear = nn.Linear(1280, num_classes)

    def _make_layers(self, in_planes):
        """Expand cfg into the full stack of inverted-residual blocks."""
        blocks = []
        for expansion, width, count, first_stride in self.cfg:
            # Only the first block of each stage may downsample.
            for s in [first_stride] + [1] * (count - 1):
                blocks.append(Block(in_planes, width, expansion, s))
                in_planes = width
        return nn.Sequential(*blocks)

    def forward(self, x):
        feat = F.relu(self.bn1(self.conv1(x)))
        feat = self.layers(feat)
        feat = F.relu(self.bn2(self.conv2(feat)))
        # NOTE: pooling kernel 7 -> 4 for CIFAR10's 32x32 inputs.
        pooled = F.avg_pool2d(feat, 4)
        flat = pooled.view(pooled.size(0), -1)
        return self.linear(flat)
def test():
    """Smoke test: forward a random batch and print the logits size."""
    net = MobileNetV2()
    x = torch.randn(2,3,32,32)
    y = net(x)
    print(y.size())
# test()
| 3,092 | 34.551724 | 114 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/vgg.py | '''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
# Layer specs per VGG variant: an int is the output width of a
# 3x3 conv + BN + ReLU triple, 'M' is a 2x2 stride-2 max-pool.
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
    """VGG backbone for 32x32 inputs with a single 10-way linear classifier."""
    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, config):
        """Translate a spec list (ints = conv widths, 'M' = max-pool) into a Sequential."""
        layers = []
        in_channels = 3
        for spec in config:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                in_channels = spec
        # Trailing no-op pool kept for checkpoint/layout compatibility.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
def test():
    """Smoke test: forward a random batch through VGG11 and print the output size."""
    net = VGG('VGG11')
    x = torch.randn(2,3,32,32)
    y = net(x)
    print(y.size())
# test()
| 1,442 | 29.0625 | 117 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/densenet.py | '''DenseNet in PyTorch.'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """DenseNet bottleneck: BN-ReLU-1x1 (4g) then BN-ReLU-3x3 (g), concatenated with the input."""
    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4*growth_rate)
        self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        new_features = self.conv2(F.relu(self.bn2(new_features)))
        # New features first, then the untouched input channels.
        return torch.cat([new_features, x], 1)
class Transition(nn.Module):
    """Between dense blocks: BN-ReLU-1x1 to shrink channels, then 2x2 average pool."""
    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        reduced = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(reduced, 2)
class DenseNet(nn.Module):
    """DenseNet for 32x32 inputs: four dense blocks separated by transitions.

    Channel bookkeeping: each dense block adds nblocks[i]*growth_rate channels;
    each transition then shrinks the running width by `reduction`.
    """
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2*growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        # Last dense block has no transition after it.
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        """Stack `nblock` blocks; the input width grows by growth_rate after each one."""
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        # Final BN-ReLU, then a 4x4 average pool (three transitions on a
        # 32x32 input leave a 4x4 map).
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DenseNet121():
    """DenseNet-121: (6, 12, 24, 16) bottleneck blocks, growth rate 32."""
    return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32)
def DenseNet169():
    """DenseNet-169: (6, 12, 32, 32) bottleneck blocks, growth rate 32."""
    return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32)
def DenseNet201():
    """DenseNet-201: (6, 12, 48, 32) bottleneck blocks, growth rate 32."""
    return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32)
def DenseNet161():
    """DenseNet-161: (6, 12, 36, 24) bottleneck blocks, growth rate 48."""
    return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48)
def densenet_cifar():
    """Small DenseNet for CIFAR: DenseNet-121 layout with growth rate 12."""
    return DenseNet(Bottleneck, [6,12,24,16], growth_rate=12)
def test():
    """Smoke test: forward a random input through the CIFAR DenseNet and print the logits."""
    net = densenet_cifar()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y)
# test()
| 3,542 | 31.805556 | 96 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/googlenet.py | '''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Inception(nn.Module):
    """GoogLeNet inception module: four parallel branches concatenated on channels."""
    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()

        def conv_bn_relu(cin, cout, k, pad=0):
            # Shared conv -> BN -> in-place ReLU triple used by every branch.
            return [nn.Conv2d(cin, cout, kernel_size=k, padding=pad),
                    nn.BatchNorm2d(cout),
                    nn.ReLU(True)]

        # Branch 1: plain 1x1 conv.
        self.b1 = nn.Sequential(*conv_bn_relu(in_planes, n1x1, 1))
        # Branch 2: 1x1 reduce, then 3x3.
        self.b2 = nn.Sequential(*(conv_bn_relu(in_planes, n3x3red, 1)
                                  + conv_bn_relu(n3x3red, n3x3, 3, pad=1)))
        # Branch 3: 1x1 reduce, then two stacked 3x3 convs (a factorized 5x5).
        self.b3 = nn.Sequential(*(conv_bn_relu(in_planes, n5x5red, 1)
                                  + conv_bn_relu(n5x5red, n5x5, 3, pad=1)
                                  + conv_bn_relu(n5x5, n5x5, 3, pad=1)))
        # Branch 4: 3x3 max-pool, then 1x1 conv.
        self.b4 = nn.Sequential(nn.MaxPool2d(3, stride=1, padding=1),
                                *conv_bn_relu(in_planes, pool_planes, 1))

    def forward(self, x):
        branches = [self.b1(x), self.b2(x), self.b3(x), self.b4(x)]
        return torch.cat(branches, 1)
class GoogLeNet(nn.Module):
    """GoogLeNet adapted for 32x32 inputs: stem conv, nine inception modules
    in three groups (3/5/2), two max-pools between groups, global average
    pool and a 10-way linear head.
    """
    def __init__(self):
        super(GoogLeNet, self).__init__()
        # Stem: single 3x3 conv to 192 channels (no downsampling).
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )

        # Inception args: (in, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes);
        # each module outputs n1x1 + n3x3 + n5x5 + pool_planes channels.
        self.a3 = Inception(192,  64,  96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        # Shared stride-2 pool reused after stages 3 and 4.
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        self.a4 = Inception(480, 192,  96, 208, 16,  48,  64)
        self.b4 = Inception(512, 160, 112, 224, 24,  64,  64)
        self.c4 = Inception(512, 128, 128, 256, 24,  64,  64)
        self.d4 = Inception(512, 112, 144, 288, 32,  64,  64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        # 8x8 map remains after the two stride-2 pools on a 32x32 input.
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 10)

    def forward(self, x):
        out = self.pre_layers(x)
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)
        out = self.a5(out)
        out = self.b5(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def test():
    """Smoke test: forward a random input through GoogLeNet and print the logits size."""
    net = GoogLeNet()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y.size())
# test()
| 3,221 | 28.833333 | 83 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/resnext.py | '''ResNeXt in PyTorch.
See the paper "Aggregated Residual Transformations for Deep Neural Networks" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    """ResNeXt bottleneck built on grouped 3x3 convolutions."""
    expansion = 2

    def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
        super(Block, self).__init__()
        group_width = cardinality * bottleneck_width
        self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(group_width)
        # Grouped conv: `cardinality` parallel paths, each `bottleneck_width` wide.
        self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
        self.bn2 = nn.BatchNorm2d(group_width)
        self.conv3 = nn.Conv2d(group_width, self.expansion*group_width, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*group_width)
        # Project the shortcut when the spatial size or channel width changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*group_width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*group_width, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*group_width),
            )

    def forward(self, x):
        path = F.relu(self.bn1(self.conv1(x)))
        path = F.relu(self.bn2(self.conv2(path)))
        path = self.bn3(self.conv3(path))
        return F.relu(path + self.shortcut(x))
class ResNeXt(nn.Module):
    """Three-stage ResNeXt for 32x32 inputs; the bottleneck width doubles per stage."""
    def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        # Final width after three stages: expansion(2) * cardinality *
        # (bottleneck_width * 4) == cardinality * bottleneck_width * 8.
        self.linear = nn.Linear(cardinality*bottleneck_width*8, num_classes)

    def _make_layer(self, num_blocks, stride):
        """Stack num_blocks blocks (first one may downsample), then double the width."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, s))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        self.bottleneck_width *= 2
        return nn.Sequential(*blocks)

    def forward(self, x):
        feat = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            feat = stage(feat)
        # Two stride-2 stages on a 32x32 input leave an 8x8 map.
        feat = F.avg_pool2d(feat, 8)
        return self.linear(feat.view(feat.size(0), -1))
def ResNeXt29_2x64d():
    """ResNeXt-29 with cardinality 2 and bottleneck width 64."""
    return ResNeXt(num_blocks=[3,3,3], cardinality=2, bottleneck_width=64)
def ResNeXt29_4x64d():
    """ResNeXt-29 with cardinality 4 and bottleneck width 64."""
    return ResNeXt(num_blocks=[3,3,3], cardinality=4, bottleneck_width=64)
def ResNeXt29_8x64d():
    """ResNeXt-29 with cardinality 8 and bottleneck width 64."""
    return ResNeXt(num_blocks=[3,3,3], cardinality=8, bottleneck_width=64)
def ResNeXt29_32x4d():
    """ResNeXt-29 with cardinality 32 and bottleneck width 4."""
    return ResNeXt(num_blocks=[3,3,3], cardinality=32, bottleneck_width=4)
def test_resnext():
    """Smoke test: forward a random input through ResNeXt29_2x64d and print the output size."""
    net = ResNeXt29_2x64d()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y.size())
# test_resnext()
| 3,478 | 35.239583 | 129 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/senet.py | '''SENet in PyTorch.
SENet is the winner of ImageNet-2017. The paper is not released yet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Post-activation residual block with a squeeze-and-excitation gate.

    Fix: use `torch.sigmoid` instead of the deprecated `F.sigmoid`
    (identical output, no deprecation warning).
    Note: the SE reduction is `planes // 16`, so `planes` must be >= 16.
    """
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Project the shortcut when the spatial size or width changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes)
            )

        # SE layers: 1x1 convs act as per-channel fully-connected layers.
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # Squeeze: global average pool down to a 1x1 channel descriptor.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        w = torch.sigmoid(self.fc2(w))
        # Excitation: channel-wise rescaling via broadcasting.
        out = out * w

        out += self.shortcut(x)
        out = F.relu(out)
        return out
class PreActBlock(nn.Module):
    """Pre-activation residual block with a squeeze-and-excitation gate.

    Fix: use `torch.sigmoid` instead of the deprecated `F.sigmoid`
    (identical output, no deprecation warning).
    Note: the SE reduction is `planes // 16`, so `planes` must be >= 16.
    """
    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        # Shortcut projection only exists when shape changes; its absence is
        # detected via hasattr() in forward().
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)
            )

        # SE layers: 1x1 convs act as per-channel fully-connected layers.
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))

        # Squeeze: global average pool down to a 1x1 channel descriptor.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        w = torch.sigmoid(self.fc2(w))
        # Excitation: channel-wise rescaling via broadcasting.
        out = out * w

        out += shortcut
        return out
class SENet(nn.Module):
    """Four-stage squeeze-and-excitation ResNet for 32x32 inputs."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack num_blocks blocks; only the first one may downsample."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        feat = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feat = stage(feat)
        # Three stride-2 stages on a 32x32 input leave a 4x4 map.
        feat = F.avg_pool2d(feat, 4)
        return self.linear(feat.view(feat.size(0), -1))
def SENet18():
    """SE-ResNet-18: pre-activation SE blocks, (2, 2, 2, 2) per stage."""
    return SENet(PreActBlock, [2,2,2,2])
def test():
    """Smoke test: forward a random input through SENet18 and print the logits size."""
    net = SENet18()
    y = net(torch.randn(1,3,32,32))
    print(y.size())
# test()
| 4,027 | 32.016393 | 102 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/shufflenet.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle: interleave channels across `groups` groups."""
    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> swap the group/channel axes -> [N,C,H,W]
        n, c, h, w = x.size()
        g = self.groups
        grouped = x.view(n, g, c // g, h, w)
        shuffled = grouped.transpose(1, 2).contiguous()
        return shuffled.view(n, c, h, w)
class Bottleneck(nn.Module):
    """ShuffleNet unit: grouped 1x1 -> channel shuffle -> 3x3 depthwise -> grouped 1x1.

    With stride=2 the average-pooled input is concatenated onto the output
    (channel counts add); with stride=1 a plain residual sum is used.

    Fix: `mid_planes` used true division (`out_planes/4`), producing a float
    channel count that nn.Conv2d / nn.BatchNorm2d reject; integer division
    is required (and matches the intended value for the standard configs,
    whose out_planes are multiples of 4).
    """
    def __init__(self, in_planes, out_planes, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride

        mid_planes = out_planes // 4
        # The first stage's input comes from the 24-channel stem; it uses an
        # ungrouped 1x1 conv, per the original architecture.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        # Depthwise 3x3 carries the spatial stride.
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        # Downsampling units concatenate; stride-1 units add.
        out = F.relu(torch.cat([out,res], 1)) if self.stride==2 else F.relu(out+res)
        return out
class ShuffleNet(nn.Module):
    """ShuffleNet for 32x32 inputs: stem conv plus three stages of Bottleneck units."""
    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']

        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], 10)

    def _make_layer(self, out_planes, num_blocks, groups):
        units = []
        for idx in range(num_blocks):
            if idx == 0:
                # The downsampling unit concatenates the pooled input, so it
                # only produces out_planes - in_planes new channels itself.
                units.append(Bottleneck(self.in_planes, out_planes - self.in_planes, stride=2, groups=groups))
            else:
                units.append(Bottleneck(self.in_planes, out_planes, stride=1, groups=groups))
            self.in_planes = out_planes
        return nn.Sequential(*units)

    def forward(self, x):
        feat = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            feat = stage(feat)
        # Three stride-2 stages on a 32x32 input leave a 4x4 map.
        feat = F.avg_pool2d(feat, 4)
        return self.linear(feat.view(feat.size(0), -1))
def ShuffleNetG2():
    """ShuffleNet with 2 groups and stage widths (200, 400, 800)."""
    cfg = {
        'out_planes': [200,400,800],
        'num_blocks': [4,8,4],
        'groups': 2
    }
    return ShuffleNet(cfg)
def ShuffleNetG3():
    """ShuffleNet with 3 groups and stage widths (240, 480, 960)."""
    cfg = {
        'out_planes': [240,480,960],
        'num_blocks': [4,8,4],
        'groups': 3
    }
    return ShuffleNet(cfg)
def test():
    """Smoke test: forward a random input through ShuffleNetG2 and print the logits."""
    net = ShuffleNetG2()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y)
# test()
| 3,542 | 31.209091 | 126 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/lenet.py | '''LeNet in PyTorch.'''
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
    """Classic LeNet-5-style CNN for 32x32 RGB inputs with 10 output classes."""
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # 16 feature maps of 5x5 remain after two conv+pool stages.
        self.fc1 = nn.Linear(16*5*5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        feat = F.max_pool2d(F.relu(self.conv1(x)), 2)
        feat = F.max_pool2d(F.relu(self.conv2(feat)), 2)
        flat = feat.view(feat.size(0), -1)
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)
| 699 | 28.166667 | 43 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/mobilenet.py | '''MobileNet in PyTorch.
See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    """Depthwise 3x3 conv then pointwise 1x1 conv, each followed by BN + ReLU."""
    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # Depthwise: one filter per input channel (groups=in_planes).
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # Pointwise: mixes channels and sets the output width.
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        depthwise = F.relu(self.bn1(self.conv1(x)))
        return F.relu(self.bn2(self.conv2(depthwise)))
class MobileNet(nn.Module):
    """MobileNetV1 for 32x32 inputs: a plain stack of depthwise-separable blocks."""
    # An int entry means (planes, stride=1); a tuple gives (planes, stride).
    cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]

    def __init__(self, num_classes=10):
        super(MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layers(self, in_planes):
        """Expand cfg into the full block stack."""
        blocks = []
        for entry in self.cfg:
            if isinstance(entry, int):
                width, stride = entry, 1
            else:
                width, stride = entry
            blocks.append(Block(in_planes, width, stride))
            in_planes = width
        return nn.Sequential(*blocks)

    def forward(self, x):
        feat = F.relu(self.bn1(self.conv1(x)))
        feat = self.layers(feat)
        # Four stride-2 blocks on a 32x32 input leave a 2x2 map.
        feat = F.avg_pool2d(feat, 2)
        flat = feat.view(feat.size(0), -1)
        return self.linear(flat)
def test():
    """Smoke test: forward a random input through MobileNet and print the logits size."""
    net = MobileNet()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y.size())
# test()
| 2,025 | 31.677419 | 123 | py |
AdversarialWaveletTraining | AdversarialWaveletTraining-main/models/dpn.py | '''Dual Path Networks in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """DPN bottleneck producing a residual slice plus a growing dense slice.

    The first `out_planes` output channels are a residual sum; the remaining
    channels concatenate the shortcut's dense slice with `dense_depth` new
    feature channels.
    """
    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth

        self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # 32-way grouped 3x3 conv carries the spatial stride.
        self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes+dense_depth)

        # Only the first block of a stage projects the shortcut.
        self.shortcut = nn.Sequential()
        if first_layer:
            self.shortcut = nn.Sequential(
                nn.Conv2d(last_planes, out_planes+dense_depth, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes+dense_depth),
            )

    def forward(self, x):
        features = F.relu(self.bn1(self.conv1(x)))
        features = F.relu(self.bn2(self.conv2(features)))
        features = self.bn3(self.conv3(features))
        identity = self.shortcut(x)
        split = self.out_planes
        # Residual path: element-wise sum. Dense path: concatenation.
        merged = torch.cat([identity[:, :split, :, :] + features[:, :split, :, :],
                            identity[:, split:, :, :],
                            features[:, split:, :, :]], 1)
        return F.relu(merged)
class DPN(nn.Module):
    """Dual Path Network for 32x32 inputs.

    Each stage combines a residual path (`out_planes` channels, summed) with
    a densely-growing path (`dense_depth` channels appended per block).
    """
    def __init__(self, cfg):
        super(DPN, self).__init__()
        in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
        num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.last_planes = 64
        self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
        self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
        self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
        self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
        # Final width: residual channels plus the dense channels accumulated
        # across stage 4 (matches the last self.last_planes update below).
        self.linear = nn.Linear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], 10)

    def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
        """Stack num_blocks bottlenecks; only the first downsamples/projects."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for i,stride in enumerate(strides):
            layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0))
            # The dense path grows by dense_depth after every block.
            self.last_planes = out_planes + (i+2) * dense_depth
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Three stride-2 stages on a 32x32 input leave a 4x4 map.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DPN26():
    """Small DPN: 2 blocks per stage."""
    cfg = {
        'in_planes': (96,192,384,768),
        'out_planes': (256,512,1024,2048),
        'num_blocks': (2,2,2,2),
        'dense_depth': (16,32,24,128)
    }
    return DPN(cfg)
def DPN92():
    """DPN-92: (3, 4, 20, 3) blocks per stage."""
    cfg = {
        'in_planes': (96,192,384,768),
        'out_planes': (256,512,1024,2048),
        'num_blocks': (3,4,20,3),
        'dense_depth': (16,32,24,128)
    }
    return DPN(cfg)
def test():
    """Smoke test: forward a random input through DPN-92 and print the logits."""
    net = DPN92()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y)
# test()
| 3,562 | 34.989899 | 116 | py |
IGEV | IGEV-main/IGEV-MVS/train_mvs.py | import argparse
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2'
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
import numpy as np
import random
import time
from torch.utils.tensorboard import SummaryWriter
from datasets import find_dataset_def
from core.igev_mvs import IGEVMVS
from core.submodule import depth_normalization, depth_unnormalization
from utils import *
import sys
import datetime
from tqdm import tqdm
# cudnn autotuner: pick the fastest conv algorithms for fixed-size inputs
cudnn.benchmark = True
# command-line options for IGEV-MVS training / validation
parser = argparse.ArgumentParser(description='IterMVStereo for high-resolution multi-view stereo')
parser.add_argument('--mode', default='train', help='train or val', choices=['train', 'val'])
parser.add_argument('--dataset', default='dtu_yao', help='select dataset')
parser.add_argument('--trainpath', default='/data/DTU_data/dtu_train/', help='train datapath')
parser.add_argument('--valpath', help='validation datapath')
parser.add_argument('--trainlist', default='./lists/dtu/train.txt', help='train list')
parser.add_argument('--vallist', default='./lists/dtu/val.txt', help='validation list')
# NOTE(review): --maxdisp has no type=, so it stays the int default unless a string is passed
parser.add_argument('--maxdisp', default=256)
parser.add_argument('--epochs', type=int, default=32, help='number of epochs to train')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate')
parser.add_argument('--wd', type=float, default=.00001, help='weight decay')
parser.add_argument('--batch_size', type=int, default=6, help='train batch size')
parser.add_argument('--loadckpt', default=None, help='load a specific checkpoint')
parser.add_argument('--logdir', default='./checkpoints/', help='the directory to save checkpoints/logs')
parser.add_argument('--resume', action='store_true', help='continue to train the model')
parser.add_argument('--regress', action='store_true', help='train the regression and confidence')
parser.add_argument('--small_image', action='store_true', help='train with small input as 640x512, otherwise train with 1280x1024')
parser.add_argument('--summary_freq', type=int, default=20, help='print and summary frequency')
parser.add_argument('--save_freq', type=int, default=1, help='save checkpoint frequency')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed')
parser.add_argument('--iteration', type=int, default=22, help='num of iteration of GRU')
try:
    from torch.cuda.amp import GradScaler
except ImportError:  # narrowed from a bare `except:` — only the import can fail here
    # dummy GradScaler for PyTorch < 1.6
    class GradScaler:
        """No-op stand-in mirroring the torch.cuda.amp.GradScaler interface."""

        def __init__(self, enabled=False):
            # Fix: accept the `enabled` kwarg — the training loop constructs
            # GradScaler(enabled=True), which raised TypeError on the original dummy.
            self.enabled = enabled

        def scale(self, loss):
            # No scaling without amp: return the loss unchanged.
            return loss

        def unscale_(self, optimizer):
            pass

        def step(self, optimizer):
            optimizer.step()

        def update(self):
            pass
def sequence_loss(disp_preds, disp_init_pred, depth_gt, mask, depth_min, depth_max, loss_gamma=0.9):
    """Weighted L1 loss over the sequence of normalized-disparity predictions.

    The ground-truth depth is converted to the same normalized inverse-depth
    space as the network outputs. The initial (pre-GRU) prediction always
    contributes with weight 1; each refinement iteration i gets an
    exponentially increasing weight so the final iteration dominates.

    Relies on the module-level `args.iteration` (number of GRU steps) and the
    project helper `depth_normalization`.
    (Removed: an nn.BCEWithLogitsLoss was constructed here but never used.)
    """
    n_predictions = len(disp_preds)
    assert n_predictions >= 1
    mask = mask > 0.5
    batch, _, height, width = depth_gt.size()
    inverse_depth_min = (1.0 / depth_min).view(batch, 1, 1, 1)
    inverse_depth_max = (1.0 / depth_max).view(batch, 1, 1, 1)
    normalized_disp_gt = depth_normalization(depth_gt, inverse_depth_min, inverse_depth_max)
    loss = 1.0 * F.l1_loss(disp_init_pred[mask], normalized_disp_gt[mask], reduction='mean')
    if args.iteration != 0:
        # Fix: the original computed loss_gamma ** (15 / (n_predictions - 1))
        # inside the loop, dividing by zero when there is exactly one prediction.
        if n_predictions > 1:
            adjusted_loss_gamma = loss_gamma ** (15 / (n_predictions - 1))
        else:
            adjusted_loss_gamma = 1.0  # single prediction -> weight 1 (exponent would be 0 anyway)
        for i in range(n_predictions):
            i_weight = adjusted_loss_gamma ** (n_predictions - i - 1)
            loss += i_weight * F.l1_loss(disp_preds[i][mask], normalized_disp_gt[mask], reduction='mean')
    return loss
# parse arguments and check
args = parser.parse_args()
if args.resume: # store_true means set the variable as "True"
assert args.mode == "train"
assert args.loadckpt is None
if args.valpath is None:
args.valpath = args.trainpath
# seed everything for reproducibility
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.mode == "train":
if not os.path.isdir(args.logdir):
os.mkdir(args.logdir)
current_time_str = str(datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
print("current time", current_time_str)
print("creating new summary file")
logger = SummaryWriter(args.logdir)
print("argv:", sys.argv[1:])
print_args(args)
# dataset, dataloader
MVSDataset = find_dataset_def(args.dataset)
train_dataset = MVSDataset(args.trainpath, args.trainlist, "train", 5, robust_train=True)
test_dataset = MVSDataset(args.valpath, args.vallist, "val", 5, robust_train=False)
# drop_last=True keeps every train batch full (train_sample views by args.batch_size)
TrainImgLoader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=4, drop_last=True)
TestImgLoader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=4, drop_last=False)
# model, optimizer
model = IGEVMVS(args)
if args.mode in ["train", "val"]:
model = nn.DataParallel(model)
model.cuda()
model_loss = sequence_loss
optimizer = optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.wd, eps=1e-8)
# load parameters
start_epoch = 0
if (args.mode == "train" and args.resume) or (args.mode == "val" and not args.loadckpt):
saved_models = [fn for fn in os.listdir(args.logdir) if fn.endswith(".ckpt")]
saved_models = sorted(saved_models, key=lambda x: int(x.split('_')[-1].split('.')[0]))
# use the latest checkpoint file
loadckpt = os.path.join(args.logdir, saved_models[-1])
print("resuming", loadckpt)
state_dict = torch.load(loadckpt)
model.load_state_dict(state_dict['model'], strict=False)
# NOTE(review): this resume path reads 'optimizer' and 'epoch' keys — the
# checkpoint written by train() must contain them; confirm both sides agree.
optimizer.load_state_dict(state_dict['optimizer'])
start_epoch = state_dict['epoch'] + 1
elif args.loadckpt:
# load checkpoint file specified by args.loadckpt
print("loading model {}".format(args.loadckpt))
state_dict = torch.load(args.loadckpt)
model.load_state_dict(state_dict['model'], strict=False)
print("start at epoch {}".format(start_epoch))
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
# main function
def train(args):
    """Main training loop: per-epoch train pass, checkpoint, then validation pass.

    Relies on module-level globals: model, optimizer, TrainImgLoader,
    TestImgLoader, logger, start_epoch, train_sample, test_sample.
    """
    total_steps = len(TrainImgLoader) * args.epochs + 100
    lr_scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, total_steps, pct_start=0.01, cycle_momentum=False, anneal_strategy='linear')
    # Fix: create the AMP GradScaler once. The original rebuilt it every batch,
    # discarding the dynamic loss-scale state it is supposed to accumulate.
    scaler = GradScaler(enabled=True)
    for epoch_idx in range(start_epoch, args.epochs):
        print('Epoch {}:'.format(epoch_idx))
        global_step = len(TrainImgLoader) * epoch_idx
        # training
        tbar = tqdm(TrainImgLoader)
        for batch_idx, sample in enumerate(tbar):
            start_time = time.time()
            global_step = len(TrainImgLoader) * epoch_idx + batch_idx
            do_summary = global_step % args.summary_freq == 0
            loss, scalar_outputs = train_sample(args, sample, detailed_summary=do_summary, scaler=scaler)
            if do_summary:
                save_scalars(logger, 'train', scalar_outputs, global_step)
            del scalar_outputs
            tbar.set_description(
                'Epoch {}/{}, Iter {}/{}, train loss = {:.3f}, time = {:.3f}'.format(epoch_idx, args.epochs, batch_idx, len(TrainImgLoader), loss, time.time() - start_time))
            # OneCycleLR is sized in batches, so step once per batch.
            lr_scheduler.step()
        # checkpoint
        # Fix: also persist optimizer state and the epoch index — the resume
        # path reads state_dict['optimizer'] and state_dict['epoch'], which the
        # original checkpoint never saved (KeyError on --resume).
        if (epoch_idx + 1) % args.save_freq == 0:
            torch.save({
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch_idx},
                "{}/model_{:0>6}.ckpt".format(args.logdir, epoch_idx))
        torch.cuda.empty_cache()
        # testing
        avg_test_scalars = DictAverageMeter()
        tbar = tqdm(TestImgLoader)
        for batch_idx, sample in enumerate(tbar):
            start_time = time.time()
            global_step = len(TestImgLoader) * epoch_idx + batch_idx
            do_summary = global_step % args.summary_freq == 0
            loss, scalar_outputs = test_sample(args, sample, detailed_summary=do_summary)
            if do_summary:
                save_scalars(logger, 'test', scalar_outputs, global_step)
            avg_test_scalars.update(scalar_outputs)
            del scalar_outputs
            tbar.set_description('Epoch {}/{}, Iter {}/{}, test loss = {:.3f}, time = {:3f}'.format(epoch_idx, args.epochs, batch_idx,
                                                                                                   len(TestImgLoader), loss,
                                                                                                   time.time() - start_time))
        save_scalars(logger, 'fulltest', avg_test_scalars.mean(), global_step)
        print("avg_test_scalars:", avg_test_scalars.mean())
        torch.cuda.empty_cache()
# Evaluation-only pass over TestImgLoader, averaging per-batch metrics.
def test(args):
avg_test_scalars = DictAverageMeter()
for batch_idx, sample in enumerate(TestImgLoader):
start_time = time.time()
loss, scalar_outputs = test_sample(args, sample, detailed_summary=True)
avg_test_scalars.update(scalar_outputs)
del scalar_outputs
print('Iter {}/{}, test loss = {:.3f}, time = {:3f}'.format(batch_idx, len(TestImgLoader), loss,
time.time() - start_time))
if batch_idx % 100 == 0:
print("Iter {}/{}, test results = {}".format(batch_idx, len(TestImgLoader), avg_test_scalars.mean()))
# NOTE(review): this prints the meter object itself, not its .mean() — presumably
# avg_test_scalars.mean() was intended; confirm before relying on this output.
print("final", avg_test_scalars)
# One optimization step on a single batch (AMP-style via the provided scaler).
# Returns (loss, scalar metrics) as plain floats for logging.
def train_sample(args, sample, detailed_summary=False, scaler=None):
model.train()
optimizer.zero_grad()
sample_cuda = tocuda(sample)
depth_gt = sample_cuda["depth"]
mask = sample_cuda["mask"]
depth_gt_0 = depth_gt['level_0']
mask_0 = mask['level_0']
# NOTE(review): the 'level_2' ground truth below is extracted but never used here
depth_gt_1 = depth_gt['level_2']
mask_1 = mask['level_2']
disp_init, disp_predictions = model(sample_cuda["imgs"], sample_cuda["proj_matrices"],
sample_cuda["depth_min"], sample_cuda["depth_max"])
loss = model_loss(disp_predictions, disp_init, depth_gt_0, mask_0, sample_cuda["depth_min"], sample_cuda["depth_max"])
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
# gradient clipping for GRU stability
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
# convert normalized disparities back to metric depth for the metrics below;
# .view(args.batch_size, ...) assumes a full batch (holds: TrainImgLoader uses drop_last=True)
inverse_depth_min = (1.0 / sample_cuda["depth_min"]).view(args.batch_size, 1, 1, 1)
inverse_depth_max = (1.0 / sample_cuda["depth_max"]).view(args.batch_size, 1, 1, 1)
depth_init = depth_unnormalization(disp_init, inverse_depth_min, inverse_depth_max)
depth_predictions = []
for disp in disp_predictions:
depth_predictions.append(depth_unnormalization(disp, inverse_depth_min, inverse_depth_max))
scalar_outputs = {"loss": loss}
scalar_outputs["abs_error_initial"] = AbsDepthError_metrics(depth_init, depth_gt_0, mask_0 > 0.5)
scalar_outputs["thres1mm_initial"] = Thres_metrics(depth_init, depth_gt_0, mask_0 > 0.5, 1)
scalar_outputs["abs_error_final_full"] = AbsDepthError_metrics(depth_predictions[-1], depth_gt_0, mask_0 > 0.5)
return tensor2float(loss), tensor2float(scalar_outputs)
@make_nograd_func
# Forward pass + metrics on one validation batch (no gradients via the decorator).
def test_sample(args, sample, detailed_summary=True):
model.eval()
sample_cuda = tocuda(sample)
depth_gt = sample_cuda["depth"]
mask = sample_cuda["mask"]
depth_gt_0 = depth_gt['level_0']
mask_0 = mask['level_0']
# NOTE(review): the 'level_2' ground truth below is extracted but never used here
depth_gt_1 = depth_gt['level_2']
mask_1 = mask['level_2']
disp_init, disp_predictions = model(sample_cuda["imgs"], sample_cuda["proj_matrices"],
sample_cuda["depth_min"], sample_cuda["depth_max"])
loss = model_loss(disp_predictions, disp_init, depth_gt_0, mask_0, sample_cuda["depth_min"], sample_cuda["depth_max"])
# unlike train_sample, the batch may be partial here, so size the view dynamically
inverse_depth_min = (1.0 / sample_cuda["depth_min"]).view(sample_cuda["depth_min"].size()[0], 1, 1, 1)
inverse_depth_max = (1.0 / sample_cuda["depth_max"]).view(sample_cuda["depth_max"].size()[0], 1, 1, 1)
depth_init = depth_unnormalization(disp_init, inverse_depth_min, inverse_depth_max)
depth_predictions = []
for disp in disp_predictions:
depth_predictions.append(depth_unnormalization(disp, inverse_depth_min, inverse_depth_max))
scalar_outputs = {"loss": loss}
scalar_outputs["abs_error_initial"] = AbsDepthError_metrics(depth_init, depth_gt_0, mask_0 > 0.5)
scalar_outputs["thres1mm_initial"] = Thres_metrics(depth_init, depth_gt_0, mask_0 > 0.5, 1)
scalar_outputs["abs_error_final_full"] = AbsDepthError_metrics(depth_predictions[-1], depth_gt_0, mask_0 > 0.5)
return tensor2float(loss), tensor2float(scalar_outputs)
if __name__ == '__main__':
# dispatch on --mode: full training loop or a single validation sweep
if args.mode == "train":
train(args)
elif args.mode == "val":
test(args)
| 12,780 | 42.472789 | 173 | py |
IGEV | IGEV-main/IGEV-MVS/utils.py | import numpy as np
import torchvision.utils as vutils
import torch
import torch.nn.functional as F
# print arguments
def print_args(args):
    """Pretty-print every attribute of an argparse namespace, one per line."""
    print("################################ args ################################")
    for name, value in vars(args).items():
        print("{0: <10}\t{1: <30}\t{2: <20}".format(name, str(value), str(type(value))))
    print("########################################################################")
# torch.no_grad wrapper for functions
def make_nograd_func(func):
    """Decorator: run `func` with autograd disabled."""
    def wrapper(*f_args, **f_kwargs):
        with torch.no_grad():
            return func(*f_args, **f_kwargs)
    return wrapper
# convert a function into recursive style to handle nested dict/list/tuple variables
def make_recursive_func(func):
    """Decorator: apply `func` to every leaf of nested list/tuple/dict structures."""
    def wrapper(obj):
        if isinstance(obj, dict):
            return {key: wrapper(value) for key, value in obj.items()}
        if isinstance(obj, tuple):
            return tuple(wrapper(item) for item in obj)
        if isinstance(obj, list):
            return [wrapper(item) for item in obj]
        return func(obj)
    return wrapper
@make_recursive_func
def tensor2float(vars):
    """Convert (nested, via the decorator) scalar tensors/floats to plain floats."""
    if isinstance(vars, torch.Tensor):
        return vars.data.item()
    if isinstance(vars, float):
        return vars
    raise NotImplementedError("invalid input type {} for tensor2float".format(type(vars)))
@make_recursive_func
def tensor2numpy(vars):
    """Convert (nested) tensors to detached CPU numpy copies; pass ndarrays through."""
    if isinstance(vars, torch.Tensor):
        return vars.detach().cpu().numpy().copy()
    if isinstance(vars, np.ndarray):
        return vars
    raise NotImplementedError("invalid input type {} for tensor2numpy".format(type(vars)))
@make_recursive_func
def tocuda(vars):
    """Move (nested) tensors to the default CUDA device; strings pass through."""
    if isinstance(vars, torch.Tensor):
        return vars.cuda()
    if isinstance(vars, str):
        return vars
    raise NotImplementedError("invalid input type {} for tocuda".format(type(vars)))
def save_scalars(logger, mode, scalar_dict, global_step):
    """Write scalar (or list-of-scalar) metrics to a TensorBoard-style logger."""
    scalar_dict = tensor2float(scalar_dict)
    for key, value in scalar_dict.items():
        if isinstance(value, (list, tuple)):
            # one curve per list element, suffixed with its index
            for idx, item in enumerate(value):
                logger.add_scalar('{}/{}_{}'.format(mode, key, idx), item, global_step)
        else:
            logger.add_scalar('{}/{}'.format(mode, key), value, global_step)
def save_images(logger, mode, images_dict, global_step):
    """Write image (or list-of-image) arrays to a TensorBoard-style logger.

    Each entry is converted to numpy, reduced to its first sample, and packed
    into a normalized grid via torchvision's make_grid.
    """
    images_dict = tensor2numpy(images_dict)

    def preprocess(name, img):
        if not (len(img.shape) == 3 or len(img.shape) == 4):
            raise NotImplementedError("invalid img shape {}:{} in save_images".format(name, img.shape))
        if len(img.shape) == 3:
            # add a channel axis: (B, H, W) -> (B, 1, H, W)
            img = img[:, np.newaxis, :, :]
        img = torch.from_numpy(img[:1])
        return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True)

    for key, value in images_dict.items():
        if isinstance(value, (list, tuple)):
            for idx, item in enumerate(value):
                name = '{}/{}_{}'.format(mode, key, idx)
                logger.add_image(name, preprocess(name, item), global_step)
        else:
            name = '{}/{}'.format(mode, key)
            logger.add_image(name, preprocess(name, value), global_step)
class DictAverageMeter(object):
    """Accumulate per-key float metrics and report their running mean."""

    def __init__(self):
        self.data = {}   # key -> running sum of values
        self.count = 0   # number of update() calls

    def update(self, new_input):
        """Add one metrics dict; every value must be a plain float.

        Fixes vs. the original: the validation loop was duplicated for the
        first/subsequent updates, and a key appearing only in a later update
        raised KeyError — dict.get with a 0.0 default handles both cases.
        """
        self.count += 1
        for k, v in new_input.items():
            if not isinstance(v, float):
                raise NotImplementedError("invalid data {}: {}".format(k, type(v)))
            self.data[k] = self.data.get(k, 0.0) + v

    def mean(self):
        """Return key -> sum / count (raises ZeroDivisionError before any update)."""
        return {k: v / self.count for k, v in self.data.items()}
# a wrapper to compute metrics for each image individually
def compute_metrics_for_each_image(metric_func):
    """Decorator: evaluate a per-image metric over a batch and average the results."""
    def wrapper(depth_est, depth_gt, mask, *args):
        per_image = [
            metric_func(depth_est[idx], depth_gt[idx], mask[idx], *args)
            for idx in range(depth_gt.shape[0])
        ]
        return torch.stack(per_image).mean()
    return wrapper
@make_nograd_func
@compute_metrics_for_each_image
def Thres_metrics(depth_est, depth_gt, mask, thres):
    """Fraction of valid (masked) pixels whose absolute depth error exceeds `thres`."""
    assert isinstance(thres, (int, float))
    est, gt = depth_est[mask], depth_gt[mask]
    outliers = (est - gt).abs() > thres
    return outliers.float().mean()
# NOTE: please do not use this to build up training loss
@make_nograd_func
@compute_metrics_for_each_image
def AbsDepthError_metrics(depth_est, depth_gt, mask):
    """Mean absolute depth error over valid (masked) pixels."""
    est, gt = depth_est[mask], depth_gt[mask]
    return (est - gt).abs().mean()
IGEV | IGEV-main/IGEV-MVS/evaluate_mvs.py | import argparse
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
import numpy as np
import time
from datasets import find_dataset_def
from core.igev_mvs import IGEVMVS
from utils import *
import sys
import cv2
from datasets.data_io import read_pfm, save_pfm
from core.submodule import depth_unnormalization
from plyfile import PlyData, PlyElement
from tqdm import tqdm
from PIL import Image
cudnn.benchmark = True
# command-line options for depth prediction + filtering + fusion
parser = argparse.ArgumentParser(description='Predict depth, filter, and fuse')
parser.add_argument('--model', default='IterMVS', help='select model')
parser.add_argument('--dataset', default='dtu_yao_eval', help='select dataset')
parser.add_argument('--testpath', default='/data/DTU_data/dtu_test/', help='testing data path')
parser.add_argument('--testlist', default='./lists/dtu/test.txt', help='testing scan list')
parser.add_argument('--maxdisp', default=256)
parser.add_argument('--split', default='intermediate', help='select data')
parser.add_argument('--batch_size', type=int, default=2, help='testing batch size')
parser.add_argument('--n_views', type=int, default=5, help='num of view')
parser.add_argument('--img_wh', nargs='+', type=int, default=[640, 480],
help='height and width of the image')
parser.add_argument('--loadckpt', default='./pretrained_models/dtu.ckpt', help='load a specific checkpoint')
parser.add_argument('--outdir', default='./output/', help='output dir')
parser.add_argument('--display', action='store_true', help='display depth images and masks')
parser.add_argument('--iteration', type=int, default=32, help='num of iteration of GRU')
parser.add_argument('--geo_pixel_thres', type=float, default=1, help='pixel threshold for geometric consistency filtering')
parser.add_argument('--geo_depth_thres', type=float, default=0.01, help='depth threshold for geometric consistency filtering')
parser.add_argument('--photo_thres', type=float, default=0.3, help='threshold for photometric consistency filtering')
# parse arguments and check
args = parser.parse_args()
print("argv:", sys.argv[1:])
print_args(args)
# per-dataset inference resolution as (width, height)
if args.dataset=="dtu_yao_eval":
img_wh=(1600, 1152)
elif args.dataset=="tanks":
img_wh=(1920, 1024)
elif args.dataset=="eth3d":
img_wh = (1920,1280)
else:
img_wh = (args.img_wh[0], args.img_wh[1]) # custom dataset
# read intrinsics and extrinsics
def read_camera_parameters(filename):
    """Parse an MVSNet-style cam.txt.

    Layout: line 0 'extrinsic' header, lines 1-4 a 4x4 world-to-camera matrix,
    line 6 'intrinsic' header, lines 7-9 a 3x3 K matrix.
    Returns (intrinsics, extrinsics) as float32 arrays.
    """
    with open(filename) as f:
        lines = [line.rstrip() for line in f.readlines()]
    # extrinsics: line [1,5), 4x4 matrix
    # Fix: np.fromstring is deprecated for text parsing; split + np.array is equivalent.
    extrinsics = np.array(' '.join(lines[1:5]).split(), dtype=np.float32).reshape((4, 4))
    # intrinsics: line [7-10), 3x3 matrix
    intrinsics = np.array(' '.join(lines[7:10]).split(), dtype=np.float32).reshape((3, 3))
    return intrinsics, extrinsics
# read an image
def read_img(filename, img_wh):
    """Load an RGB image, scale to [0, 1] float32, resize to img_wh.

    Returns (resized_image, original_height, original_width) so callers can
    rescale intrinsics accordingly.
    """
    # scale 0~255 to 0~1
    pixels = np.array(Image.open(filename), dtype=np.float32) / 255.
    original_h, original_w, _ = pixels.shape
    resized = cv2.resize(pixels, img_wh, interpolation=cv2.INTER_LINEAR)
    return resized, original_h, original_w
# save a binary mask
def save_mask(filename, mask):
    """Save a boolean mask as an 8-bit {0, 255} image."""
    assert mask.dtype == np.bool_
    Image.fromarray(mask.astype(np.uint8) * 255).save(filename)
def save_depth_img(filename, depth):
    """Save a depth map, scaled by 255 as float32, via PIL."""
    scaled = depth.astype(np.float32) * 255
    Image.fromarray(scaled).save(filename)
def read_pair_file(filename):
    """Parse a pair.txt into a list of (ref_view, [src_views]) tuples.

    Format: first line is the viewpoint count; then per viewpoint one line with
    the reference id and one line "<count> id0 score0 id1 score1 ...".
    Entries with no source views are dropped.
    """
    pairs = []
    with open(filename) as f:
        num_viewpoint = int(f.readline())
        # 49 viewpoints
        for _ in range(num_viewpoint):
            ref_view = int(f.readline().rstrip())
            # keep only the ids (every other token after the leading count)
            src_views = [int(tok) for tok in f.readline().rstrip().split()[1::2]]
            if src_views:
                pairs.append((ref_view, src_views))
    return pairs
# run MVS model to save depth maps
# Builds the test dataset for the selected benchmark, loads the checkpoint,
# runs inference batch by batch, converts normalized disparity back to metric
# depth, and writes one .pfm per reference view under outdir.
def save_depth():
# dataset, dataloader
MVSDataset = find_dataset_def(args.dataset)
# each benchmark's dataset class has a different constructor signature
if args.dataset=="dtu_yao_eval":
test_dataset = MVSDataset(args.testpath, args.testlist, args.n_views, img_wh)
elif args.dataset=="tanks":
test_dataset = MVSDataset(args.testpath, args.n_views, img_wh, args.split)
elif args.dataset=="eth3d":
test_dataset = MVSDataset(args.testpath, args.split, args.n_views, img_wh)
else:
test_dataset = MVSDataset(args.testpath, args.n_views, img_wh)
TestImgLoader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=4, drop_last=False)
# model
model = IGEVMVS(args)
model = nn.DataParallel(model)
model.cuda()
# load checkpoint file specified by args.loadckpt
print("loading model {}".format(args.loadckpt))
state_dict = torch.load(args.loadckpt)
model.load_state_dict(state_dict['model'])
model.eval()
with torch.no_grad():
tbar = tqdm(TestImgLoader)
for batch_idx, sample in enumerate(tbar):
start_time = time.time()
sample_cuda = tocuda(sample)
disp_prediction = model(sample_cuda["imgs"], sample_cuda["proj_matrices"],
sample_cuda["depth_min"], sample_cuda["depth_max"], test_mode=True)
b = sample_cuda["depth_min"].shape[0]
# convert normalized disparity back to metric depth
inverse_depth_min = (1.0 / sample_cuda["depth_min"]).view(b, 1, 1, 1)
inverse_depth_max = (1.0 / sample_cuda["depth_max"]).view(b, 1, 1, 1)
depth_prediction = depth_unnormalization(disp_prediction, inverse_depth_min, inverse_depth_max)
depth_prediction = tensor2numpy(depth_prediction.float())
del sample_cuda, disp_prediction
tbar.set_description('Iter {}/{}, time = {:.3f}'.format(batch_idx, len(TestImgLoader), time.time() - start_time))
filenames = sample["filename"]
# save depth maps and confidence maps
for filename, depth_est in zip(filenames, depth_prediction):
depth_filename = os.path.join(args.outdir, filename.format('depth_est', '.pfm'))
os.makedirs(depth_filename.rsplit('/', 1)[0], exist_ok=True)
# save depth maps
depth_est = np.squeeze(depth_est, 0)
save_pfm(depth_filename, depth_est)
# project the reference point cloud into the source view, then project back
# Two-view round-trip: lift every reference pixel to 3D with its depth, project
# into the source view, resample the source depth there, lift again and project
# back into the reference view. Returns (depth_reprojected, x_reprojected,
# y_reprojected, x_src, y_src), all H x W float32 arrays.
def reproject_with_depth(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):
width, height = depth_ref.shape[1], depth_ref.shape[0]
## step1. project reference pixels to the source view
# reference view x, y
x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))
x_ref, y_ref = x_ref.reshape([-1]), y_ref.reshape([-1])
# reference 3D space
xyz_ref = np.matmul(np.linalg.inv(intrinsics_ref),
np.vstack((x_ref, y_ref, np.ones_like(x_ref))) * depth_ref.reshape([-1]))
# source 3D space
xyz_src = np.matmul(np.matmul(extrinsics_src, np.linalg.inv(extrinsics_ref)),
np.vstack((xyz_ref, np.ones_like(x_ref))))[:3]
# source view x, y
K_xyz_src = np.matmul(intrinsics_src, xyz_src)
xy_src = K_xyz_src[:2] / K_xyz_src[2:3]
## step2. reproject the source view points with source view depth estimation
# find the depth estimation of the source view
x_src = xy_src[0].reshape([height, width]).astype(np.float32)
y_src = xy_src[1].reshape([height, width]).astype(np.float32)
# bilinear resampling of the source depth at the projected coordinates
sampled_depth_src = cv2.remap(depth_src, x_src, y_src, interpolation=cv2.INTER_LINEAR)
# mask = sampled_depth_src > 0
# source 3D space
# NOTE that we should use sampled source-view depth_here to project back
xyz_src = np.matmul(np.linalg.inv(intrinsics_src),
np.vstack((xy_src, np.ones_like(x_ref))) * sampled_depth_src.reshape([-1]))
# reference 3D space
xyz_reprojected = np.matmul(np.matmul(extrinsics_ref, np.linalg.inv(extrinsics_src)),
np.vstack((xyz_src, np.ones_like(x_ref))))[:3]
# source view x, y, depth
depth_reprojected = xyz_reprojected[2].reshape([height, width]).astype(np.float32)
K_xyz_reprojected = np.matmul(intrinsics_ref, xyz_reprojected)
# 1e-6 guards against division by zero for points at the camera plane
xy_reprojected = K_xyz_reprojected[:2] / (K_xyz_reprojected[2:3]+1e-6)
x_reprojected = xy_reprojected[0].reshape([height, width]).astype(np.float32)
y_reprojected = xy_reprojected[1].reshape([height, width]).astype(np.float32)
return depth_reprojected, x_reprojected, y_reprojected, x_src, y_src
# Cross-view consistency: a pixel passes threshold level i when its reprojection
# error is below i/thre1 pixels AND its relative depth error is below i/thre2,
# for a family of progressively looser levels i = 2..10 (returned in `masks`).
def check_geometric_consistency(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src, thre1, thre2):
width, height = depth_ref.shape[1], depth_ref.shape[0]
x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))
depth_reprojected, x2d_reprojected, y2d_reprojected, x2d_src, y2d_src = reproject_with_depth(depth_ref,
intrinsics_ref,
extrinsics_ref,
depth_src,
intrinsics_src,
extrinsics_src)
# check |p_reproj-p_1| < 1
dist = np.sqrt((x2d_reprojected - x_ref) ** 2 + (y2d_reprojected - y_ref) ** 2)
# check |d_reproj-d_1| / d_1 < 0.01
depth_diff = np.abs(depth_reprojected - depth_ref)
relative_depth_diff = depth_diff / depth_ref
masks=[]
for i in range(2,11):
mask = np.logical_and(dist < i/thre1, relative_depth_diff < i/thre2)
masks.append(mask)
# NOTE(review): after the loop `mask` holds the loosest (i=10) level, so the
# reprojected depth is zeroed only outside that loosest mask — confirm intended.
depth_reprojected[~mask] = 0
return masks, mask, depth_reprojected, x2d_src, y2d_src
# Fuse the per-view depth maps of one scan into a colored point cloud (PLY).
# A 10-step binary search over a log-scale threshold `thre` tunes the
# geometric-consistency strictness until roughly 25% of pixels survive.
# NOTE(review): geo_pixel_thres / geo_depth_thres / photo_thres / geo_mask_thres
# are accepted but unused in this body — the thresholds come from the search.
# NOTE(review): `vertexs`/`vertex_colors` are appended inside the search loop
# and never reset, so the written PLY accumulates points from every iteration —
# confirm this is intended before relying on the output density.
def filter_depth(scan_folder, out_folder, plyfilename, geo_pixel_thres, geo_depth_thres, photo_thres, img_wh, geo_mask_thres=3):
# the pair file
pair_file = os.path.join(scan_folder, "pair.txt")
# for the final point cloud
vertexs = []
vertex_colors = []
pair_data = read_pair_file(pair_file)
nviews = len(pair_data)
# binary search bounds for the log10 threshold scale
thre_left = -2
thre_right = 2
total_iter = 10
for iter in range(total_iter):
thre = (thre_left + thre_right) / 2
print(f"{iter} {10 ** thre}")
depth_est_averaged = []
geo_mask_all = []
# for each reference view and the corresponding source views
for ref_view, src_views in pair_data:
# load the camera parameters
ref_intrinsics, ref_extrinsics = read_camera_parameters(
os.path.join(scan_folder, 'cams_1/{:0>8}_cam.txt'.format(ref_view)))
ref_img, original_h, original_w = read_img(os.path.join(scan_folder, 'images/{:0>8}.jpg'.format(ref_view)), img_wh)
# rescale intrinsics to the inference resolution
ref_intrinsics[0] *= img_wh[0]/original_w
ref_intrinsics[1] *= img_wh[1]/original_h
# load the estimated depth of the reference view
ref_depth_est = read_pfm(os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(ref_view)))[0]
ref_depth_est = np.squeeze(ref_depth_est, 2)
all_srcview_depth_ests = []
# compute the geometric mask
geo_mask_sum = 0
geo_mask_sums=[]
n = 1 + len(src_views)
ct = 0
for src_view in src_views:
ct = ct + 1
# camera parameters of the source view
src_intrinsics, src_extrinsics = read_camera_parameters(
os.path.join(scan_folder, 'cams_1/{:0>8}_cam.txt'.format(src_view)))
_, original_h, original_w = read_img(os.path.join(scan_folder, 'images/{:0>8}.jpg'.format(src_view)), img_wh)
src_intrinsics[0] *= img_wh[0]/original_w
src_intrinsics[1] *= img_wh[1]/original_h
# the estimated depth of the source view
src_depth_est = read_pfm(os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(src_view)))[0]
# pixel / relative-depth thresholds scale with the searched 10**thre
masks, geo_mask, depth_reprojected, _, _ = check_geometric_consistency(ref_depth_est, ref_intrinsics, ref_extrinsics,
src_depth_est,
src_intrinsics, src_extrinsics, 10 ** thre * 4, 10 ** thre * 1300)
# accumulate per-threshold-level vote counts across source views
if (ct==1):
for i in range(2,n):
geo_mask_sums.append(masks[i-2].astype(np.int32))
else:
for i in range(2,n):
geo_mask_sums[i-2]+=masks[i-2].astype(np.int32)
geo_mask_sum+=geo_mask.astype(np.int32)
all_srcview_depth_ests.append(depth_reprojected)
# dynamic consistency: strict threshold needs all views, looser ones need fewer
geo_mask=geo_mask_sum>=n
for i in range (2,n):
geo_mask=np.logical_or(geo_mask,geo_mask_sums[i-2]>=i)
depth_est_averaged.append((sum(all_srcview_depth_ests) + ref_depth_est) / (geo_mask_sum + 1))
geo_mask_all.append(np.mean(geo_mask))
final_mask = geo_mask
if iter == total_iter - 1:
os.makedirs(os.path.join(out_folder, "mask"), exist_ok=True)
save_mask(os.path.join(out_folder, "mask/{:0>8}_geo.png".format(ref_view)), geo_mask)
save_mask(os.path.join(out_folder, "mask/{:0>8}_final.png".format(ref_view)), final_mask)
print("processing {}, ref-view{:0>2}, geo_mask:{:3f} final_mask: {:3f}".format(scan_folder, ref_view,
geo_mask.mean(), final_mask.mean()))
if args.display:
cv2.imshow('ref_img', ref_img[:, :, ::-1])
cv2.imshow('ref_depth', ref_depth_est / np.max(ref_depth_est))
cv2.imshow('ref_depth * geo_mask', ref_depth_est * geo_mask.astype(np.float32) / np.max(ref_depth_est))
cv2.imshow('ref_depth * mask', ref_depth_est * final_mask.astype(np.float32) / np.max(ref_depth_est))
cv2.waitKey(0)
height, width = depth_est_averaged[-1].shape[:2]
x, y = np.meshgrid(np.arange(0, width), np.arange(0, height))
valid_points = final_mask
# print("valid_points", valid_points.mean())
x, y, depth = x[valid_points], y[valid_points], depth_est_averaged[-1][valid_points]
color = ref_img[valid_points]
# back-project surviving pixels to world coordinates
xyz_ref = np.matmul(np.linalg.inv(ref_intrinsics),
np.vstack((x, y, np.ones_like(x))) * depth)
xyz_world = np.matmul(np.linalg.inv(ref_extrinsics),
np.vstack((xyz_ref, np.ones_like(x))))[:3]
vertexs.append(xyz_world.transpose((1, 0)))
vertex_colors.append((color * 255).astype(np.uint8))
# binary-search update: aim for ~25% of pixels passing the geometric check
if np.mean(geo_mask_all) >= 0.25:
thre_left = thre
else:
thre_right = thre
vertexs = np.concatenate(vertexs, axis=0)
vertex_colors = np.concatenate(vertex_colors, axis=0)
# pack positions and colors into one structured array for plyfile
vertexs = np.array([tuple(v) for v in vertexs], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
vertex_colors = np.array([tuple(v) for v in vertex_colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)
for prop in vertexs.dtype.names:
vertex_all[prop] = vertexs[prop]
for prop in vertex_colors.dtype.names:
vertex_all[prop] = vertex_colors[prop]
el = PlyElement.describe(vertex_all, 'vertex')
PlyData([el]).write(plyfilename)
print("saving the final model to", plyfilename)
if __name__ == '__main__':
# Step 1: run inference and write per-view depth maps; Step 2: fuse them per
# scan into PLY point clouds, with per-benchmark scan lists and vote thresholds.
save_depth()
if args.dataset=="dtu_yao_eval":
with open(args.testlist) as f:
scans = f.readlines()
scans = [line.rstrip() for line in scans]
for scan in scans:
scan_id = int(scan[4:])
scan_folder = os.path.join(args.testpath, scan)
out_folder = os.path.join(args.outdir, scan)
filter_depth(scan_folder, out_folder, os.path.join(args.outdir, 'igev_mvs{:0>3}_l3.ply'.format(scan_id)),
args.geo_pixel_thres, args.geo_depth_thres, args.photo_thres, img_wh, 4)
elif args.dataset=="tanks":
# intermediate dataset
if args.split == "intermediate":
scans = ['Family', 'Francis', 'Horse', 'Lighthouse',
'M60', 'Panther', 'Playground', 'Train']
geo_mask_thres = {'Family': 5,
'Francis': 6,
'Horse': 5,
'Lighthouse': 6,
'M60': 5,
'Panther': 5,
'Playground': 5,
'Train': 5}
for scan in scans:
scan_folder = os.path.join(args.testpath, args.split, scan)
out_folder = os.path.join(args.outdir, scan)
filter_depth(scan_folder, out_folder, os.path.join(args.outdir, scan + '.ply'),
args.geo_pixel_thres, args.geo_depth_thres, args.photo_thres, img_wh, geo_mask_thres[scan])
# advanced dataset
elif args.split == "advanced":
scans = ['Auditorium', 'Ballroom', 'Courtroom',
'Museum', 'Palace', 'Temple']
geo_mask_thres = {'Auditorium': 3,
'Ballroom': 4,
'Courtroom': 4,
'Museum': 4,
'Palace': 5,
'Temple': 4}
for scan in scans:
scan_folder = os.path.join(args.testpath, args.split, scan)
out_folder = os.path.join(args.outdir, scan)
filter_depth(scan_folder, out_folder, os.path.join(args.outdir, scan + '.ply'),
args.geo_pixel_thres, args.geo_depth_thres, args.photo_thres, img_wh, geo_mask_thres[scan])
elif args.dataset=="eth3d":
if args.split == "test":
scans = ['botanical_garden', 'boulders', 'bridge', 'door',
'exhibition_hall', 'lecture_room', 'living_room', 'lounge',
'observatory', 'old_computer', 'statue', 'terrace_2']
geo_mask_thres = {'botanical_garden':1, # 30 images, outdoor
'boulders':1, # 26 images, outdoor
'bridge':2, # 110 images, outdoor
'door':2, # 6 images, indoor
'exhibition_hall':2, # 68 images, indoor
'lecture_room':2, # 23 images, indoor
'living_room':2, # 65 images, indoor
'lounge':1,# 10 images, indoor
'observatory':2, # 27 images, outdoor
'old_computer':2, # 54 images, indoor
'statue':2, # 10 images, indoor
'terrace_2':2 # 13 images, outdoor
}
for scan in scans:
start_time = time.time()
scan_folder = os.path.join(args.testpath, scan)
out_folder = os.path.join(args.outdir, scan)
filter_depth(scan_folder, out_folder, os.path.join(args.outdir, scan + '.ply'),
args.geo_pixel_thres, args.geo_depth_thres, args.photo_thres, img_wh, geo_mask_thres[scan])
print('scan: '+scan+' time = {:3f}'.format(time.time() - start_time))
elif args.split == "train":
scans = ['courtyard', 'delivery_area', 'electro', 'facade',
'kicker', 'meadow', 'office', 'pipes', 'playground',
'relief', 'relief_2', 'terrace', 'terrains']
geo_mask_thres = {'courtyard':1, # 38 images, outdoor
'delivery_area':2, # 44 images, indoor
'electro':1, # 45 images, outdoor
'facade':2, # 76 images, outdoor
'kicker':1, # 31 images, indoor
'meadow':1, # 15 images, outdoor
'office':1, # 26 images, indoor
'pipes':1,# 14 images, indoor
'playground':1, # 38 images, outdoor
'relief':1, # 31 images, indoor
'relief_2':1, # 31 images, indoor
'terrace':1, # 23 images, outdoor
'terrains':2 # 42 images, indoor
}
for scan in scans:
start_time = time.time()
scan_folder = os.path.join(args.testpath, scan)
out_folder = os.path.join(args.outdir, scan)
filter_depth(scan_folder, out_folder, os.path.join(args.outdir, scan + '.ply'),
args.geo_pixel_thres, args.geo_depth_thres, args.photo_thres, img_wh, geo_mask_thres[scan])
print('scan: '+scan+' time = {:3f}'.format(time.time() - start_time))
else:
# custom dataset: fuse everything under testpath into a single cloud
filter_depth(args.testpath, args.outdir, os.path.join(args.outdir, 'custom.ply'),
args.geo_pixel_thres, args.geo_depth_thres, args.photo_thres, img_wh, geo_mask_thres=3)
| 21,822 | 47.388027 | 138 | py |
IGEV | IGEV-main/IGEV-MVS/core/corr.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .submodule import *
class CorrBlock1D_Cost_Volume:
    """Multi-level lookup over an (initial + regularized) 1-D cost volume.

    Both volumes are reshaped so that every reference pixel becomes one row of
    depth hypotheses; ``__call__`` then bilinearly samples a +/-radius window
    around the current disparity at each pyramid level and concatenates the
    results from both volumes.
    """

    def __init__(self, init_corr, corr, num_levels=2, radius=4, inverse_depth_min=None, inverse_depth_max=None, num_sample=None):
        # BUG FIX: previously `self.num_levels` was hard-coded to 2 and the
        # `num_levels` argument was silently ignored; honor the parameter
        # (the default of 2 keeps existing callers unchanged).
        self.num_levels = num_levels
        self.radius = radius
        # Stored for callers that need to map disparity indices back to depth;
        # not used inside this class itself.
        self.inverse_depth_min = inverse_depth_min
        self.inverse_depth_max = inverse_depth_max
        self.num_sample = num_sample
        self.corr_pyramid = []
        self.init_corr_pyramid = []

        # corr: [B, C, D, H, W] -> one (1, 1, D) hypothesis row per pixel.
        b, c, d, h, w = corr.shape
        corr = corr.permute(0, 3, 4, 1, 2).reshape(b*h*w, 1, 1, d)
        init_corr = init_corr.permute(0, 3, 4, 1, 2).reshape(b*h*w, 1, 1, d)

        # NOTE(review): each pyramid holds num_levels+1 entries but __call__
        # only reads the first num_levels — the coarsest level is unused.
        self.corr_pyramid.append(corr)
        self.init_corr_pyramid.append(init_corr)
        for i in range(self.num_levels):
            # halve the depth-hypothesis axis only
            corr = F.avg_pool2d(corr, [1, 2], stride=[1, 2])
            self.corr_pyramid.append(corr)

        for i in range(self.num_levels):
            init_corr = F.avg_pool2d(init_corr, [1, 2], stride=[1, 2])
            self.init_corr_pyramid.append(init_corr)

    def __call__(self, disp):
        """Sample both pyramids around `disp` ([B,1,H,W]); returns [B, L*2*(2r+1), H, W]."""
        r = self.radius
        b, _, h, w = disp.shape
        out_pyramid = []
        for i in range(self.num_levels):
            corr = self.corr_pyramid[i]
            init_corr = self.init_corr_pyramid[i]
            # window of offsets around the current disparity, scaled to level i
            dx = torch.linspace(-r, r, 2*r+1)
            dx = dx.view(1, 1, 2*r+1, 1).to(disp.device)
            x0 = dx + disp.reshape(b*h*w, 1, 1, 1) / 2**i
            y0 = torch.zeros_like(x0)
            disp_lvl = torch.cat([x0, y0], dim=-1)
            corr = bilinear_sampler(corr, disp_lvl)
            corr = corr.view(b, h, w, -1)
            init_corr = bilinear_sampler(init_corr, disp_lvl)
            init_corr = init_corr.view(b, h, w, -1)
            out_pyramid.append(corr)
            out_pyramid.append(init_corr)
        out = torch.cat(out_pyramid, dim=-1)
        return out.permute(0, 3, 1, 2).contiguous().float()
IGEV | IGEV-main/IGEV-MVS/core/update.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .submodule import *
class BasicMotionEncoder(nn.Module):
    """Fuses correlation lookups and the current disparity into a 128-channel
    motion feature map whose last channel is the raw disparity itself."""

    def __init__(self):
        super(BasicMotionEncoder, self).__init__()
        self.corr_levels = 2
        self.corr_radius = 4
        # two volumes * levels * window width sampled by the corr block
        cor_planes = 2 * self.corr_levels * (2 * self.corr_radius + 1)
        self.convc1 = nn.Conv2d(cor_planes, 64, 1, padding=0)
        self.convc2 = nn.Conv2d(64, 64, 3, padding=1)
        self.convd1 = nn.Conv2d(1, 64, 7, padding=3)
        self.convd2 = nn.Conv2d(64, 64, 3, padding=1)
        self.conv = nn.Conv2d(64 + 64, 128 - 1, 3, padding=1)

    def forward(self, disp, corr):
        corr_feat = F.relu(self.convc2(F.relu(self.convc1(corr))))
        disp_feat = F.relu(self.convd2(F.relu(self.convd1(disp))))
        fused = F.relu(self.conv(torch.cat((corr_feat, disp_feat), dim=1)))
        # append the raw disparity as the final channel
        return torch.cat((fused, disp), dim=1)
class ConvGRU(nn.Module):
    """Convolutional GRU cell; extra positional inputs are concatenated on the
    channel axis before the gate convolutions."""

    def __init__(self, hidden_dim, input_dim, kernel_size=3):
        super(ConvGRU, self).__init__()
        pad = kernel_size // 2
        self.convz = nn.Conv2d(hidden_dim + input_dim, hidden_dim, kernel_size, padding=pad)
        self.convr = nn.Conv2d(hidden_dim + input_dim, hidden_dim, kernel_size, padding=pad)
        self.convq = nn.Conv2d(hidden_dim + input_dim, hidden_dim, kernel_size, padding=pad)

    def forward(self, h, *x_list):
        inputs = torch.cat(x_list, dim=1)
        stacked = torch.cat((h, inputs), dim=1)
        update = torch.sigmoid(self.convz(stacked))
        reset = torch.sigmoid(self.convr(stacked))
        candidate = torch.tanh(self.convq(torch.cat((reset * h, inputs), dim=1)))
        # standard GRU blend of previous state and candidate state
        return (1 - update) * h + update * candidate
class DispHead(nn.Module):
    """Two-layer conv head that maps a hidden state to a disparity update."""

    def __init__(self, input_dim=128, hidden_dim=256, output_dim=1):
        super(DispHead, self).__init__()
        self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = nn.Conv2d(hidden_dim, output_dim, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
class BasicMultiUpdateBlock(nn.Module):
    """Three-level GRU update block.

    net[0] is the finest hidden state (it receives motion features and emits
    the disparity update); net[2] is the coarsest.  Coarser GRUs provide
    context to finer ones through pooled / interpolated hidden states.
    """

    def __init__(self, hidden_dims=[]):
        # NOTE: `hidden_dims=[]` is a mutable default; it is never mutated
        # here and real callers always pass an explicit 3-element list.
        super().__init__()
        self.n_gru_layers = 3   # number of GRU resolution levels in use
        self.n_downsample = 2   # disparity is refined at 1/2**n_downsample resolution
        self.encoder = BasicMotionEncoder()
        encoder_output_dim = 128

        # hidden_dims[2] feeds the finest GRU, hidden_dims[0] the coarsest.
        self.gru04 = ConvGRU(hidden_dims[2], encoder_output_dim + hidden_dims[1] * (self.n_gru_layers > 1))
        self.gru08 = ConvGRU(hidden_dims[1], hidden_dims[0] * (self.n_gru_layers == 3) + hidden_dims[2])
        self.gru16 = ConvGRU(hidden_dims[0], hidden_dims[1])
        self.disp_head = DispHead(hidden_dims[2], hidden_dim=256, output_dim=1)
        # (removed dead local `factor = 2**self.n_downsample`; it was never used)

        # features used later to predict the convex-upsampling mask
        self.mask_feat_4 = nn.Sequential(
            nn.Conv2d(hidden_dims[2], 32, 3, padding=1),
            nn.ReLU(inplace=True))

    def forward(self, net, corr=None, disp=None, iter04=True, iter08=True, iter16=True, update=True):
        """Run one refinement step.

        `net` is the list of per-level hidden states and is updated in place.
        With update=False only the hidden states are advanced; otherwise the
        updated states, mask features and the disparity delta are returned.
        """
        if iter16:
            net[2] = self.gru16(net[2], pool2x(net[1]))
        if iter08:
            if self.n_gru_layers > 2:
                net[1] = self.gru08(net[1], pool2x(net[0]), interp(net[2], net[1]))
            else:
                net[1] = self.gru08(net[1], pool2x(net[0]))
        if iter04:
            motion_features = self.encoder(disp, corr)
            if self.n_gru_layers > 1:
                net[0] = self.gru04(net[0], motion_features, interp(net[1], net[0]))
            else:
                net[0] = self.gru04(net[0], motion_features)

        if not update:
            return net

        delta_disp = self.disp_head(net[0])
        mask_feat_4 = self.mask_feat_4(net[0])
        return net, mask_feat_4, delta_disp
IGEV | IGEV-main/IGEV-MVS/core/submodule.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class SubModule(nn.Module):
    """Base module that provides He-style re-initialization for conv layers
    and resets batch-norm affine parameters to identity."""

    def __init__(self):
        super(SubModule, self).__init__()

    def weight_init(self):
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.Conv3d)):
                # fan-out = out_channels * prod(kernel_size), He normal init
                fan_out = module.out_channels
                for k in module.kernel_size:
                    fan_out *= k
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d)):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
class BasicConv(nn.Module):
    """(Transposed) 2-D/3-D convolution -> optional BatchNorm -> optional LeakyReLU.

    Extra keyword arguments are forwarded to the convolution constructor.
    """

    def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, bn=True, relu=True, **kwargs):
        super(BasicConv, self).__init__()
        self.relu = relu
        self.use_bn = bn
        if is_3d:
            conv_cls = nn.ConvTranspose3d if deconv else nn.Conv3d
            norm_cls = nn.BatchNorm3d
        else:
            conv_cls = nn.ConvTranspose2d if deconv else nn.Conv2d
            norm_cls = nn.BatchNorm2d
        # bias is redundant when followed by a norm layer
        self.conv = conv_cls(in_channels, out_channels, bias=False, **kwargs)
        self.bn = norm_cls(out_channels)

    def forward(self, x):
        out = self.conv(x)
        if self.use_bn:
            out = self.bn(out)
        if self.relu:
            out = F.leaky_relu(out)  # default negative_slope=0.01, not in-place
        return out
class BasicConv_IN(nn.Module):
    """(Transposed) 2-D/3-D convolution -> optional InstanceNorm -> optional LeakyReLU.

    Instance-norm variant of BasicConv; extra kwargs go to the convolution.
    """

    def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, IN=True, relu=True, **kwargs):
        super(BasicConv_IN, self).__init__()
        self.relu = relu
        self.use_in = IN
        if is_3d:
            conv_cls = nn.ConvTranspose3d if deconv else nn.Conv3d
            norm_cls = nn.InstanceNorm3d
        else:
            conv_cls = nn.ConvTranspose2d if deconv else nn.Conv2d
            norm_cls = nn.InstanceNorm2d
        self.conv = conv_cls(in_channels, out_channels, bias=False, **kwargs)
        self.IN = norm_cls(out_channels)

    def forward(self, x):
        out = self.conv(x)
        if self.use_in:
            out = self.IN(out)
        if self.relu:
            out = F.leaky_relu(out)  # default negative_slope=0.01, not in-place
        return out
class Conv2x(nn.Module):
    """Rescale by 2x (conv or deconv), fuse with a skip tensor, then refine.

    The skip is merged either by channel concatenation or by addition; a
    nearest-neighbor resize fixes small spatial mismatches before fusion.
    """

    def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, concat=True, keep_concat=True, bn=True, relu=True, keep_dispc=False):
        super(Conv2x, self).__init__()
        self.concat = concat
        self.is_3d = is_3d
        if deconv and is_3d and keep_dispc:
            # upsample H and W only, leaving the depth/disparity axis untouched
            self.conv1 = BasicConv(in_channels, out_channels, deconv, is_3d, bn=True, relu=True,
                                   kernel_size=(1, 4, 4), stride=(1, 2, 2), padding=(0, 1, 1))
        else:
            if deconv:
                kernel = (4, 4, 4) if is_3d else 4
            else:
                kernel = 3
            self.conv1 = BasicConv(in_channels, out_channels, deconv, is_3d, bn=True, relu=True,
                                   kernel_size=kernel, stride=2, padding=1)

        if self.concat:
            out_mul = 2 if keep_concat else 1
            self.conv2 = BasicConv(out_channels * 2, out_channels * out_mul, False, is_3d, bn, relu,
                                   kernel_size=3, stride=1, padding=1)
        else:
            self.conv2 = BasicConv(out_channels, out_channels, False, is_3d, bn, relu,
                                   kernel_size=3, stride=1, padding=1)

    def forward(self, x, rem):
        x = self.conv1(x)
        if x.shape != rem.shape:
            x = F.interpolate(x, size=rem.shape[-2:], mode='nearest')
        x = torch.cat((x, rem), 1) if self.concat else x + rem
        return self.conv2(x)
class Conv2x_IN(nn.Module):
    """Instance-norm variant of Conv2x: rescale by 2x, fuse with a skip
    tensor (concat or add), then refine with a 3x3 conv."""

    def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, concat=True, keep_concat=True, IN=True, relu=True, keep_dispc=False):
        super(Conv2x_IN, self).__init__()
        self.concat = concat
        self.is_3d = is_3d
        if deconv and is_3d and keep_dispc:
            # upsample H and W only, leaving the depth/disparity axis untouched
            self.conv1 = BasicConv_IN(in_channels, out_channels, deconv, is_3d, IN=True, relu=True,
                                      kernel_size=(1, 4, 4), stride=(1, 2, 2), padding=(0, 1, 1))
        else:
            if deconv:
                kernel = (4, 4, 4) if is_3d else 4
            else:
                kernel = 3
            self.conv1 = BasicConv_IN(in_channels, out_channels, deconv, is_3d, IN=True, relu=True,
                                      kernel_size=kernel, stride=2, padding=1)

        if self.concat:
            out_mul = 2 if keep_concat else 1
            self.conv2 = BasicConv_IN(out_channels * 2, out_channels * out_mul, False, is_3d, IN, relu,
                                      kernel_size=3, stride=1, padding=1)
        else:
            self.conv2 = BasicConv_IN(out_channels, out_channels, False, is_3d, IN, relu,
                                      kernel_size=3, stride=1, padding=1)

    def forward(self, x, rem):
        x = self.conv1(x)
        if x.shape != rem.shape:
            x = F.interpolate(x, size=rem.shape[-2:], mode='nearest')
        x = torch.cat((x, rem), 1) if self.concat else x + rem
        return self.conv2(x)
class ConvReLU(nn.Module):
    """Bias-free 2-D convolution followed by an in-place ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1, dilation=1):
        super(ConvReLU, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride=stride, padding=pad, dilation=dilation, bias=False)

    def forward(self, x):
        out = self.conv(x)
        return F.relu(out, inplace=True)
class DepthInitialization(nn.Module):
    """Generates `num_sample` depth hypotheses per pixel, uniformly spaced in
    inverse depth between the given bounds."""

    def __init__(self, num_sample):
        super(DepthInitialization, self).__init__()
        self.num_sample = num_sample

    def forward(self, inverse_depth_min, inverse_depth_max, height, width, device):
        """Return a [B, num_sample, H, W] tensor of candidate depths.

        inverse_depth_min/max are [B, 1, 1, 1] tensors (1/depth bounds).
        """
        batch = inverse_depth_min.size()[0]
        steps = torch.arange(0, self.num_sample, 1, device=device).view(1, self.num_sample, 1, 1).float()
        # fraction in [0, 1] along the inverse-depth range
        fraction = steps.repeat(batch, 1, height, width) / (self.num_sample - 1)
        inverse_depth = inverse_depth_max + fraction * (inverse_depth_min - inverse_depth_max)
        return 1.0 / inverse_depth
class PixelViewWeight(nn.Module):
    """Predicts a per-pixel confidence weight for one source view from its
    grouped correlation volume (max over the depth-wise softmax)."""

    def __init__(self, G):
        super(PixelViewWeight, self).__init__()
        self.conv = nn.Sequential(
            ConvReLU(G, 16),
            nn.Conv2d(16, 1, 1, stride=1, padding=0),
        )

    def forward(self, x):
        # x: [B, G, N, H, W] -> weight: [B, 1, H, W]
        batch, dim, num_depth, height, width = x.size()
        flat = x.permute(0, 2, 1, 3, 4).contiguous().view(batch * num_depth, dim, height, width)
        scores = self.conv(flat).view(batch, num_depth, height, width)
        # peak of the softmax over depth hypotheses = matching confidence
        weight = torch.max(torch.softmax(scores, dim=1), dim=1)[0]
        return weight.unsqueeze(1)
class FeatureAtt(nn.Module):
    """Modulates a cost volume with a sigmoid attention map derived from 2-D
    image features (the map is broadcast over the depth axis at dim 2)."""

    def __init__(self, cv_chan, feat_chan):
        super(FeatureAtt, self).__init__()
        self.feat_att = nn.Sequential(
            BasicConv(feat_chan, feat_chan // 2, kernel_size=1, stride=1, padding=0),
            nn.Conv2d(feat_chan // 2, cv_chan, 1))

    def forward(self, cv, feat):
        """Scale cost volume `cv` by per-pixel attention computed from `feat`."""
        att = self.feat_att(feat).unsqueeze(2)
        return torch.sigmoid(att) * cv
class hourglass(nn.Module):
    """3-D hourglass (encoder-decoder) that regularizes a cost volume.

    The encoder halves (D, H, W) three times while widening channels to
    2x/4x/6x `in_channels`; FeatureAtt blocks re-inject 2-D image features at
    each scale.  The decoder upsamples with transposed 3-D convolutions and
    fuses encoder skips by concatenation; the output has a single channel
    (per-depth-hypothesis scores).
    """
    def __init__(self, in_channels):
        super(hourglass, self).__init__()
        # encoder stage 1: stride-2 downsample + refine (channels -> 2x)
        self.conv1 = nn.Sequential(BasicConv(in_channels, in_channels*2, is_3d=True, bn=True, relu=True, kernel_size=3,
                                   padding=1, stride=2, dilation=1),
                                   BasicConv(in_channels*2, in_channels*2, is_3d=True, bn=True, relu=True, kernel_size=3,
                                   padding=1, stride=1, dilation=1))
        # encoder stage 2 (channels -> 4x)
        self.conv2 = nn.Sequential(BasicConv(in_channels*2, in_channels*4, is_3d=True, bn=True, relu=True, kernel_size=3,
                                   padding=1, stride=2, dilation=1),
                                   BasicConv(in_channels*4, in_channels*4, is_3d=True, bn=True, relu=True, kernel_size=3,
                                   padding=1, stride=1, dilation=1))
        # encoder stage 3 (channels -> 6x)
        self.conv3 = nn.Sequential(BasicConv(in_channels*4, in_channels*6, is_3d=True, bn=True, relu=True, kernel_size=3,
                                   padding=1, stride=2, dilation=1),
                                   BasicConv(in_channels*6, in_channels*6, is_3d=True, bn=True, relu=True, kernel_size=3,
                                   padding=1, stride=1, dilation=1))
        # decoder: transposed convs double (D, H, W) back up
        self.conv3_up = BasicConv(in_channels*6, in_channels*4, deconv=True, is_3d=True, bn=True,
                                  relu=True, kernel_size=(4, 4, 4), padding=(1, 1, 1), stride=(2, 2, 2))
        self.conv2_up = BasicConv(in_channels*4, in_channels*2, deconv=True, is_3d=True, bn=True,
                                  relu=True, kernel_size=(4, 4, 4), padding=(1, 1, 1), stride=(2, 2, 2))
        # last up-projection emits the single-channel volume (no bn / relu)
        self.conv1_up = BasicConv(in_channels*2, 1, deconv=True, is_3d=True, bn=False,
                                  relu=False, kernel_size=(4, 4, 4), padding=(1, 1, 1), stride=(2, 2, 2))
        # aggregation after each skip concatenation (8x -> 4x, 4x -> 2x channels)
        self.agg_0 = nn.Sequential(BasicConv(in_channels*8, in_channels*4, is_3d=True, kernel_size=1, padding=0, stride=1),
                                   BasicConv(in_channels*4, in_channels*4, is_3d=True, kernel_size=3, padding=1, stride=1),
                                   BasicConv(in_channels*4, in_channels*4, is_3d=True, kernel_size=3, padding=1, stride=1),)
        self.agg_1 = nn.Sequential(BasicConv(in_channels*4, in_channels*2, is_3d=True, kernel_size=1, padding=0, stride=1),
                                   BasicConv(in_channels*2, in_channels*2, is_3d=True, kernel_size=3, padding=1, stride=1),
                                   BasicConv(in_channels*2, in_channels*2, is_3d=True, kernel_size=3, padding=1, stride=1))
        # per-scale image-feature attention; 64/192/160 are the expected
        # channel widths of features[1..3] — TODO confirm against the backbone
        self.feature_att_8 = FeatureAtt(in_channels*2, 64)
        self.feature_att_16 = FeatureAtt(in_channels*4, 192)
        self.feature_att_32 = FeatureAtt(in_channels*6, 160)
        self.feature_att_up_16 = FeatureAtt(in_channels*4, 192)
        self.feature_att_up_8 = FeatureAtt(in_channels*2, 64)
    def forward(self, x, features):
        """Regularize cost volume `x`; `features` is a per-scale feature list
        of which only indices 1-3 are used here."""
        conv1 = self.conv1(x)
        conv1 = self.feature_att_8(conv1, features[1])
        conv2 = self.conv2(conv1)
        conv2 = self.feature_att_16(conv2, features[2])
        conv3 = self.conv3(conv2)
        conv3 = self.feature_att_32(conv3, features[3])
        # decoder with concatenated skip connections
        conv3_up = self.conv3_up(conv3)
        conv2 = torch.cat((conv3_up, conv2), dim=1)
        conv2 = self.agg_0(conv2)
        conv2 = self.feature_att_up_16(conv2, features[2])
        conv2_up = self.conv2_up(conv2)
        conv1 = torch.cat((conv2_up, conv1), dim=1)
        conv1 = self.agg_1(conv1)
        conv1 = self.feature_att_up_8(conv1, features[1])
        conv = self.conv1_up(conv1)
        return conv
def bilinear_sampler(img, coords, mode='bilinear', mask=False):
    """grid_sample wrapper taking pixel-space x coordinates.

    Only x is normalized to [-1, 1]; y is passed through unchanged, which is
    valid because the input is required to have H == 1 (stereo row sampling).
    Optionally also returns a float mask of in-bounds samples.
    """
    H, W = img.shape[-2:]
    xgrid, ygrid = coords.split([1, 1], dim=-1)
    # map x from pixel units to the [-1, 1] range grid_sample expects
    xgrid = 2 * xgrid / (W - 1) - 1
    assert torch.unique(ygrid).numel() == 1 and H == 1  # This is a stereo problem
    grid = torch.cat([xgrid, ygrid], dim=-1)
    sampled = F.grid_sample(img, grid, align_corners=True)
    if mask:
        valid = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
        return sampled, valid.float()
    return sampled
def context_upsample(disp_low, up_weights):
    """Convex 4x upsampling: each high-res pixel is a weighted combination of
    the 3x3 neighbourhood of its low-res parent.

    disp_low:   (b, 1, h, w) low-resolution disparity.
    up_weights: (b, 9, 4h, 4w) per-pixel combination weights.
    Returns a (b, 4h, 4w) tensor.
    """
    b, c, h, w = disp_low.shape
    # gather each pixel's 3x3 neighbourhood as 9 channels
    neighbours = F.unfold(disp_low.reshape(b, c, h, w), 3, 1, 1).reshape(b, -1, h, w)
    # replicate to full resolution, then blend with the learned weights
    neighbours = F.interpolate(neighbours, (h * 4, w * 4), mode='nearest').reshape(b, 9, h * 4, w * 4)
    return (neighbours * up_weights).sum(1)
def pool2x(x):
    """Halve the spatial resolution with a 3x3 average pool (stride 2,
    zero padding included in the average)."""
    return F.avg_pool2d(x, kernel_size=3, stride=2, padding=1)
def interp(x, dest):
    """Bilinearly resize `x` to `dest`'s spatial size (align_corners=True)."""
    return F.interpolate(x, dest.shape[2:], mode='bilinear', align_corners=True)
def differentiable_warping(src_fea, src_proj, ref_proj, depth_samples, return_mask=False):
    """Warp source-view features into the reference view at each depth hypothesis.

    Back-projects every reference pixel at every candidate depth, reprojects
    into the source view via src_proj @ inverse(ref_proj), and bilinearly
    samples `src_fea` there.  Points behind the camera or outside the image
    are redirected to a dummy coordinate and flagged invalid.
    """
    # src_fea: [B, C, H, W]
    # src_proj: [B, 4, 4]
    # ref_proj: [B, 4, 4]
    # depth_samples: [B, Ndepth, H, W]
    # out: [B, C, Ndepth, H, W]
    batch, num_depth, height, width = depth_samples.size()
    height1, width1 = src_fea.size()[2:]
    with torch.no_grad():
        if batch==2:
            # NOTE(review): per-sample inverse for batch==2 — presumably a
            # workaround for a batched torch.inverse issue; confirm.
            inv_ref_proj = []
            for i in range(batch):
                inv_ref_proj.append(torch.inverse(ref_proj[i]).unsqueeze(0))
            inv_ref_proj = torch.cat(inv_ref_proj, dim=0)
            assert (not torch.isnan(inv_ref_proj).any()), "nan in inverse(ref_proj)"
            proj = torch.matmul(src_proj, inv_ref_proj)
        else:
            proj = torch.matmul(src_proj, torch.inverse(ref_proj))
            assert (not torch.isnan(proj).any()), "nan in proj"
        rot = proj[:, :3, :3]  # [B,3,3]
        trans = proj[:, :3, 3:4]  # [B,3,1]
        y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=depth_samples.device),
                               torch.arange(0, width, dtype=torch.float32, device=depth_samples.device)])
        y, x = y.contiguous(), x.contiguous()
        y, x = y.view(height * width), x.view(height * width)
        # pixel grid is built at depth-map resolution, then scaled to the
        # (possibly different) feature resolution
        y = y*(height1/height)
        x = x*(width1/width)
        xyz = torch.stack((x, y, torch.ones_like(x)))  # [3, H*W]
        xyz = torch.unsqueeze(xyz, 0).repeat(batch, 1, 1)  # [B, 3, H*W]
        rot_xyz = torch.matmul(rot, xyz)  # [B, 3, H*W]
        rot_depth_xyz = rot_xyz.unsqueeze(2).repeat(1, 1, num_depth, 1) * depth_samples.view(batch, 1, num_depth,
                                                                            height * width)  # [B, 3, Ndepth, H*W]
        proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1)  # [B, 3, Ndepth, H*W]
        # avoid negative depth: push invalid points to a harmless dummy coordinate
        valid_mask = proj_xyz[:, 2:] > 1e-2
        proj_xyz[:, 0:1][~valid_mask] = width
        proj_xyz[:, 1:2][~valid_mask] = height
        proj_xyz[:, 2:3][~valid_mask] = 1
        proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :]  # [B, 2, Ndepth, H*W]
        # NOTE(review): bounds are checked against (width, height) but the
        # coordinates live at (width1, height1) scale — looks inconsistent
        # when the two resolutions differ; confirm.
        valid_mask = valid_mask & (proj_xy[:, 0:1] >=0) & (proj_xy[:, 0:1] < width) \
                    & (proj_xy[:, 1:2] >=0) & (proj_xy[:, 1:2] < height)
        proj_x_normalized = proj_xy[:, 0, :, :] / ((width1 - 1) / 2) - 1  # [B, Ndepth, H*W]
        proj_y_normalized = proj_xy[:, 1, :, :] / ((height1 - 1) / 2) - 1
        proj_xy = torch.stack((proj_x_normalized, proj_y_normalized), dim=3)  # [B, Ndepth, H*W, 2]
        grid = proj_xy
    dim = src_fea.size()[1]
    warped_src_fea = F.grid_sample(src_fea, grid.view(batch, num_depth * height, width, 2), mode='bilinear',
                                   padding_mode='zeros',align_corners=True)
    warped_src_fea = warped_src_fea.view(batch, dim, num_depth, height, width)
    if return_mask:
        valid_mask = valid_mask.view(batch,num_depth,height,width)
        return warped_src_fea, valid_mask
    else:
        return warped_src_fea
def depth_normalization(depth, inverse_depth_min, inverse_depth_max):
    """Map a depth map to its normalized position in the inverse-depth range
    (0 at inverse_depth_max, 1 at inverse_depth_min)."""
    inverse_depth = 1.0 / (depth + 1e-5)  # epsilon guards against depth == 0
    return (inverse_depth - inverse_depth_max) / (inverse_depth_min - inverse_depth_max)
def depth_unnormalization(normalized_depth, inverse_depth_min, inverse_depth_max):
    """Inverse of depth_normalization: map a normalized index back to depth."""
    # [B,1,H,W] linear interpolation in inverse-depth space
    inverse_depth = inverse_depth_max + normalized_depth * (inverse_depth_min - inverse_depth_max)
    return 1.0 / inverse_depth
IGEV | IGEV-main/IGEV-MVS/core/igev_mvs.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .submodule import *
from .corr import *
from .extractor import *
from .update import *
# PyTorch < 1.6 has no AMP support; fall back to a no-op context manager so
# `with autocast(enabled=...)` still works (mixed precision simply disabled).
# BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
# catch only the failures this lookup can actually raise.
try:
    autocast = torch.cuda.amp.autocast
except (ImportError, AttributeError):
    class autocast:
        def __init__(self, enabled):
            pass
        def __enter__(self):
            pass
        def __exit__(self, *args):
            pass
class IGEVMVS(nn.Module):
    """Iterative multi-view stereo network (IGEV-MVS).

    Pipeline: extract per-view features, build a view-weighted grouped
    correlation volume over inverse-depth hypotheses, regularize it with a 3-D
    hourglass, then iteratively refine a disparity index with multi-level
    ConvGRUs and convex upsampling.  Disparities are expressed as indices into
    the `num_sample` inverse-depth hypotheses, normalized by num_sample-1.
    """
    def __init__(self, args):
        super().__init__()
        context_dims = [128, 128, 128]   # hidden dims of the three GRU levels
        self.n_gru_layers = 3
        self.slow_fast_gru = False       # if True, extra coarse-GRU-only updates per iteration
        self.mixed_precision = True
        self.num_sample = 64             # number of inverse-depth hypotheses
        self.G = 1                       # correlation groups
        self.corr_radius = 4
        self.corr_levels = 2
        self.iters = args.iteration
        self.update_block = BasicMultiUpdateBlock(hidden_dims=context_dims)
        # project the regularized cost volume into GRU hidden states at 3 scales
        self.conv_hidden_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1)
        self.conv_hidden_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=2)
        self.conv_hidden_4 = nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=2)
        self.feature = Feature()
        # stem convolutions supplying fine-detail features at 1/2 and 1/4 scale
        self.stem_2 = nn.Sequential(
            BasicConv_IN(3, 32, kernel_size=3, stride=2, padding=1),
            nn.Conv2d(32, 32, 3, 1, 1, bias=False),
            nn.InstanceNorm2d(32), nn.ReLU()
            )
        self.stem_4 = nn.Sequential(
            BasicConv_IN(32, 48, kernel_size=3, stride=2, padding=1),
            nn.Conv2d(48, 48, 3, 1, 1, bias=False),
            nn.InstanceNorm2d(48), nn.ReLU()
            )
        # matching descriptor head (features are L2-normalized in forward)
        self.conv = BasicConv_IN(96, 48, kernel_size=3, padding=1, stride=1)
        self.desc = nn.Conv2d(48, 48, kernel_size=1, padding=0, stride=1)
        # superpixel-style convex-upsampling weight predictors (initial disparity)
        self.spx = nn.Sequential(nn.ConvTranspose2d(2*32, 9, kernel_size=4, stride=2, padding=1),)
        self.spx_2 = Conv2x_IN(32, 32, True)
        self.spx_4 = nn.Sequential(
            BasicConv_IN(96, 32, kernel_size=3, stride=1, padding=1),
            nn.Conv2d(32, 32, 3, 1, 1, bias=False),
            nn.InstanceNorm2d(32), nn.ReLU()
            )
        self.depth_initialization = DepthInitialization(self.num_sample)
        self.pixel_view_weight = PixelViewWeight(self.G)
        # cost-volume regularization
        self.corr_stem = BasicConv(1, 8, is_3d=True, kernel_size=3, stride=1, padding=1)
        self.corr_feature_att = FeatureAtt(8, 96)
        self.cost_agg = hourglass(8)
        # upsampling weight predictors used during GRU iterations
        self.spx_2_gru = Conv2x(32, 32, True)
        self.spx_gru = nn.Sequential(nn.ConvTranspose2d(2*32, 9, kernel_size=4, stride=2, padding=1),)
    def upsample_disp(self, depth, mask_feat_4, stem_2x):
        """Convex-upsample a low-res disparity 4x using weights predicted from
        the GRU mask features fused with 1/2-scale stem features."""
        with autocast(enabled=self.mixed_precision):
            xspx = self.spx_2_gru(mask_feat_4, stem_2x)
            spx_pred = self.spx_gru(xspx)
            spx_pred = F.softmax(spx_pred, 1)
            up_depth = context_upsample(depth, spx_pred).unsqueeze(1)
        return up_depth
    def forward(self, imgs, proj_matrices, depth_min, depth_max, test_mode=False):
        """Predict disparity maps.

        imgs / proj_matrices are dicts keyed by pyramid level; view 0 is the
        reference.  Returns disp_up only in test mode, otherwise
        (disp_init, disp_predictions) for loss computation.
        """
        proj_matrices_2 = torch.unbind(proj_matrices['level_2'].float(), 1)
        depth_min = depth_min.float()
        depth_max = depth_max.float()
        ref_proj = proj_matrices_2[0]
        src_projs = proj_matrices_2[1:]
        with autocast(enabled=self.mixed_precision):
            # per-view backbone features; index 0 of each scale is the reference
            images = torch.unbind(imgs['level_0'], dim=1)
            features = self.feature(imgs['level_0'])
            ref_feature = []
            for fea in features:
                ref_feature.append(torch.unbind(fea, dim=1)[0])
            src_features = [src_fea for src_fea in torch.unbind(features[0], dim=1)[1:]]
            # augment 1/4-scale features with stem details
            stem_2x = self.stem_2(images[0])
            stem_4x = self.stem_4(stem_2x)
            ref_feature[0] = torch.cat((ref_feature[0], stem_4x), 1)
            for idx, src_fea in enumerate(src_features):
                stem_2y = self.stem_2(images[idx + 1])
                stem_4y = self.stem_4(stem_2y)
                src_features[idx] = torch.cat((src_fea, stem_4y), 1)
            # L2-normalized matching descriptors
            match_left = self.desc(self.conv(ref_feature[0]))
            match_left = match_left / torch.norm(match_left, 2, 1, True)
            match_rights = [self.desc(self.conv(src_fea)) for src_fea in src_features]
            match_rights = [match_right / torch.norm(match_right, 2, 1, True) for match_right in match_rights]
            # upsampling weights for the initial (soft-argmax) disparity
            xspx = self.spx_4(ref_feature[0])
            xspx = self.spx_2(xspx, stem_2x)
            spx_pred = self.spx(xspx)
            spx_pred = F.softmax(spx_pred, 1)
        batch, dim, height, width = match_left.size()
        inverse_depth_min = (1.0 / depth_min).view(batch, 1, 1, 1)
        inverse_depth_max = (1.0 / depth_max).view(batch, 1, 1, 1)
        device = match_left.device
        # accumulate view-weighted correlation over all source views
        correlation_sum = 0
        view_weight_sum = 1e-5
        match_left = match_left.float()
        depth_samples = self.depth_initialization(inverse_depth_min, inverse_depth_max, height, width, device)
        for src_feature, src_proj in zip(match_rights, src_projs):
            src_feature = src_feature.float()
            warped_feature = differentiable_warping(src_feature, src_proj, ref_proj, depth_samples)
            warped_feature = warped_feature.view(batch, self.G, dim // self.G, self.num_sample, height, width)
            correlation = torch.mean(warped_feature * match_left.view(batch, self.G, dim // self.G, 1, height, width), dim=2, keepdim=False)
            view_weight = self.pixel_view_weight(correlation)
            del warped_feature, src_feature, src_proj
            correlation_sum += correlation * view_weight.unsqueeze(1)
            view_weight_sum += view_weight.unsqueeze(1)
            del correlation, view_weight
        del match_left, match_rights, src_projs
        with autocast(enabled=self.mixed_precision):
            # normalize by accumulated view weights, then regularize in 3-D
            init_corr_volume = correlation_sum.div_(view_weight_sum)
            corr_volume = self.corr_stem(init_corr_volume)
            corr_volume = self.corr_feature_att(corr_volume, ref_feature[0])
            regularized_cost_volume = self.cost_agg(corr_volume, ref_feature)
            # derive the three GRU hidden states from the regularized volume
            GEV_hidden = self.conv_hidden_1(regularized_cost_volume.squeeze(1))
            GEV_hidden_2 = self.conv_hidden_2(GEV_hidden)
            GEV_hidden_4 = self.conv_hidden_4(GEV_hidden_2)
            net_list = [GEV_hidden, GEV_hidden_2, GEV_hidden_4]
            net_list = [torch.tanh(x) for x in net_list]
        corr_block = CorrBlock1D_Cost_Volume
        init_corr_volume = init_corr_volume.float()
        regularized_cost_volume = regularized_cost_volume.float()
        # soft-argmax over depth hypotheses gives the initial disparity index
        probability = F.softmax(regularized_cost_volume.squeeze(1), dim=1)
        index = torch.arange(0, self.num_sample, 1, device=probability.device).view(1, self.num_sample, 1, 1).float()
        disp_init = torch.sum(index * probability, dim = 1, keepdim=True)
        corr_fn = corr_block(init_corr_volume, regularized_cost_volume, radius=self.corr_radius, num_levels=self.corr_levels, inverse_depth_min=inverse_depth_min, inverse_depth_max=inverse_depth_max, num_sample=self.num_sample)
        disp_predictions = []
        disp = disp_init
        for itr in range(self.iters):
            disp = disp.detach()
            corr = corr_fn(disp)
            with autocast(enabled=self.mixed_precision):
                if self.n_gru_layers == 3 and self.slow_fast_gru: # Update low-res GRU
                    net_list = self.update_block(net_list, iter16=True, iter08=False, iter04=False, update=False)
                if self.n_gru_layers >= 2 and self.slow_fast_gru:# Update low-res GRU and mid-res GRU
                    net_list = self.update_block(net_list, iter16=self.n_gru_layers==3, iter08=True, iter04=False, update=False)
                net_list, mask_feat_4, delta_disp = self.update_block(net_list, corr, disp, iter16=self.n_gru_layers==3, iter08=self.n_gru_layers>=2)
            disp = disp + delta_disp
            if test_mode and itr < self.iters-1:
                # in test mode only the last iteration's upsampled map is needed
                continue
            disp_up = self.upsample_disp(disp, mask_feat_4, stem_2x) / (self.num_sample-1)
            disp_predictions.append(disp_up)
        disp_init = context_upsample(disp_init, spx_pred.float()).unsqueeze(1) / (self.num_sample-1)
        if test_mode:
            return disp_up
        return disp_init, disp_predictions
| 8,325 | 41.479592 | 227 | py |
IGEV | IGEV-main/IGEV-MVS/core/extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import timm
import math
from .submodule import *
class ResidualBlock(nn.Module):
    """Two 3x3 convolutions with a configurable normalization layer and an
    identity (or 1x1 projection) skip connection."""

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

        needs_projection = not (stride == 1 and in_planes == planes)
        num_groups = planes // 8
        factories = {
            'group': lambda: nn.GroupNorm(num_groups=num_groups, num_channels=planes),
            'batch': lambda: nn.BatchNorm2d(planes),
            'instance': lambda: nn.InstanceNorm2d(planes),
            'none': lambda: nn.Sequential(),
        }
        if norm_fn in factories:
            make_norm = factories[norm_fn]
            self.norm1 = make_norm()
            self.norm2 = make_norm()
            if needs_projection:
                self.norm3 = make_norm()

        if not needs_projection:
            self.downsample = None
        else:
            # 1x1 projection aligns channel count / stride for the skip path
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)

    def forward(self, x):
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.relu(self.norm2(self.conv2(out)))
        skip = x if self.downsample is None else self.downsample(x)
        return self.relu(skip + out)
class MultiBasicEncoder(nn.Module):
    """Residual context encoder emitting feature lists at three scales
    (outputs04 / outputs08 / outputs16), one output head per entry of
    `output_dim`.

    NOTE(review): the default `output_dim=[128]` is a mutable default and is
    incompatible with the `dim[2]` indexing below — callers must pass a list
    of 3-element sequences.
    """
    def __init__(self, output_dim=[128], norm_fn='batch', dropout=0.0, downsample=3):
        super(MultiBasicEncoder, self).__init__()
        self.norm_fn = norm_fn
        self.downsample = downsample
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(64)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(64)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        # `downsample` controls how many of the early stages use stride 2
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1 + (downsample > 2), padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        self.in_planes = 64
        self.layer1 = self._make_layer(64, stride=1)
        self.layer2 = self._make_layer(96, stride=1 + (downsample > 1))
        self.layer3 = self._make_layer(128, stride=1 + (downsample > 0))
        self.layer4 = self._make_layer(128, stride=2)
        self.layer5 = self._make_layer(128, stride=2)
        # one output head per requested dim at the finest scale
        output_list = []
        for dim in output_dim:
            conv_out = nn.Sequential(
                ResidualBlock(128, 128, self.norm_fn, stride=1),
                nn.Conv2d(128, dim[2], 3, padding=1))
            output_list.append(conv_out)
        self.outputs04 = nn.ModuleList(output_list)
        output_list = []
        for dim in output_dim:
            conv_out = nn.Sequential(
                ResidualBlock(128, 128, self.norm_fn, stride=1),
                nn.Conv2d(128, dim[1], 3, padding=1))
            output_list.append(conv_out)
        self.outputs08 = nn.ModuleList(output_list)
        output_list = []
        for dim in output_dim:
            conv_out = nn.Conv2d(128, dim[0], 3, padding=1)
            output_list.append(conv_out)
        self.outputs16 = nn.ModuleList(output_list)
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)
        else:
            self.dropout = None
        # Kaiming init for convs; identity init for norm affine parameters
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def _make_layer(self, dim, stride=1):
        # two residual blocks; the first carries the stride; tracks in_planes
        layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)
        self.in_planes = dim
        return nn.Sequential(*layers)
    def forward(self, x, dual_inp=False, num_layers=3):
        """Return per-scale output lists; with dual_inp=True the batch is
        treated as two stacked inputs: heads run on the first half while the
        full-batch trunk tensor `v` is also returned."""
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        if dual_inp:
            v = x
            x = x[:(x.shape[0]//2)]
        outputs04 = [f(x) for f in self.outputs04]
        if num_layers == 1:
            return (outputs04, v) if dual_inp else (outputs04,)
        y = self.layer4(x)
        outputs08 = [f(y) for f in self.outputs08]
        if num_layers == 2:
            return (outputs04, outputs08, v) if dual_inp else (outputs04, outputs08)
        z = self.layer5(y)
        outputs16 = [f(z) for f in self.outputs16]
        return (outputs04, outputs08, outputs16, v) if dual_inp else (outputs04, outputs08, outputs16)
class Feature(SubModule):
    """MobileNetV2 feature pyramid with U-Net style top-down fusion.

    Requires the third-party `timm` package for the (pretrained) backbone.
    Input stacks views on dim 1; views are folded into the batch axis before
    extraction and restored on output.  Returns features at 1/4, 1/8, 1/16
    and 1/32 of the input resolution.
    """
    def __init__(self):
        super(Feature, self).__init__()
        pretrained = True
        model = timm.create_model('mobilenetv2_100', pretrained=pretrained, features_only=True)
        # block boundaries / channel widths of the mobilenetv2_100 stages
        layers = [1,2,3,5,6]
        chans = [16, 24, 32, 96, 160]
        self.conv_stem = model.conv_stem
        self.bn1 = model.bn1
        self.block0 = torch.nn.Sequential(*model.blocks[0:layers[0]])
        self.block1 = torch.nn.Sequential(*model.blocks[layers[0]:layers[1]])
        self.block2 = torch.nn.Sequential(*model.blocks[layers[1]:layers[2]])
        self.block3 = torch.nn.Sequential(*model.blocks[layers[2]:layers[3]])
        self.block4 = torch.nn.Sequential(*model.blocks[layers[3]:layers[4]])
        # top-down fusion: each step upsamples and concatenates the skip
        self.deconv32_16 = Conv2x_IN(chans[4], chans[3], deconv=True, concat=True)
        self.deconv16_8 = Conv2x_IN(chans[3]*2, chans[2], deconv=True, concat=True)
        self.deconv8_4 = Conv2x_IN(chans[2]*2, chans[1], deconv=True, concat=True)
        self.conv4 = BasicConv_IN(chans[1]*2, chans[1]*2, kernel_size=3, stride=1, padding=1)
    def forward(self, x):
        # x: [B, V, 3, H, W] — fold the view axis into the batch
        B, V, _, H, W = x.size()
        x = x.view(B * V, -1, H, W)
        #x = self.act1(self.bn1(self.conv_stem(x)))
        x = self.bn1(self.conv_stem(x))
        x2 = self.block0(x)
        x4 = self.block1(x2)
        # return x4,x4,x4,x4
        x8 = self.block2(x4)
        x16 = self.block3(x8)
        x32 = self.block4(x16)
        # top-down fusion from coarse to fine
        x16 = self.deconv32_16(x32, x16)
        x8 = self.deconv16_8(x16, x8)
        x4 = self.deconv8_4(x8, x4)
        x4 = self.conv4(x4)
        # restore the per-view layout at every scale
        x4 = x4.view(B, V, -1, H // 4, W // 4)
        x8 = x8.view(B, V, -1, H // 8, W // 8)
        x16 = x16.view(B, V, -1, H // 16, W // 16)
        x32 = x32.view(B, V, -1, H // 32, W // 32)
        return [x4, x8, x16, x32]
IGEV | IGEV-main/IGEV-MVS/datasets/custom.py | from torch.utils.data import Dataset
from datasets.data_io import *
import os
import numpy as np
import cv2
from PIL import Image
from torchvision import transforms as T
import math
class MVSDataset(Dataset):
    """Loader for a "custom" MVSNet-style scene folder.

    Expected layout under ``datapath``:
        pair.txt                   -- view-pairing file (reference / source views)
        images/XXXXXXXX.jpg        -- input images, 8-digit zero-padded view ids
        cams_1/XXXXXXXX_cam.txt    -- per-view camera files (extrinsics,
                                      intrinsics, depth range)

    Each item provides the reference image plus its best source views at four
    resolution levels (full, 1/2, 1/4, 1/8), matching per-level projection
    matrices, and the reference view's depth range.
    """

    def __init__(self, datapath, n_views=5, img_wh=(640, 480)):
        # img_wh: (width, height) every image is resized to; both values
        # should be divisible by 8 so the coarser pyramid levels stay exact.
        self.levels = 4
        self.datapath = datapath
        self.img_wh = img_wh
        self.build_metas()
        self.n_views = n_views

    def build_metas(self):
        """Parse pair.txt into (ref_view, src_views) tuples in self.metas."""
        self.metas = []
        with open(os.path.join(self.datapath, 'pair.txt')) as f:
            num_viewpoint = int(f.readline())
            for view_idx in range(num_viewpoint):
                ref_view = int(f.readline().rstrip())
                # tokens alternate "view_id score view_id score ..." after the
                # leading count; keep every other token starting at index 1
                src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]
                if len(src_views) != 0:
                    self.metas += [(ref_view, src_views)]

    def read_cam_file(self, filename):
        """Read one cam.txt.

        Returns:
            (intrinsics 3x3 float32, extrinsics 4x4 float32,
             depth_min float, depth_max float)
        """
        with open(filename) as f:
            lines = [line.rstrip() for line in f.readlines()]
        # extrinsics: lines [1, 5), a 4x4 world-to-camera matrix.
        # np.array over split() replaces the deprecated np.fromstring(sep=' ').
        extrinsics = np.array(' '.join(lines[1:5]).split(), dtype=np.float32).reshape((4, 4))
        # intrinsics: lines [7, 10), a 3x3 matrix
        intrinsics = np.array(' '.join(lines[7:10]).split(), dtype=np.float32).reshape((3, 3))
        # line 11: "depth_min <interval> [...] depth_max"
        depth_min = float(lines[11].split()[0])
        depth_max = float(lines[11].split()[-1])
        return intrinsics, extrinsics, depth_min, depth_max

    def read_img(self, filename, h, w):
        """Load an image, normalize to [-1, 1], resize to self.img_wh and
        build the 4-level pyramid.

        Returns:
            (pyramid dict level_0..level_3, original_h, original_w)
        """
        img = Image.open(filename)
        # scale 0~255 to -1~1
        np_img = 2 * np.array(img, dtype=np.float32) / 255. - 1
        original_h, original_w, _ = np_img.shape  # assumes a 3-channel image
        np_img = cv2.resize(np_img, self.img_wh, interpolation=cv2.INTER_LINEAR)
        np_img_ms = {
            "level_3": cv2.resize(np_img, (w // 8, h // 8), interpolation=cv2.INTER_LINEAR),
            "level_2": cv2.resize(np_img, (w // 4, h // 4), interpolation=cv2.INTER_LINEAR),
            "level_1": cv2.resize(np_img, (w // 2, h // 2), interpolation=cv2.INTER_LINEAR),
            "level_0": np_img
        }
        return np_img_ms, original_h, original_w

    def _build_proj_matrices(self, intrinsics, extrinsics):
        """Return {level: 4x4 projection matrix} with K @ [R|t] in the top
        3x4 block, where K is scaled per pyramid level (1/8, 1/4, 1/2, 1).

        Unlike the previous inline code this does NOT mutate its inputs.
        """
        proj = {}
        for level, scale in (('level_3', 0.125), ('level_2', 0.25),
                             ('level_1', 0.5), ('level_0', 1.0)):
            scaled_K = intrinsics.copy()
            scaled_K[:2, :] *= scale
            proj_mat = extrinsics.copy()
            proj_mat[:3, :4] = np.matmul(scaled_K, proj_mat[:3, :4])
            proj[level] = proj_mat
        return proj

    def __len__(self):
        return len(self.metas)

    def __getitem__(self, idx):
        ref_view, src_views = self.metas[idx]
        # use only the reference view and the first n_views-1 source views
        view_ids = [ref_view] + src_views[:self.n_views - 1]

        levels = ['level_0', 'level_1', 'level_2', 'level_3']
        imgs_per_level = {level: [] for level in levels}
        proj_per_level = {level: [] for level in levels}
        depth_min = None
        depth_max = None

        for i, vid in enumerate(view_ids):
            img_filename = os.path.join(self.datapath, f'images/{vid:08d}.jpg')
            proj_mat_filename = os.path.join(self.datapath, f'cams_1/{vid:08d}_cam.txt')

            imgs, original_h, original_w = self.read_img(
                img_filename, self.img_wh[1], self.img_wh[0])
            for level in levels:
                imgs_per_level[level].append(imgs[level])

            intrinsics, extrinsics, depth_min_, depth_max_ = self.read_cam_file(proj_mat_filename)
            # rescale intrinsics from the original image size to img_wh
            intrinsics[0] *= self.img_wh[0] / original_w
            intrinsics[1] *= self.img_wh[1] / original_h
            proj = self._build_proj_matrices(intrinsics, extrinsics)
            for level in levels:
                proj_per_level[level].append(proj[level])

            if i == 0:  # the reference view defines the depth range
                depth_min = depth_min_
                depth_max = depth_max_

        # imgs: per level N*3*H*W (N = number of views); proj: per level N*4*4
        imgs = {level: np.stack(v).transpose([0, 3, 1, 2])
                for level, v in imgs_per_level.items()}
        proj = {level: np.stack(v) for level, v in proj_per_level.items()}

        return {"imgs": imgs,              # dict of N*3*H*W arrays
                "proj_matrices": proj,     # dict of N*4*4 arrays
                "depth_min": depth_min,    # scalar
                "depth_max": depth_max,    # scalar
                "filename": '{}/' + '{:0>8}'.format(view_ids[0]) + "{}"
                }
| 5,490 | 36.609589 | 101 | py |
IGEV | IGEV-main/IGEV-MVS/datasets/eth3d.py | from torch.utils.data import Dataset
from datasets.data_io import *
import os
import numpy as np
import cv2
from PIL import Image
class MVSDataset(Dataset):
    """ETH3D high-resolution multi-view stereo loader.

    For each reference view, yields the reference image and its paired source
    views at four resolution levels (full, 1/2, 1/4, 1/8), matching per-level
    projection matrices, and the reference view's depth range.
    """

    def __init__(self, datapath, split='test', n_views=7, img_wh=(1920,1280)):
        # datapath: root folder with one sub-directory per scan
        # split: 'test' or 'train' -- selects the scan list in build_metas
        # n_views: total views used per sample (1 reference + n_views-1 sources)
        # img_wh: (width, height) target size; should be divisible by 8 so the
        #         coarser pyramid levels stay exact
        self.levels = 4
        self.datapath = datapath
        self.img_wh = img_wh
        self.split = split
        self.build_metas()
        self.n_views = n_views

    def build_metas(self):
        """Collect (scan, -1, ref_view, src_views) tuples from each scan's pair.txt.

        The constant -1 is a placeholder (no light index for ETH3D), kept so
        metas have the same arity as other datasets in this project.
        """
        self.metas = []
        # NOTE(review): if split is neither 'test' nor 'train', self.scans is
        # never assigned and the loop below raises AttributeError.
        if self.split == "test":
            self.scans = ['botanical_garden', 'boulders', 'bridge', 'door',
                          'exhibition_hall', 'lecture_room', 'living_room', 'lounge',
                          'observatory', 'old_computer', 'statue', 'terrace_2']
        elif self.split == "train":
            self.scans = ['courtyard', 'delivery_area', 'electro', 'facade',
                          'kicker', 'meadow', 'office', 'pipes', 'playground',
                          'relief', 'relief_2', 'terrace', 'terrains']

        for scan in self.scans:
            with open(os.path.join(self.datapath, scan, 'pair.txt')) as f:
                num_viewpoint = int(f.readline())
                for view_idx in range(num_viewpoint):
                    ref_view = int(f.readline().rstrip())
                    # tokens alternate "view_id score ..."; keep every other
                    # token starting at index 1 (the view ids)
                    src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]
                    if len(src_views) != 0:
                        self.metas += [(scan, -1, ref_view, src_views)]

    def read_cam_file(self, filename):
        """Read one cam.txt.

        Returns (intrinsics 3x3 float32, extrinsics 4x4 float32,
        depth_min, depth_max). A negative depth_min is clamped to 1.
        NOTE(review): np.fromstring(sep=' ') is deprecated in modern NumPy;
        it still works but emits a DeprecationWarning.
        """
        with open(filename) as f:
            lines = [line.rstrip() for line in f.readlines()]
        # extrinsics: line [1,5), 4x4 matrix
        extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ')
        extrinsics = extrinsics.reshape((4, 4))
        # intrinsics: line [7-10), 3x3 matrix
        intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ')
        intrinsics = intrinsics.reshape((3, 3))
        depth_min = float(lines[11].split()[0])
        if depth_min < 0:
            # guard against invalid (negative) near depths in the camera files
            depth_min = 1
        depth_max = float(lines[11].split()[-1])
        return intrinsics, extrinsics, depth_min, depth_max

    def read_img(self, filename, h, w):
        """Load an image, normalize to [-1, 1], resize to self.img_wh and
        build the 4-level pyramid.

        Returns (pyramid dict level_0..level_3, original_h, original_w);
        the original size is needed later to rescale the intrinsics.
        Assumes a 3-channel image (shape unpack below) -- TODO confirm.
        """
        img = Image.open(filename)
        # scale 0~255 to -1~1
        np_img = 2*np.array(img, dtype=np.float32) / 255. - 1
        original_h, original_w, _ = np_img.shape
        np_img = cv2.resize(np_img, self.img_wh, interpolation=cv2.INTER_LINEAR)
        np_img_ms = {
            "level_3": cv2.resize(np_img, (w//8, h//8), interpolation=cv2.INTER_LINEAR),
            "level_2": cv2.resize(np_img, (w//4, h//4), interpolation=cv2.INTER_LINEAR),
            "level_1": cv2.resize(np_img, (w//2, h//2), interpolation=cv2.INTER_LINEAR),
            "level_0": np_img
        }
        return np_img_ms, original_h, original_w

    def __len__(self):
        # one sample per (scan, reference view) pair
        return len(self.metas)

    def __getitem__(self, idx):
        scan, _, ref_view, src_views = self.metas[idx]
        # use only the reference view and first nviews-1 source views
        view_ids = [ref_view] + src_views[:self.n_views-1]

        imgs_0 = []
        imgs_1 = []
        imgs_2 = []
        imgs_3 = []

        # depth = None
        depth_min = None
        depth_max = None

        proj_matrices_0 = []
        proj_matrices_1 = []
        proj_matrices_2 = []
        proj_matrices_3 = []

        for i, vid in enumerate(view_ids):
            img_filename = os.path.join(self.datapath, scan, f'images/{vid:08d}.jpg')
            proj_mat_filename = os.path.join(self.datapath, scan, f'cams_1/{vid:08d}_cam.txt')

            imgs, original_h, original_w = self.read_img(img_filename,self.img_wh[1], self.img_wh[0])
            imgs_0.append(imgs['level_0'])
            imgs_1.append(imgs['level_1'])
            imgs_2.append(imgs['level_2'])
            imgs_3.append(imgs['level_3'])

            intrinsics, extrinsics, depth_min_, depth_max_ = self.read_cam_file(proj_mat_filename)
            # rescale the intrinsics from the original image size to img_wh
            intrinsics[0] *= self.img_wh[0]/original_w
            intrinsics[1] *= self.img_wh[1]/original_h

            # Per-level projection matrices K @ [R|t]. The intrinsics array is
            # scaled IN PLACE: x0.125 first (level_3), then x2 three times so
            # the cumulative factors are 0.25, 0.5 and finally 1.0 (level_0).
            # The statement order below is therefore significant.
            proj_mat = extrinsics.copy()
            intrinsics[:2,:] *= 0.125
            proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
            proj_matrices_3.append(proj_mat)

            proj_mat = extrinsics.copy()
            intrinsics[:2,:] *= 2
            proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
            proj_matrices_2.append(proj_mat)

            proj_mat = extrinsics.copy()
            intrinsics[:2,:] *= 2
            proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
            proj_matrices_1.append(proj_mat)

            proj_mat = extrinsics.copy()
            intrinsics[:2,:] *= 2
            proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
            proj_matrices_0.append(proj_mat)

            if i == 0:  # reference view
                # the reference view defines the sample's depth range
                depth_min = depth_min_
                depth_max = depth_max_

        # imgs: N*3*H0*W0, N is number of images
        imgs_0 = np.stack(imgs_0).transpose([0, 3, 1, 2])
        imgs_1 = np.stack(imgs_1).transpose([0, 3, 1, 2])
        imgs_2 = np.stack(imgs_2).transpose([0, 3, 1, 2])
        imgs_3 = np.stack(imgs_3).transpose([0, 3, 1, 2])
        imgs = {}
        imgs['level_0'] = imgs_0
        imgs['level_1'] = imgs_1
        imgs['level_2'] = imgs_2
        imgs['level_3'] = imgs_3

        # proj_matrices: N*4*4
        proj_matrices_0 = np.stack(proj_matrices_0)
        proj_matrices_1 = np.stack(proj_matrices_1)
        proj_matrices_2 = np.stack(proj_matrices_2)
        proj_matrices_3 = np.stack(proj_matrices_3)
        proj={}
        proj['level_3']=proj_matrices_3
        proj['level_2']=proj_matrices_2
        proj['level_1']=proj_matrices_1
        proj['level_0']=proj_matrices_0

        return {"imgs": imgs,                   # N*3*H0*W0
                "proj_matrices": proj,          # N*4*4
                "depth_min": depth_min,         # scalar
                "depth_max": depth_max,
                "filename": scan + '/{}/' + '{:0>8}'.format(view_ids[0]) + "{}"
                }
| 6,164 | 37.773585 | 101 | py |
IGEV | IGEV-main/IGEV-MVS/datasets/dtu_yao_eval.py | from torch.utils.data import Dataset
import numpy as np
import os
from PIL import Image
from datasets.data_io import *
import cv2
class MVSDataset(Dataset):
    """DTU evaluation-split multi-view stereo loader.

    Reads the scan names from a list file, then for each (scan, reference
    view) yields the reference image plus source views at four resolution
    levels with matching per-level projection matrices and the reference
    view's depth range.
    """

    def __init__(self, datapath, listfile, nviews=5, img_wh=(1600, 1152)):
        # datapath: DTU root (one sub-folder per scan)
        # listfile: text file with one scan name per line
        # nviews: total views used per sample (1 reference + nviews-1 sources)
        # img_wh: (width, height) target size; should be divisible by 8 so the
        #         coarser pyramid levels stay exact
        super(MVSDataset, self).__init__()
        self.levels = 4
        self.datapath = datapath
        self.listfile = listfile
        self.nviews = nviews
        self.img_wh = img_wh
        self.metas = self.build_list()

    def build_list(self):
        """Build the (scan, ref_view, src_views) sample list from pair.txt files."""
        metas = []
        with open(self.listfile) as f:
            scans = f.readlines()
            scans = [line.rstrip() for line in scans]

        for scan in scans:
            pair_file = "{}/pair.txt".format(scan)
            # read the pair file
            with open(os.path.join(self.datapath, pair_file)) as f:
                num_viewpoint = int(f.readline())
                # viewpoints (49)
                for view_idx in range(num_viewpoint):
                    ref_view = int(f.readline().rstrip())
                    # tokens alternate "view_id score ..."; keep the view ids
                    src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]
                    metas.append((scan, ref_view, src_views))
        print("dataset", "metas:", len(metas))
        return metas

    def __len__(self):
        # one sample per (scan, reference view) pair
        return len(self.metas)

    def read_cam_file(self, filename):
        """Read one cam.txt.

        Returns (intrinsics 3x3 float32, extrinsics 4x4 float32,
        depth_min, depth_max).
        NOTE(review): np.fromstring(sep=' ') is deprecated in modern NumPy;
        it still works but emits a DeprecationWarning.
        """
        with open(filename) as f:
            lines = f.readlines()
            lines = [line.rstrip() for line in lines]
        # extrinsics: line [1,5), 4x4 matrix
        extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ').reshape((4, 4))
        # intrinsics: line [7-10), 3x3 matrix
        intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ').reshape((3, 3))
        depth_min = float(lines[11].split()[0])
        depth_max = float(lines[11].split()[-1])
        return intrinsics, extrinsics, depth_min, depth_max

    def read_mask(self, filename):
        """Load a mask image and binarize it (pixel > 10 -> 1.0, else 0.0)."""
        img = Image.open(filename)
        np_img = np.array(img, dtype=np.float32)
        np_img = (np_img > 10).astype(np.float32)
        return np_img

    def read_img(self, filename):
        """Load an image, normalize to [-1, 1], resize to self.img_wh and
        return the 4-level pyramid dict (level_0 full .. level_3 = 1/8).

        Unlike the other dataset loaders in this project, the original image
        size is NOT returned here; __getitem__ uses hard-coded constants.
        """
        img = Image.open(filename)
        # scale 0~255 to -1~1
        np_img = 2*np.array(img, dtype=np.float32) / 255. - 1
        np_img = cv2.resize(np_img, self.img_wh, interpolation=cv2.INTER_LINEAR)
        h, w, _ = np_img.shape
        np_img_ms = {
            "level_3": cv2.resize(np_img, (w//8, h//8), interpolation=cv2.INTER_LINEAR),
            "level_2": cv2.resize(np_img, (w//4, h//4), interpolation=cv2.INTER_LINEAR),
            "level_1": cv2.resize(np_img, (w//2, h//2), interpolation=cv2.INTER_LINEAR),
            "level_0": np_img
        }
        return np_img_ms

    def __getitem__(self, idx):
        scan, ref_view, src_views = self.metas[idx]
        # use only the reference view and first nviews-1 source views
        view_ids = [ref_view] + src_views[:self.nviews - 1]

        # Original DTU image size used to rescale the intrinsics.
        # NOTE(review): hard-coded here rather than read from the image file;
        # presumably all DTU inputs are 1600x1200 -- verify against the data.
        img_w = 1600
        img_h = 1200

        imgs_0 = []
        imgs_1 = []
        imgs_2 = []
        imgs_3 = []

        depth_min = None
        depth_max = None

        proj_matrices_0 = []
        proj_matrices_1 = []
        proj_matrices_2 = []
        proj_matrices_3 = []

        for i, vid in enumerate(view_ids):
            img_filename = os.path.join(self.datapath, '{}/images/{:0>8}.jpg'.format(scan, vid))
            proj_mat_filename = os.path.join(self.datapath, '{}/cams_1/{:0>8}_cam.txt'.format(scan, vid))

            imgs = self.read_img(img_filename)
            imgs_0.append(imgs['level_0'])
            imgs_1.append(imgs['level_1'])
            imgs_2.append(imgs['level_2'])
            imgs_3.append(imgs['level_3'])

            intrinsics, extrinsics, depth_min_, depth_max_ = self.read_cam_file(proj_mat_filename)
            # rescale the intrinsics from the original image size to img_wh
            intrinsics[0] *= self.img_wh[0]/img_w
            intrinsics[1] *= self.img_wh[1]/img_h

            # Per-level projection matrices K @ [R|t]. The intrinsics array is
            # scaled IN PLACE: x0.125 first (level_3), then x2 three times so
            # the cumulative factors are 0.25, 0.5 and finally 1.0 (level_0).
            # The statement order below is therefore significant.
            proj_mat = extrinsics.copy()
            intrinsics[:2,:] *= 0.125
            proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
            proj_matrices_3.append(proj_mat)

            proj_mat = extrinsics.copy()
            intrinsics[:2,:] *= 2
            proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
            proj_matrices_2.append(proj_mat)

            proj_mat = extrinsics.copy()
            intrinsics[:2,:] *= 2
            proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
            proj_matrices_1.append(proj_mat)

            proj_mat = extrinsics.copy()
            intrinsics[:2,:] *= 2
            proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
            proj_matrices_0.append(proj_mat)

            if i == 0:  # reference view
                # the reference view defines the sample's depth range
                depth_min = depth_min_
                depth_max = depth_max_

        # imgs: per level N*3*H*W, N is number of images
        imgs_0 = np.stack(imgs_0).transpose([0, 3, 1, 2])
        imgs_1 = np.stack(imgs_1).transpose([0, 3, 1, 2])
        imgs_2 = np.stack(imgs_2).transpose([0, 3, 1, 2])
        imgs_3 = np.stack(imgs_3).transpose([0, 3, 1, 2])
        imgs = {}
        imgs['level_0'] = imgs_0
        imgs['level_1'] = imgs_1
        imgs['level_2'] = imgs_2
        imgs['level_3'] = imgs_3

        # proj_matrices: N*4*4
        proj_matrices_0 = np.stack(proj_matrices_0)
        proj_matrices_1 = np.stack(proj_matrices_1)
        proj_matrices_2 = np.stack(proj_matrices_2)
        proj_matrices_3 = np.stack(proj_matrices_3)
        proj={}
        proj['level_3']=proj_matrices_3
        proj['level_2']=proj_matrices_2
        proj['level_1']=proj_matrices_1
        proj['level_0']=proj_matrices_0

        return {"imgs": imgs,                   # N*3*H0*W0
                "proj_matrices": proj,          # N*4*4
                "depth_min": depth_min,         # scalar
                "depth_max": depth_max,         # scalar
                "filename": scan + '/{}/' + '{:0>8}'.format(view_ids[0]) + "{}"}
| 5,897 | 36.09434 | 105 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.