repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/load_net.py
|
"""
Utility file to select GraphNN model as
selected by the user
"""
from nets.Planetoid_node_classification.gated_gcn_net import GatedGCNNet, GatedGCNNet_pyg, ResGatedGCNNet_pyg
from nets.Planetoid_node_classification.gcn_net import GCNNet, GCNNet_pyg
from nets.Planetoid_node_classification.gat_net import GATNet, GATNet_pyg
from nets.Planetoid_node_classification.graphsage_net import GraphSageNet, GraphSageNet_pyg
from nets.Planetoid_node_classification.mlp_net import MLPNet, MLPNet_pyg
from nets.Planetoid_node_classification.gin_net import GINNet, GINNet_pyg
from nets.Planetoid_node_classification.mo_net import MoNet as MoNet_, MoNetNet_pyg
from gcn_lib.sparse import MultiSeq, PlainDynBlock, ResDynBlock, DenseDynBlock, DilatedKnnGraph
from gcn_lib.sparse import MLP as MLPpyg
from gcn_lib.sparse import GraphConv as GraphConvNet
# Thin factory wrappers: each forwards net_params to the matching DGL model class.
def GatedGCN(net_params):
    """Build a DGL GatedGCNNet from net_params."""
    return GatedGCNNet(net_params)

def GCN(net_params):
    """Build a DGL GCNNet from net_params."""
    return GCNNet(net_params)

def GAT(net_params):
    """Build a DGL GATNet from net_params."""
    return GATNet(net_params)

def GraphSage(net_params):
    """Build a DGL GraphSageNet from net_params."""
    return GraphSageNet(net_params)

def MLP(net_params):
    """Build a (graph-agnostic) MLPNet from net_params."""
    return MLPNet(net_params)

def GIN(net_params):
    """Build a DGL GINNet from net_params."""
    return GINNet(net_params)

def MoNet(net_params):
    """Build a DGL MoNet from net_params."""
    return MoNet_(net_params)
def GIN_pyg(net_params):
    """Build a PyG GINNet_pyg from net_params.

    GINConv's aggregator cannot be selected through GINNet_pyg's constructor,
    so the requested neighbour aggregation ('mean' or 'max') is recorded on
    the model afterwards; any other value leaves the model's default.
    """
    model = GINNet_pyg(net_params)
    aggr = net_params['neighbor_aggr_GIN']
    # Previously wrapped in redundant str() calls; literals are already str.
    if aggr in ('mean', 'max'):
        model.aggr = aggr
    return model
# Thin factory wrappers: each forwards net_params to the matching PyG model class.
def MLP_pyg(net_params):
    """Build a (graph-agnostic) MLPNet_pyg from net_params."""
    return MLPNet_pyg(net_params)

def GCN_pyg(net_params):
    """Build a PyG GCNNet_pyg from net_params."""
    return GCNNet_pyg(net_params)

def GatedGCN_pyg(net_params):
    """Build a PyG GatedGCNNet_pyg (GatedGraphConv-based) from net_params."""
    return GatedGCNNet_pyg(net_params)

def ResGatedGCN_pyg(net_params):
    """Build a PyG ResGatedGCNNet_pyg from net_params."""
    return ResGatedGCNNet_pyg(net_params)

def GAT_pyg(net_params):
    """Build a PyG GATNet_pyg from net_params."""
    return GATNet_pyg(net_params)

# self.head = GraphConv(opt.in_channels, channels, conv, act, norm, bias, heads)
def GraphSage_pyg(net_params):
    """Build a PyG GraphSageNet_pyg from net_params."""
    return GraphSageNet_pyg(net_params)

def MoNet_pyg(net_params):
    """Build a PyG MoNetNet_pyg from net_params."""
    return MoNetNet_pyg(net_params)
def gnn_model(MODEL_NAME, net_params):
    """Construct the GNN selected by the user.

    Parameters
    ----------
    MODEL_NAME : str
        Key of the model registry below (e.g. 'GCN', 'GAT_pyg').
    net_params : dict
        Hyper-parameters forwarded to the model factory.

    Raises
    ------
    KeyError
        If MODEL_NAME is not a registered model (with the list of valid names).
    """
    models = {
        'GatedGCN': GatedGCN,
        'GCN': GCN,
        'GAT': GAT,
        'GraphSage': GraphSage,
        'MLP': MLP,
        'GIN': GIN,
        'MoNet': MoNet,
        'MLP_pyg': MLP_pyg,
        'GIN_pyg': GIN_pyg,
        'GCN_pyg': GCN_pyg,
        'GatedGCN_pyg': GatedGCN_pyg,
        'GAT_pyg': GAT_pyg,
        'GraphSage_pyg': GraphSage_pyg,
        'MoNet_pyg': MoNet_pyg,
        'ResGatedGCN_pyg': ResGatedGCN_pyg
    }
    try:
        constructor = models[MODEL_NAME]
    except KeyError:
        # Same exception type as before, but with an actionable message.
        raise KeyError(
            "Unknown model '{}'; expected one of {}".format(MODEL_NAME, sorted(models))
        ) from None
    return constructor(net_params)
| 2,570
| 27.566667
| 109
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/graphsage_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import SAGEConv
class GraphSageNet(nn.Module):
    """
    GraphSage network with multiple GraphSageLayer layers (DGL backend).

    Node features are integer ids embedded via nn.Embedding; final node
    representations are classified with an MLP readout.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer id)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)  # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # n_layers-1 hidden->hidden layers followed by one hidden->out layer.
        self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                                    dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])
        self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        """Return per-node class scores; `e` (edge features) is unused here."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # graphsage convolutions
        for conv in self.layers:
            h = conv(g, h)
        # output
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Cross-entropy loss for node classification."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
# the implementation of dgl and pyg are different
"""
class GraphSageNet_pyg(nn.Module):
    """
    GraphSage network built on torch_geometric's SAGEConv.

    Note: the dgl and pyg implementations differ; see GraphSageNet for the
    dgl variant.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimension
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        self.n_layers = net_params['L']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = nn.Dropout(p=dropout)
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        # BUG FIX: the requested aggregator used to be stored only on
        # `self.aggr`, which SAGEConv never reads, so 'maxpool' silently fell
        # back to SAGEConv's default 'mean'. Map it and pass it to each conv.
        self.aggr = 'max' if aggregator_type == 'maxpool' else 'mean'
        self.layers = nn.ModuleList([SAGEConv(hidden_dim, hidden_dim, aggr=self.aggr)
                                     for _ in range(self.n_layers - 1)])
        self.layers.append(SAGEConv(hidden_dim, out_dim, aggr=self.aggr))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Return per-node class scores; `e` (edge features) is unused here."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # graphsage convolutions
        for i in range(self.n_layers):
            h_in = h
            h = self.dropout(h)
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            # residual connection; skipped on the last layer when
            # out_dim != hidden_dim (the add would be shape-invalid)
            if self.residual and h_in.shape == h.shape:
                h = h_in + h
        # output
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Cross-entropy loss for node classification."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
| 5,141
| 35.211268
| 122
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/gin_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
from gcn_lib.sparse import MultiSeq, PlainDynBlock, ResDynBlock, DenseDynBlock, DilatedKnnGraph
from gcn_lib.sparse import MLP as MLPpyg
from gcn_lib.sparse import GraphConv as GraphConvNet
# import torch_geometric as tg
from torch_geometric.nn import GINConv
class GINNet(nn.Module):
    """
    GIN: Graph Isomorphism Network (dgl backend).

    Per-node class scores are the sum of linear predictions computed from the
    hidden representation at every layer, including the input embedding.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        readout = net_params['readout']  # this is graph_pooling_type
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        # One GIN layer per depth, each wrapping a fresh update MLP.
        self.ginlayers = torch.nn.ModuleList()
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        for layer in range(self.n_layers):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
            self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
                                           dropout, batch_norm, residual, 0, learn_eps))
        # Linear function for output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(self.n_layers+1):  # +1 covers the input embedding
            self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))

    def forward(self, g, h, e):
        """Return per-node class scores; `e` (edge features) is unused here."""
        h = self.embedding_h(h)
        # list of hidden representation at each layer (including input)
        hidden_rep = [h]
        for i in range(self.n_layers):
            h = self.ginlayers[i](g, h)
            hidden_rep.append(h)
        # sum the per-layer linear predictions
        score_over_layer = 0
        for i, h in enumerate(hidden_rep):
            score_over_layer += self.linears_prediction[i](h)
        return score_over_layer

    def loss(self, pred, label):
        """Cross-entropy loss for node classification."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
class GINNet_pyg(nn.Module):
    """
    GIN: Graph Isomorphism Network (pyg backend, GINConv).

    Like GINNet, class scores are summed over the linear predictions of
    every layer's representation, including the input embedding.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        readout = net_params['readout']  # this is graph_pooling_type
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        self.normlayers = torch.nn.ModuleList()
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)  # dense node features
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        for layer in range(self.n_layers):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
            # GINConv(nn, eps=0, train_eps=learn_eps)
            self.ginlayers.append(GINConv(ApplyNodeFunc(mlp), 0, learn_eps))
            # NOTE(review): neighbor_aggr_type is not forwarded to GINConv here;
            # load_net.GIN_pyg sets model.aggr afterwards — verify that this
            # actually reaches the conv layers.
            # self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
            #                                dropout, batch_norm, residual, 0, learn_eps))
            if batch_norm:
                self.normlayers.append(nn.BatchNorm1d(hidden_dim))
        # Linear function for output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(self.n_layers + 1):  # +1 covers the input embedding
            self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))

    def forward(self, h, edge_index, e):
        """Return per-node class scores; `e` (edge features) is unused here."""
        h = self.embedding_h(h)
        # list of hidden representation at each layer (including input)
        hidden_rep = [h]
        for i in range(self.n_layers):
            h_in = h
            h = self.ginlayers[i](h, edge_index)
            if self.batch_norm:
                h = self.normlayers[i](h)  # batch normalization
            h = F.relu(h)  # non-linear activation
            if self.residual:
                h = h_in + h  # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
            hidden_rep.append(h)
        # sum the per-layer linear predictions
        score_over_layer = 0
        for i, h in enumerate(hidden_rep):
            score_over_layer += self.linears_prediction[i](h)
        return score_over_layer

    def loss(self, pred, label):
        """Cross-entropy loss for node classification."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
| 5,612
| 35.448052
| 113
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/gcn_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
import dgl
import numpy as np
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.mlp_readout_layer import MLPReadout
class GCNNet(nn.Module):
    """
    GCN: Graph Convolutional Network (dgl backend).

    Kipf & Welling (ICLR 2017). Integer node ids are embedded, passed through
    a stack of GCNLayer convolutions, and classified with an MLP readout.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer id)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)  # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # Note: GCNLayer differs slightly from the builtin conv — it averages
        # the received messages in the reduce step instead of using the
        # symmetric c_{ij} normalization from the paper.
        self.layers = nn.ModuleList([GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                              self.batch_norm, self.residual) for _ in range(n_layers-1)])
        self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        """Return per-node class scores; `e` (edge features) is unused here."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # GCN convolutions
        for conv in self.layers:
            h = conv(g, h)
        # output
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Cross-entropy loss for node classification."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
class GCNNet_pyg(nn.Module):
    """
    GCN: Graph Convolutional Network built on torch_geometric's GCNConv.

    Mirrors the dgl GCNNet layout: L-1 hidden->hidden convolutions, one
    hidden->out convolution, then an MLP readout.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimension
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = dropout
        # BUG FIX: previously every conv produced hidden_dim while MLPReadout
        # expected out_dim, crashing whenever out_dim != hidden_dim. The last
        # conv now maps hidden_dim -> out_dim (as in GraphSageNet_pyg).
        self.layers = nn.ModuleList([GCNConv(hidden_dim, hidden_dim, improved=False)
                                     for _ in range(self.n_layers - 1)])
        self.layers.append(GCNConv(hidden_dim, out_dim, improved=False))
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)
                                             for _ in range(self.n_layers - 1)])
            self.normlayers.append(nn.BatchNorm1d(out_dim))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Return per-node class scores; `e` (edge features) is unused here."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # GCN convolutions
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.normlayers[i](h)  # batch normalization
            h = F.relu(h)  # non-linear activation
            # residual connection; skipped on the last layer when
            # out_dim != hidden_dim (the add would be shape-invalid)
            if self.residual and h_in.shape == h.shape:
                h = h_in + h
            h = F.dropout(h, self.dropout, training=self.training)
        # output
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Cross-entropy loss for node classification."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss

    def loss_proteins(self, pred, label):
        """Binary multi-label loss (e.g. ogbn-proteins); casts labels to float."""
        criterion = nn.BCEWithLogitsLoss()
        loss = criterion(pred, label.to(torch.float))
        return loss
| 5,125
| 33.635135
| 110
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/gated_gcn_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import numpy as np
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.gated_gcn_layer import GatedGCNLayer, ResGatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GatedGraphConv
class GatedGCNNet(nn.Module):
    """
    ResGatedGCN: Residual Gated Graph ConvNets (dgl backend).

    Bresson & Laurent (ICLR 2018 workshop). Updates both node and edge
    representations at every layer; supports an optional Laplacian
    positional encoding added to the input node embedding.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer id)
        in_dim_edge = 1  # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)  # node feat is an integer
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)  # edge feat is a float
        self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual) for _ in range(n_layers) ])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)

    def forward(self, g, h, e, h_pos_enc=None):
        """Return per-node class scores; h_pos_enc is the optional positional encoding."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets: each layer updates node AND edge features
        for conv in self.layers:
            h, e = conv(g, h, e)
        # output
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Cross-entropy loss for node classification."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class ResGatedGCNNet_pyg(nn.Module):
    """
    ResGatedGCN: Residual Gated Graph ConvNets (pyg backend).

    Bresson & Laurent (ICLR 2018 workshop). Updates both node and edge
    representations per layer; batch-norm / residual / dropout are applied
    here in the network rather than inside the layer.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimension
        in_dim_edge = 1  # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.edge_feat = net_params['edge_feat']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        # BUG FIX (dead code removed): an edge_feat-dependent nn.Embedding /
        # nn.Linear was built here and then unconditionally overwritten by the
        # Linear below, so it never took effect; only the Linear remains.
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)  # edge feat is a float
        self.layers = nn.ModuleList([ResGatedGCNLayer(hidden_dim, hidden_dim, self.dropout,
                                                      self.batch_norm, self.residual) for _ in range(self.n_layers)])
        if self.batch_norm:
            self.normlayers_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
            self.normlayers_e = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node class scores; h_pos_enc is the optional positional encoding."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets: each layer updates node AND edge features
        for i in range(self.n_layers):
            h_in = h
            e_in = e
            h, e = self.layers[i](h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers_h[i](h)
                e = self.normlayers_e[i](e)  # batch normalization
            if self.residual:
                h = h_in + h  # residual connection
                e = e_in + e
            h = F.dropout(h, self.dropout, training=self.training)
            e = F.dropout(e, self.dropout, training=self.training)
        # output
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Cross-entropy loss for node classification."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss

    def loss_proteins(self, pred, label):
        """Binary multi-label loss (e.g. ogbn-proteins); casts labels to float."""
        criterion = nn.BCEWithLogitsLoss()
        loss = criterion(pred, label.to(torch.float))
        return loss
"""
Gated Graph Sequence Neural Networks
An Experimental Study of Neural Networks for Variable Graphs
Li Y, Tarlow D, Brockschmidt M, et al. Gated graph sequence neural networks[J]. arXiv preprint arXiv:1511.05493, 2015.
https://arxiv.org/abs/1511.05493
Note that the pyg and dgl of the gatedGCN are different models.
"""
class GatedGCNNet_pyg(nn.Module):
    """
    Gated Graph Sequence Neural Networks (Li et al., 2015) — pyg backend.

    Note: this is NOT the same model as the dgl GatedGCNNet; PyG's
    GatedGraphConv is a single module that internally runs `n_layers`
    GRU-based propagation steps.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimension
        in_dim_edge = 1  # edge_dim (feat is a float); currently unused
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        # self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)  # edge feat is a float
        # A single GatedGraphConv performing n_layers internal steps.
        self.layers = nn.ModuleList([GatedGraphConv(hidden_dim, n_layers, aggr='add')])
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node class scores; `e` is forwarded as the edge weight."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        # e = self.embedding_e(e)
        # single gated conv (loop runs once — ModuleList has one entry)
        for conv in self.layers:
            h_in = h
            h = conv(h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers[0](h)
            if self.residual:
                h = h_in + h  # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        # output
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Plain cross-entropy (classes are not re-weighted here)."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
| 8,352
| 37.493088
| 122
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/mlp_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from layers.mlp_readout_layer import MLPReadout
class MLPNet(nn.Module):
def __init__(self, net_params):
super().__init__()
in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
hidden_dim = net_params['hidden_dim']
n_classes = net_params['n_classes']
in_feat_dropout = net_params['in_feat_dropout']
dropout = net_params['dropout']
n_layers = net_params['L']
self.gated = net_params['gated']
self.n_classes = n_classes
self.device = net_params['device']
self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
feat_mlp_modules = [
nn.Linear(hidden_dim, hidden_dim, bias=True),
nn.ReLU(),
nn.Dropout(dropout),
]
for _ in range(n_layers-1):
feat_mlp_modules.append(nn.Linear(hidden_dim, hidden_dim, bias=True))
feat_mlp_modules.append(nn.ReLU())
feat_mlp_modules.append(nn.Dropout(dropout))
self.feat_mlp = nn.Sequential(*feat_mlp_modules)
if self.gated:
self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
self.readout_mlp = MLPReadout(hidden_dim, n_classes)
def forward(self, g, h, e):
# input embedding
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
# MLP
h = self.feat_mlp(h)
if self.gated:
h = torch.sigmoid(self.gates(h)) * h
# output
h_out = self.readout_mlp(h)
return h_out
def loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
class MLPNet_pyg(nn.Module):
def __init__(self, net_params):
super().__init__()
in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
hidden_dim = net_params['hidden_dim']
n_classes = net_params['n_classes']
in_feat_dropout = net_params['in_feat_dropout']
dropout = net_params['dropout']
n_layers = net_params['L']
self.gated = net_params['gated']
self.n_classes = n_classes
self.device = net_params['device']
self.embedding_h = nn.Linear(in_dim_node, hidden_dim) # node feat is an integer
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
feat_mlp_modules = [
nn.Linear(hidden_dim, hidden_dim, bias=True),
nn.ReLU(),
nn.Dropout(dropout),
]
for _ in range(n_layers - 1):
feat_mlp_modules.append(nn.Linear(hidden_dim, hidden_dim, bias=True))
feat_mlp_modules.append(nn.ReLU())
feat_mlp_modules.append(nn.Dropout(dropout))
self.feat_mlp = nn.Sequential(*feat_mlp_modules)
if self.gated:
self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
self.readout_mlp = MLPReadout(hidden_dim, n_classes)
def forward(self, h, edge_index, e):
# input embedding
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
# MLP
h = self.feat_mlp(h)
if self.gated:
h = torch.sigmoid(self.gates(h)) * h
# output
h_out = self.readout_mlp(h)
return h_out
def loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def loss_proteins(self, pred, label):
criterion = nn.BCEWithLogitsLoss()
loss = criterion(pred, label.to(torch.float))
return loss
| 3,772
| 27.583333
| 93
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/mo_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter_add
import dgl
import numpy as np
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
from layers.gmm_layer import GMMLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GMMConv
class MoNet(nn.Module):
    """
    GMM: Gaussian Mixture Model convolution network (MoNet, dgl backend).

    Monti et al. (CVPR 2017). Edge pseudo-coordinates are derived from node
    degrees and projected through a small MLP before each GMM layer.
    """
    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel']  # for MoNet
        dim = net_params['pseudo_dim_MoNet']  # for MoNet
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        aggr_type = "sum"  # default for MoNet
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        # Hidden layers
        for _ in range(n_layers-1):
            self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
                                        dropout, batch_norm, residual))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer
        self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
                                    dropout, batch_norm, residual))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        """Return per-node class scores; `e` (edge features) is unused here."""
        h = self.embedding_h(h)
        # computing the 'pseudo' named tensor which depends on node degrees
        g.ndata['deg'] = g.in_degrees()
        g.apply_edges(self.compute_pseudo)
        pseudo = g.edata['pseudo'].to(self.device).float()
        for i in range(len(self.layers)):
            h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
        return self.MLP_layer(h)

    def compute_pseudo(self, edges):
        """Per-edge pseudo coordinates: (1/sqrt(deg_src+1), 1/sqrt(deg_dst+1))."""
        # to avoid zero division in case in_degree is 0, we add constant '1'
        # to all node degrees, denoting a self-loop
        srcs = 1/np.sqrt(edges.src['deg']+1)
        dsts = 1/np.sqrt(edges.dst['deg']+1)
        pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
        return {'pseudo': pseudo}

    def loss(self, pred, label):
        """Cross-entropy loss for node classification."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
class MoNetNet_pyg(nn.Module):
    """
    GMM: Gaussian Mixture Model convolution network (MoNet, pyg backend).

    Monti et al. (CVPR 2017). Pseudo-coordinates are built from the
    symmetrically normalized node degrees (deg^-1/2 of source and target).
    NOTE(review): unlike the dgl MoNet, no +1 is added to the degree here
    (isolated nodes are masked to 0 instead), and the aggregator is 'mean'
    rather than 'sum' — confirm this divergence is intentional.
    """
    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel']  # for MoNet
        dim = net_params['pseudo_dim_MoNet']  # for MoNet
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        self.dim = dim
        # aggr_type = "sum"  # default for MoNet
        aggr_type = "mean"
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        # self.embedding_e = nn.Linear(1, dim)  # edge feat is a float
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        self.batchnorm_h = nn.ModuleList()
        # Hidden layers
        for _ in range(self.n_layers - 1):
            self.layers.append(GMMConv(hidden_dim, hidden_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                       root_weight = True, bias = True))
            if self.batch_norm:
                self.batchnorm_h.append(nn.BatchNorm1d(hidden_dim))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer
        self.layers.append(GMMConv(hidden_dim, out_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                   root_weight = True, bias = True))
        if self.batch_norm:
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Return per-node class scores; `e` (edge features) is unused here."""
        h = self.embedding_h(h)
        # degree of each node from unit edge weights
        edge_weight = torch.ones((edge_index.size(1),),
                                 device = edge_index.device)
        row, col = edge_index[0], edge_index[1]
        deg = scatter_add(edge_weight, row, dim=0, dim_size=h.size(0))
        deg_inv_sqrt = deg.pow_(-0.5)
        # isolated nodes have deg 0 -> inf after pow; mask them to 0
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
        # pseudo-coordinates per edge: (deg_src^-1/2, deg_dst^-1/2)
        pseudo = torch.cat((deg_inv_sqrt[row].unsqueeze(-1), deg_inv_sqrt[col].unsqueeze(-1)), dim=1)
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index, self.pseudo_proj[i](pseudo))
            if self.batch_norm:
                h = self.batchnorm_h[i](h)  # batch normalization
            h = F.relu(h)  # non-linear activation
            if self.residual:
                h = h_in + h  # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Cross-entropy loss for node classification."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
| 6,420
| 39.639241
| 121
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/layers/graphsage_layer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.nn.pytorch import SAGEConv
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
class GraphSageLayer(nn.Module):
    """
    One GraphSAGE convolution (Hamilton et al., NeurIPS 2017).

    Either a hand-rolled aggregator + node-apply pipeline (default) or DGL's
    builtin SAGEConv (dgl_builtin=True). The residual connection is silently
    disabled when in_feats != out_feats, since the shapes would not match.
    """
    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.aggregator_type = aggregator_type
        self.batch_norm = batch_norm
        self.residual = residual
        self.dgl_builtin = dgl_builtin
        if in_feats != out_feats:
            self.residual = False  # shape mismatch: residual add impossible
        self.dropout = nn.Dropout(p=dropout)
        if dgl_builtin == False:
            self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout,
                                       bias=bias)
            # pick the neighbour aggregator
            if aggregator_type == "maxpool":
                self.aggregator = MaxPoolAggregator(in_feats, in_feats,
                                                    activation, bias)
            elif aggregator_type == "lstm":
                self.aggregator = LSTMAggregator(in_feats, in_feats)
            else:
                self.aggregator = MeanAggregator()
        else:
            self.sageconv = SAGEConv(in_feats, out_feats, aggregator_type,
                                     dropout, activation=activation)
        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)

    def forward(self, g, h):
        """Apply one SAGE convolution to node features h on graph g."""
        h_in = h  # for residual connection
        if self.dgl_builtin == False:
            h = self.dropout(h)
            g.ndata['h'] = h
            # NOTE(review): fn.copy_src is deprecated in newer DGL releases
            # (fn.copy_u is the replacement) — confirm the pinned DGL version.
            g.update_all(fn.copy_src(src='h', out='m'),
                         self.aggregator,
                         self.nodeapply)
            h = g.ndata['h']
        else:
            h = self.sageconv(g, h)
        if self.batch_norm:
            h = self.batchnorm_h(h)
        if self.residual:
            h = h_in + h  # residual connection
        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, aggregator={}, residual={})'.format(self.__class__.__name__,
                                                                                        self.in_channels,
                                                                                        self.out_channels, self.aggregator_type, self.residual)
"""
Aggregators for GraphSage
"""
class Aggregator(nn.Module):
    """Abstract neighbour aggregator for GraphSAGE.

    Subclasses implement ``aggre`` to reduce each node's batch of
    neighbour features into a single vector, returned under key ``"c"``.
    """

    def __init__(self):
        super().__init__()

    def forward(self, node):
        # Pull every message received by the node and reduce them.
        msgs = node.mailbox['m']
        return {"c": self.aggre(msgs)}

    def aggre(self, neighbour):
        # Expected input: (num_nodes, num_neighbours, feat_dim).
        raise NotImplementedError
class MeanAggregator(Aggregator):
    """Isotropic mean over neighbour features (GraphSAGE-mean)."""

    def __init__(self):
        super().__init__()

    def aggre(self, neighbour):
        # (N, num_neighbours, F) -> (N, F)
        return neighbour.mean(dim=1)
class MaxPoolAggregator(Aggregator):
    """Max-pooling aggregator: project each neighbour, optionally activate,
    then take the element-wise max over neighbours."""

    def __init__(self, in_feats, out_feats, activation, bias):
        super().__init__()
        self.linear = nn.Linear(in_feats, out_feats, bias=bias)
        self.activation = activation

    def aggre(self, neighbour):
        projected = self.linear(neighbour)
        if self.activation:
            projected = self.activation(projected)
        # Element-wise max across the neighbour dimension.
        return projected.max(dim=1)[0]
class LSTMAggregator(Aggregator):
    """LSTM aggregator for GraphSAGE.

    Neighbours are shuffled before the LSTM pass (the LSTM is
    order-sensitive but a neighbourhood is an unordered set); the output at
    the final step is used as the aggregated representation.
    """

    def __init__(self, in_feats, hidden_feats):
        super().__init__()
        self.lstm = nn.LSTM(in_feats, hidden_feats, batch_first=True)
        self.hidden_dim = hidden_feats
        self.hidden = self.init_hidden()

        # Bugfix: nn.LSTM exposes no `.weight` attribute, so the previous
        # `nn.init.xavier_uniform_(self.lstm.weight, ...)` raised
        # AttributeError at construction time. Initialise the actual
        # input-hidden and hidden-hidden weight matrices instead.
        gain = nn.init.calculate_gain('relu')
        nn.init.xavier_uniform_(self.lstm.weight_ih_l0, gain=gain)
        nn.init.xavier_uniform_(self.lstm.weight_hh_l0, gain=gain)

    def init_hidden(self):
        """Default (h0, c0) state: all zeros."""
        return (torch.zeros(1, 1, self.hidden_dim),
                torch.zeros(1, 1, self.hidden_dim))

    def aggre(self, neighbours):
        """Aggregate (N, num_neighbours, F) features into (N, hidden_feats).

        A random permutation of the neighbour axis makes the
        order-sensitive LSTM approximately permutation-invariant in
        expectation.
        """
        # N X F
        rand_order = torch.randperm(neighbours.size()[1])
        neighbours = neighbours[:, rand_order, :]

        (lstm_out, self.hidden) = self.lstm(neighbours.view(neighbours.size()[0], neighbours.size()[1], -1))
        # Output of the last time step per node.
        return lstm_out[:, -1, :]

    def forward(self, node):
        neighbour = node.mailbox['m']
        c = self.aggre(neighbour)
        return {"c": c}
class NodeApply(nn.Module):
    """GraphSAGE node update: concat(self, aggregated) -> linear ->
    L2-normalise -> optional activation.

    Works as the node_apply function in the DGL paradigm; reads the node's
    own feature 'h' and the aggregated neighbourhood 'c'.
    """

    def __init__(self, in_feats, out_feats, activation, dropout, bias=True):
        super().__init__()
        # NOTE: self.dropout is created but not applied inside forward().
        self.dropout = nn.Dropout(p=dropout)
        self.linear = nn.Linear(in_feats * 2, out_feats, bias)
        self.activation = activation

    def concat(self, h, aggre_result):
        # Join own feature with the aggregated neighbourhood and project.
        return self.linear(torch.cat((h, aggre_result), 1))

    def forward(self, node):
        combined = self.concat(node.data['h'], node.data['c'])
        combined = F.normalize(combined, p=2, dim=1)
        if self.activation:
            combined = self.activation(combined)
        return {"h": combined}
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class GraphSageLayerEdgeFeat(nn.Module):
    """GraphSAGE variant with anisotropic (gated) max-pool aggregation.

    Edge gates are built from the incident node features only
    (e_ij = B h_i + B h_j); gated neighbour messages are max-pooled and the
    node is then updated through NodeApply.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.residual = residual

        # Residual connection requires matching dimensions.
        if in_feats != out_feats:
            self.residual = False

        self.dropout = nn.Dropout(p=dropout)

        self.activation = activation

        # A projects the message content, B builds the edge gate.
        self.A = nn.Linear(in_feats, out_feats, bias=bias)
        self.B = nn.Linear(in_feats, out_feats, bias=bias)

        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout, bias=bias)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        Ah_j = edges.src['Ah']
        e_ij = edges.src['Bh'] + edges.dst['Bh']  # e_ij = Bhi + Bhj
        edges.data['e'] = e_ij
        return {'Ah_j' : Ah_j, 'e_ij' : e_ij}

    def reduce_func(self, nodes):
        # Anisotropic MaxPool aggregation: gate each neighbour message with
        # sigmoid(e_ij), optionally activate, then max-pool over neighbours.
        Ah_j = nodes.mailbox['Ah_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e)  # sigma_ij = sigmoid(e_ij)

        Ah_j = sigma_ij * Ah_j
        if self.activation:
            Ah_j = self.activation(Ah_j)

        c = torch.max(Ah_j, dim=1)[0]
        return {'c' : c}

    def forward(self, g, h):
        h_in = h  # for residual connection
        h = self.dropout(h)

        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.update_all(self.message_func,
                     self.reduce_func,
                     self.nodeapply)
        h = g.ndata['h']

        if self.batch_norm:
            h = self.batchnorm_h(h)

        if self.residual:
            h = h_in + h  # residual connection

        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels,
            self.residual)
##############################################################
class GraphSageLayerEdgeReprFeat(nn.Module):
    """GraphSAGE variant that carries explicit edge representations.

    Edge update: e_ij = C e_ij + B h_i + B h_j, used both as a sigmoid gate
    on neighbour messages (max-pooled) and as the new edge feature.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.residual = residual

        # Residual connection requires matching dimensions.
        if in_feats != out_feats:
            self.residual = False

        self.dropout = nn.Dropout(p=dropout)

        self.activation = activation

        # A: message content, B: node part of the gate, C: edge-feature part.
        self.A = nn.Linear(in_feats, out_feats, bias=bias)
        self.B = nn.Linear(in_feats, out_feats, bias=bias)
        self.C = nn.Linear(in_feats, out_feats, bias=bias)

        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout, bias=bias)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)
            self.batchnorm_e = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        Ah_j = edges.src['Ah']
        e_ij = edges.data['Ce'] + edges.src['Bh'] + edges.dst['Bh']  # e_ij = Ce_ij + Bhi + Bhj
        edges.data['e'] = e_ij
        return {'Ah_j' : Ah_j, 'e_ij' : e_ij}

    def reduce_func(self, nodes):
        # Anisotropic MaxPool aggregation: gate messages with sigmoid(e_ij),
        # optionally activate, then max-pool over neighbours.
        Ah_j = nodes.mailbox['Ah_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e)  # sigma_ij = sigmoid(e_ij)

        Ah_j = sigma_ij * Ah_j
        if self.activation:
            Ah_j = self.activation(Ah_j)

        c = torch.max(Ah_j, dim=1)[0]
        return {'c' : c}

    def forward(self, g, h, e):
        h_in = h  # for residual connection
        e_in = e
        h = self.dropout(h)

        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.edata['e'] = e
        g.edata['Ce'] = self.C(e)
        g.update_all(self.message_func,
                     self.reduce_func,
                     self.nodeapply)
        h = g.ndata['h']
        e = g.edata['e']

        if self.activation:
            e = self.activation(e)  # non-linear activation

        if self.batch_norm:
            h = self.batchnorm_h(h)
            e = self.batchnorm_e(e)

        if self.residual:
            h = h_in + h  # residual connection
            e = e_in + e  # residual connection

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels,
            self.residual)
| 10,938
| 29.386111
| 114
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/layers/mlp_readout_layer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
MLP Layer used after graph vector representation
"""
class MLPReadout(nn.Module):
    """Readout MLP used after the graph representation.

    ``L`` hidden layers, each halving the width, with ReLU after every
    hidden layer; a final linear projection maps to ``output_dim``.
    """

    def __init__(self, input_dim, output_dim, L=2):  # L = number of hidden layers
        super().__init__()
        dims = [input_dim // 2 ** l for l in range(L + 1)]
        layers = [nn.Linear(dims[l], dims[l + 1], bias=True) for l in range(L)]
        layers.append(nn.Linear(dims[L], output_dim, bias=True))
        self.FC_layers = nn.ModuleList(layers)
        self.L = L

    def forward(self, x):
        y = x
        # ReLU after every hidden layer, none after the output layer.
        for layer in self.FC_layers[:-1]:
            y = F.relu(layer(y))
        return self.FC_layers[-1](y)
# class MLPReadout(nn.Module):
#
# def __init__(self, input_dim, output_dim): # L=nb_hidden_layers
# super().__init__()
# FC_layers = nn.Linear(input_dim, output_dim, bias=True)
#
#
# def forward(self, x):
# y = x
# y = self.FC_layers(y)
# return y
| 1,026
| 26.756757
| 109
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/layers/gated_gcn_layer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch_geometric.typing import OptTensor
from torch_scatter import scatter
from torch_geometric.nn.conv import MessagePassing
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class GatedGCNLayer(nn.Module):
    """Residual Gated GCN layer (Bresson & Laurent, 2018), DGL version.

    Joint node/edge update:
        e_ij <- C e_ij + D h_i + E h_j
        h_i  <- A h_i + sum_j sigmoid(e_ij) * B h_j / (sum_j sigmoid(e_ij) + 1e-6)
    followed by optional batch norm, ReLU, optional residual, and dropout
    on both h and e.

    Param: []
    """

    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual

        # Residual connection is only valid when dimensions match.
        if input_dim != output_dim:
            self.residual = False

        # A..E: the five linear maps of the gated update (see docstring).
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.C = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)
        self.bn_node_e = nn.BatchNorm1d(output_dim)

    def message_func(self, edges):
        Bh_j = edges.src['Bh']
        e_ij = edges.data['Ce'] + edges.src['Dh'] + edges.dst['Eh']  # e_ij = Ce_ij + Dhi + Ehj
        edges.data['e'] = e_ij
        return {'Bh_j' : Bh_j, 'e_ij' : e_ij}

    def reduce_func(self, nodes):
        Ah_i = nodes.data['Ah']
        Bh_j = nodes.mailbox['Bh_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e)  # sigma_ij = sigmoid(e_ij)
        #h = Ah_i + torch.mean( sigma_ij * Bh_j, dim=1 ) # hi = Ahi + mean_j alpha_ij * Bhj
        h = Ah_i + torch.sum( sigma_ij * Bh_j, dim=1 ) / ( torch.sum( sigma_ij, dim=1 ) + 1e-6 )  # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention
        return {'h' : h}

    def forward(self, g, h, e):
        h_in = h  # for residual connection
        e_in = e  # for residual connection

        g.ndata['h']  = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        g.edata['e']  = e
        g.edata['Ce'] = self.C(e)
        g.update_all(self.message_func,self.reduce_func)
        h = g.ndata['h']  # result of graph convolution
        e = g.edata['e']  # result of graph convolution

        if self.batch_norm:
            h = self.bn_node_h(h)  # batch normalization
            e = self.bn_node_e(e)  # batch normalization

        h = F.relu(h)  # non-linear activation
        e = F.relu(e)  # non-linear activation

        if self.residual:
            h = h_in + h  # residual connection
            e = e_in + e  # residual connection

        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                                            self.in_channels,
                                                            self.out_channels)
"""
ResGatedGCN: Residual Gated Graph ConvNets for pyg implement, is made by myself
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class ResGatedGCNLayer(MessagePassing):
    """PyG re-implementation of the residual gated GCN layer.

    Node update: h_i = A h_i + sum_j sigmoid(e_ij) * B h_j / (sum_j sigmoid(e_ij) + 1e-6)
    Edge update: e_ij = C e_ij + D h_i + E h_j

    Unlike the DGL version in this file, batch norm / activation /
    residual / dropout are NOT applied here; `dropout`, `batch_norm` and
    `residual` are stored but unused in forward().

    Param: []
    """

    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout        # stored only; not applied in this layer
        self.batch_norm = batch_norm  # stored only; not applied in this layer
        self.residual = residual

        if input_dim != output_dim:
            self.residual = False

        # A..E: the five linear maps of the gated update (see docstring).
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.C = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)

    def message(self, x_j: Tensor, alpha_j: Tensor, alpha_i: Tensor, Ah: Tensor ,edge_weight: OptTensor):
        # Edge gate logits: alpha_j carries Dh gathered at the source,
        # alpha_i carries Eh gathered at the target, edge_weight carries Ce
        # (see the propagate() call in forward()).
        e_ij = edge_weight + alpha_j + alpha_i
        # e_ij = edges.data['Ce'] + edges.src['Dh'] + edges.dst['Eh'] # e_ij = Ce_ij + Dhi + Ehj
        # NOTE(review): returns a list instead of a Tensor; this relies on the
        # custom aggregate() below receiving the list unchanged. Ah is passed
        # whole (node-level), not gathered per edge.
        return [x_j, e_ij, Ah]

    def aggregate(self, inputs, index, ptr=None, dim_size=None):
        Ah_i = inputs[2]                       # node-level A h (full node set)
        Bh_j = inputs[0]                       # per-edge source messages B h_j
        sigma_ij = torch.sigmoid(inputs[1])    # per-edge gates
        e = inputs[1]                          # raw gate logits -> new edge features
        # aa=scatter(sigma_ij * Bh_j, index, dim=self.node_dim, dim_size=dim_size,
        #             reduce='add')
        # Gated, normalised sum over incoming edges ("dense attention").
        h = Ah_i + scatter(sigma_ij*Bh_j, index, dim= self.node_dim, dim_size=dim_size,
                           reduce='add') / (scatter(sigma_ij, index, dim=self.node_dim, dim_size=dim_size, reduce='sum') + 1e-6)
        return [h, e]
        # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention

    def forward(self, h, edge_index, edge_weight):
        # h = conv(h, edge_index, e)g, h, e
        h_in = h            # for residual connection (currently unused here)
        e_in = edge_weight  # for residual connection (currently unused here)

        # Pre-compute the five linear projections once per forward pass.
        Ah = self.A(h)
        Bh = self.B(h)
        Dh = self.D(h)
        Eh = self.E(h)
        Ce = self.C(edge_weight)

        # g.update_all(self.message_func, self.reduce_func)
        m = self.propagate(edge_index, x=(Bh,Bh), alpha=(Dh,Eh), Ah=Ah, edge_weight=Ce,
                           size=None)
        h = m[0]  # result of graph convolution
        e = m[1]  # result of graph convolution

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                                            self.in_channels,
                                                            self.out_channels)
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class GatedGCNLayerEdgeFeatOnly(nn.Module):
    """GatedGCN variant whose gates use node features only
    (e_ij = D h_i + E h_j; no learned edge-feature term). The input edge
    features `e` are returned unchanged.

    Param: []
    """

    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual

        # Residual connection requires matching dimensions.
        if input_dim != output_dim:
            self.residual = False

        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)

    def message_func(self, edges):
        Bh_j = edges.src['Bh']
        e_ij = edges.src['Dh'] + edges.dst['Eh']  # e_ij = Dhi + Ehj
        edges.data['e'] = e_ij
        return {'Bh_j' : Bh_j, 'e_ij' : e_ij}

    def reduce_func(self, nodes):
        Ah_i = nodes.data['Ah']
        Bh_j = nodes.mailbox['Bh_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e)  # sigma_ij = sigmoid(e_ij)
        # Gated, normalised sum over neighbours ("dense attention").
        h = Ah_i + torch.sum( sigma_ij * Bh_j, dim=1 ) / ( torch.sum( sigma_ij, dim=1 ) + 1e-6 )  # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention
        return {'h' : h}

    def forward(self, g, h, e):
        h_in = h  # for residual connection

        g.ndata['h']  = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        g.update_all(self.message_func,self.reduce_func)
        h = g.ndata['h']  # result of graph convolution

        if self.batch_norm:
            h = self.bn_node_h(h)  # batch normalization

        h = F.relu(h)  # non-linear activation

        if self.residual:
            h = h_in + h  # residual connection

        h = F.dropout(h, self.dropout, training=self.training)

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                                            self.in_channels,
                                                            self.out_channels)
##############################################################
class GatedGCNLayerIsotropic(nn.Module):
    """Isotropic GatedGCN variant: no edge gates, plain sum aggregation.

    h_i <- A h_i + sum_j B h_j, then optional batch norm, ReLU, optional
    residual and dropout. Edge features `e` pass through untouched.

    Param: []
    """

    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        if input_dim != output_dim:
            self.residual = False  # residual needs matching dimensions

        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)

    def message_func(self, edges):
        return {'Bh_j': edges.src['Bh']}

    def reduce_func(self, nodes):
        # h_i = A h_i + sum_j B h_j (unweighted neighbour sum)
        neigh_sum = torch.sum(nodes.mailbox['Bh_j'], dim=1)
        return {'h': nodes.data['Ah'] + neigh_sum}

    def forward(self, g, h, e):
        h_in = h  # saved for the residual connection

        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.update_all(self.message_func, self.reduce_func)
        out = g.ndata['h']

        if self.batch_norm:
            out = self.bn_node_h(out)
        out = F.relu(out)
        if self.residual:
            out = h_in + out
        out = F.dropout(out, self.dropout, training=self.training)

        return out, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                                            self.in_channels,
                                                            self.out_channels)
| 10,484
| 35.40625
| 170
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/layers/gat_layer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GATConv
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
class GATLayer(nn.Module):
    """
    Parameters
    ----------
    in_dim :
        Number of input features.
    out_dim :
        Number of output features (per head).
    num_heads : int
        Number of heads in Multi-Head Attention.
    dropout :
        Required for dropout of attn and feat in GATConv
    batch_norm :
        boolean flag for batch_norm layer.
    residual :
        If True, use residual connection inside this layer. Default: ``False``.
    activation : callable activation function/layer or None, optional.
        If not None, applies an activation function to the updated node features.

    Using dgl builtin GATConv by default:
    https://github.com/graphdeeplearning/benchmarking-gnns/commit/206e888ecc0f8d941c54e061d5dffcc7ae2142fc
    """

    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=False, activation=F.elu):
        super().__init__()
        self.residual = residual
        self.activation = activation
        self.batch_norm = batch_norm

        # Residual only when the input width equals the concatenated output
        # width of all heads.
        if in_dim != (out_dim*num_heads):
            self.residual = False

        # Same `dropout` is used for both feat_drop and attn_drop.
        self.gatconv = GATConv(in_dim, out_dim, num_heads, dropout, dropout)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_dim * num_heads)

    def forward(self, g, h):
        h_in = h  # for residual connection

        # GATConv output is per-head; flatten heads into one feature vector.
        h = self.gatconv(g, h).flatten(1)

        if self.batch_norm:
            h = self.batchnorm_h(h)

        if self.activation:
            h = self.activation(h)

        if self.residual:
            h = h_in + h  # residual connection

        return h
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class CustomGATHeadLayer(nn.Module):
    """Single hand-rolled GAT attention head (no edge features).

    Attention logit per edge: LeakyReLU(a^T [z_src || z_dst]),
    softmax-normalised over each node's incoming edges.
    """

    def __init__(self, in_dim, out_dim, dropout, batch_norm):
        super().__init__()
        self.dropout = dropout
        self.batch_norm = batch_norm

        self.fc = nn.Linear(in_dim, out_dim, bias=False)
        self.attn_fc = nn.Linear(2 * out_dim, 1, bias=False)
        self.batchnorm_h = nn.BatchNorm1d(out_dim)

    def edge_attention(self, edges):
        # Unnormalised attention logit for each edge.
        z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
        a = self.attn_fc(z2)
        return {'e': F.leaky_relu(a)}

    def message_func(self, edges):
        return {'z': edges.src['z'], 'e': edges.data['e']}

    def reduce_func(self, nodes):
        # Softmax over incoming edges, with dropout on the attention weights.
        alpha = F.softmax(nodes.mailbox['e'], dim=1)
        alpha = F.dropout(alpha, self.dropout, training=self.training)
        h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
        return {'h': h}

    def forward(self, g, h):
        z = self.fc(h)
        g.ndata['z'] = z
        g.apply_edges(self.edge_attention)
        g.update_all(self.message_func, self.reduce_func)
        h = g.ndata['h']

        if self.batch_norm:
            h = self.batchnorm_h(h)

        h = F.elu(h)

        h = F.dropout(h, self.dropout, training=self.training)
        return h
class CustomGATLayer(nn.Module):
    """Multi-head GAT layer built from CustomGATHeadLayer heads.

    Param: [in_dim, out_dim, n_heads]

    Head outputs are concatenated (``self.merge == 'cat'``, the default)
    or averaged element-wise across heads. Edge features `e` are passed
    through untouched.
    """

    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.residual = residual

        # Residual only when input width equals the concatenated head width.
        if in_dim != (out_dim*num_heads):
            self.residual = False

        self.heads = nn.ModuleList()
        for i in range(num_heads):
            self.heads.append(CustomGATHeadLayer(in_dim, out_dim, dropout, batch_norm))
        self.merge = 'cat'

    def forward(self, g, h, e):
        h_in = h  # for residual connection

        head_outs = [attn_head(g, h) for attn_head in self.heads]

        if self.merge == 'cat':
            h = torch.cat(head_outs, dim=1)
        else:
            # Bugfix: average element-wise across heads. The previous
            # torch.mean(torch.stack(head_outs)) reduced over every
            # dimension, collapsing the output to a single scalar.
            h = torch.mean(torch.stack(head_outs), dim=0)

        if self.residual:
            h = h_in + h  # residual connection

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
                                                                                   self.in_channels,
                                                                                   self.out_channels, self.num_heads, self.residual)
##############################################################
class CustomGATHeadLayerEdgeReprFeat(nn.Module):
    """Single GAT head that also produces an explicit edge representation.

    Both the attention logit and the new edge feature are derived from the
    concatenation [z_e, z_src, z_dst].
    """

    def __init__(self, in_dim, out_dim, dropout, batch_norm):
        super().__init__()
        self.dropout = dropout
        self.batch_norm = batch_norm

        self.fc_h = nn.Linear(in_dim, out_dim, bias=False)
        self.fc_e = nn.Linear(in_dim, out_dim, bias=False)
        self.fc_proj = nn.Linear(3* out_dim, out_dim)
        self.attn_fc = nn.Linear(3* out_dim, 1, bias=False)
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        self.batchnorm_e = nn.BatchNorm1d(out_dim)

    def edge_attention(self, edges):
        # Concatenate the edge and endpoint projections once; reuse for both
        # the projected edge representation and the attention logit.
        z = torch.cat([edges.data['z_e'], edges.src['z_h'], edges.dst['z_h']], dim=1)
        e_proj = self.fc_proj(z)
        attn = F.leaky_relu(self.attn_fc(z))
        return {'attn': attn, 'e_proj': e_proj}

    def message_func(self, edges):
        return {'z': edges.src['z_h'], 'attn': edges.data['attn']}

    def reduce_func(self, nodes):
        # Softmax-normalised attention over each node's incoming edges.
        alpha = F.softmax(nodes.mailbox['attn'], dim=1)
        h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
        return {'h': h}

    def forward(self, g, h, e):
        z_h = self.fc_h(h)
        z_e = self.fc_e(e)
        g.ndata['z_h'] = z_h
        g.edata['z_e'] = z_e

        g.apply_edges(self.edge_attention)

        g.update_all(self.message_func, self.reduce_func)

        h = g.ndata['h']
        e = g.edata['e_proj']

        if self.batch_norm:
            h = self.batchnorm_h(h)
            e = self.batchnorm_e(e)

        h = F.elu(h)
        e = F.elu(e)

        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)

        return h, e
class CustomGATLayerEdgeReprFeat(nn.Module):
    """Multi-head GAT layer that maintains explicit edge representations.

    Param: [in_dim, out_dim, n_heads]
    Only the 'cat' merge mode is implemented.
    """

    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.residual = residual
        if in_dim != (out_dim*num_heads):
            self.residual = False  # concat width must match input for residual

        self.heads = nn.ModuleList(
            CustomGATHeadLayerEdgeReprFeat(in_dim, out_dim, dropout, batch_norm)
            for _ in range(num_heads))
        self.merge = 'cat'

    def forward(self, g, h, e):
        h_in, e_in = h, e  # saved for the residual connections

        node_outs, edge_outs = [], []
        for head in self.heads:
            h_head, e_head = head(g, h, e)
            node_outs.append(h_head)
            edge_outs.append(e_head)

        if self.merge != 'cat':
            raise NotImplementedError
        h = torch.cat(node_outs, dim=1)
        e = torch.cat(edge_outs, dim=1)

        if self.residual:
            h = h_in + h
            e = e_in + e

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
                                                                                   self.in_channels,
                                                                                   self.out_channels, self.num_heads, self.residual)
##############################################################
class CustomGATHeadLayerIsotropic(nn.Module):
    """Isotropic head: linear projection plus an unweighted sum over
    neighbours, then optional batch norm, ELU and dropout."""

    def __init__(self, in_dim, out_dim, dropout, batch_norm):
        super().__init__()
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.fc = nn.Linear(in_dim, out_dim, bias=False)
        self.batchnorm_h = nn.BatchNorm1d(out_dim)

    def message_func(self, edges):
        return {'z': edges.src['z']}

    def reduce_func(self, nodes):
        # Plain sum of incoming messages — no attention weighting.
        return {'h': nodes.mailbox['z'].sum(dim=1)}

    def forward(self, g, h):
        g.ndata['z'] = self.fc(h)
        g.update_all(self.message_func, self.reduce_func)
        out = g.ndata['h']

        if self.batch_norm:
            out = self.batchnorm_h(out)
        out = F.elu(out)
        return F.dropout(out, self.dropout, training=self.training)
class CustomGATLayerIsotropic(nn.Module):
    """Multi-head wrapper around CustomGATHeadLayerIsotropic heads.

    Param: [in_dim, out_dim, n_heads]

    Head outputs are concatenated (``self.merge == 'cat'``, the default)
    or averaged element-wise across heads. Edge features `e` are passed
    through untouched.
    """

    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.residual = residual

        # Residual only when input width equals the concatenated head width.
        if in_dim != (out_dim*num_heads):
            self.residual = False

        self.heads = nn.ModuleList()
        for i in range(num_heads):
            self.heads.append(CustomGATHeadLayerIsotropic(in_dim, out_dim, dropout, batch_norm))
        self.merge = 'cat'

    def forward(self, g, h, e):
        h_in = h  # for residual connection

        head_outs = [attn_head(g, h) for attn_head in self.heads]

        if self.merge == 'cat':
            h = torch.cat(head_outs, dim=1)
        else:
            # Bugfix: average element-wise across heads. The previous
            # torch.mean(torch.stack(head_outs)) reduced over every
            # dimension, collapsing the output to a single scalar.
            h = torch.mean(torch.stack(head_outs), dim=0)

        if self.residual:
            h = h_in + h  # residual connection

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
                                                                                   self.in_channels,
                                                                                   self.out_channels, self.num_heads, self.residual)
| 10,303
| 29.850299
| 107
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/layers/gin_layer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
class GINLayer(nn.Module):
    """
    [!] code adapted from dgl implementation of GINConv

    Parameters
    ----------
    apply_func : callable activation function/layer or None
        If not None, apply this function to the updated node feature,
        the :math:`f_\Theta` in the formula.
    aggr_type :
        Aggregator type to use (``sum``, ``max`` or ``mean``).
    out_dim :
        Required for batch norm layer; should match out_dim of apply_func if not None.
    dropout :
        Required for dropout of output features.
    batch_norm :
        boolean flag for batch_norm layer.
    residual :
        boolean flag for using residual connection.
    init_eps : optional
        Initial :math:`\epsilon` value, default: ``0``.
    learn_eps : bool, optional
        If True, :math:`\epsilon` will be a learnable parameter.
    """

    def __init__(self, apply_func, aggr_type, dropout, batch_norm, residual=False, init_eps=0, learn_eps=False):
        super().__init__()
        self.apply_func = apply_func

        # Map the aggregator name onto DGL's builtin reducer factory.
        if aggr_type == 'sum':
            self._reducer = fn.sum
        elif aggr_type == 'max':
            self._reducer = fn.max
        elif aggr_type == 'mean':
            self._reducer = fn.mean
        else:
            raise KeyError('Aggregator type {} not recognized.'.format(aggr_type))

        self.batch_norm = batch_norm
        self.residual = residual
        self.dropout = dropout

        in_dim = apply_func.mlp.input_dim
        out_dim = apply_func.mlp.output_dim

        # Residual connection only when dimensions match.
        if in_dim != out_dim:
            self.residual = False

        # to specify whether eps is trainable or not.
        if learn_eps:
            self.eps = torch.nn.Parameter(torch.FloatTensor([init_eps]))
        else:
            self.register_buffer('eps', torch.FloatTensor([init_eps]))

        self.bn_node_h = nn.BatchNorm1d(out_dim)

    def forward(self, g, h):
        h_in = h  # for residual connection

        g = g.local_var()
        g.ndata['h'] = h
        # Aggregate neighbour features with the configured reducer.
        g.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
        # GIN update: (1 + eps) * h_v + aggregated neighbourhood.
        h = (1 + self.eps) * h + g.ndata['neigh']
        if self.apply_func is not None:
            h = self.apply_func(h)

        if self.batch_norm:
            h = self.bn_node_h(h)  # batch normalization

        h = F.relu(h)  # non-linear activation

        if self.residual:
            h = h_in + h  # residual connection

        h = F.dropout(h, self.dropout, training=self.training)
        return h
class ApplyNodeFunc(nn.Module):
    """Wraps an MLP as the node-update function used by GINNet:
    h_v <- mlp(h_v)."""

    def __init__(self, mlp):
        super().__init__()
        self.mlp = mlp

    def forward(self, h):
        return self.mlp(h)
class MLP(nn.Module):
    """MLP with a linear output layer (no activation on the last layer).

    ``num_layers == 1`` degenerates to a single nn.Linear; otherwise the
    network applies Linear -> BatchNorm -> ReLU ``num_layers - 1`` times
    before the final Linear.
    """

    def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
        super().__init__()
        self.linear_or_not = True  # default is linear model
        self.num_layers = num_layers
        self.output_dim = output_dim
        self.input_dim = input_dim

        if num_layers < 1:
            raise ValueError("number of layers should be positive!")
        if num_layers == 1:
            # Degenerate case: a plain linear map.
            self.linear = nn.Linear(input_dim, output_dim)
        else:
            self.linear_or_not = False
            dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
            self.linears = torch.nn.ModuleList(
                nn.Linear(dims[i], dims[i + 1]) for i in range(num_layers))
            self.batch_norms = torch.nn.ModuleList(
                nn.BatchNorm1d((hidden_dim)) for _ in range(num_layers - 1))

    def forward(self, x):
        if self.linear_or_not:
            # If linear model
            return self.linear(x)
        # Hidden layers with batch norm + ReLU, then a bare final linear.
        h = x
        for linear, bn in zip(self.linears[:-1], self.batch_norms):
            h = F.relu(bn(linear(h)))
        return self.linears[-1](h)
| 4,598
| 30.9375
| 113
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/layers/gmm_layer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import dgl.function as fn
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
class GMMLayer(nn.Module):
    """
    [!] code adapted from dgl implementation of GMMConv

    Parameters
    ----------
    in_dim :
        Number of input features.
    out_dim :
        Number of output features.
    dim :
        Dimensionality of pseudo-coordinte.
    kernel :
        Number of kernels :math:`K`.
    aggr_type :
        Aggregator type (``sum``, ``mean``, ``max``).
    dropout :
        Required for dropout of output features.
    batch_norm :
        boolean flag for batch_norm layer.
    residual :
        If True, use residual connection inside this layer. Default: ``False``.
    bias :
        If True, adds a learnable bias to the output. Default: ``True``.
    """

    def __init__(self, in_dim, out_dim, dim, kernel, aggr_type, dropout,
                 batch_norm, residual=False, bias=True):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.dim = dim
        self.kernel = kernel
        self.batch_norm = batch_norm
        self.residual = residual
        self.dropout = dropout

        # Map the aggregator name onto DGL's builtin reducer factory.
        if aggr_type == 'sum':
            self._reducer = fn.sum
        elif aggr_type == 'mean':
            self._reducer = fn.mean
        elif aggr_type == 'max':
            self._reducer = fn.max
        else:
            raise KeyError("Aggregator type {} not recognized.".format(aggr_type))

        # Gaussian kernel parameters: means and inverse std-devs, per kernel.
        self.mu = nn.Parameter(torch.Tensor(kernel, dim))
        self.inv_sigma = nn.Parameter(torch.Tensor(kernel, dim))
        # fc maps each node feature to K kernel-specific feature maps.
        self.fc = nn.Linear(in_dim, kernel * out_dim, bias=False)
        self.bn_node_h = nn.BatchNorm1d(out_dim)

        # Residual connection only when dimensions match.
        if in_dim != out_dim:
            self.residual = False

        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_dim))
        else:
            self.register_buffer('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = init.calculate_gain('relu')
        init.xavier_normal_(self.fc.weight, gain=gain)
        init.normal_(self.mu.data, 0, 0.1)
        init.constant_(self.inv_sigma.data, 1)
        if self.bias is not None:
            init.zeros_(self.bias.data)

    def forward(self, g, h, pseudo):
        h_in = h  # for residual connection

        g = g.local_var()
        # Project to K kernel-specific feature maps: (N, K, out_dim).
        g.ndata['h'] = self.fc(h).view(-1, self.kernel, self.out_dim)
        E = g.number_of_edges()

        # compute gaussian weight
        # w_k(u) = exp(-0.5 * ||(u - mu_k) * inv_sigma_k||^2) per edge/kernel.
        gaussian = -0.5 * ((pseudo.view(E, 1, self.dim) -
                            self.mu.view(1, self.kernel, self.dim)) ** 2)
        gaussian = gaussian * (self.inv_sigma.view(1, self.kernel, self.dim) ** 2)
        gaussian = torch.exp(gaussian.sum(dim=-1, keepdim=True))  # (E, K, 1)
        g.edata['w'] = gaussian
        # Weight each neighbour's kernel maps by w, reduce, then sum over K.
        g.update_all(fn.u_mul_e('h', 'w', 'm'), self._reducer('m', 'h'))
        h = g.ndata['h'].sum(1)

        if self.batch_norm:
            h = self.bn_node_h(h)  # batch normalization

        h = F.relu(h)  # non-linear activation

        if self.residual:
            h = h_in + h  # residual connection

        # NOTE(review): the bias is added after the residual branch, unlike
        # the other layers in this file — confirm this ordering is intended.
        if self.bias is not None:
            h = h + self.bias

        h = F.dropout(h, self.dropout, training=self.training)

        return h
| 3,680
| 31.289474
| 111
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/layers/gcn_layer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.nn.pytorch import GraphConv
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
# Sends a message of node feature h
# Equivalent to => return {'m': edges.src['h']}
msg = fn.copy_src(src='h', out='m')


def reduce(nodes):
    """Mean-pool all incoming messages into the node feature 'h'."""
    accum = torch.mean(nodes.mailbox['m'], 1)
    return {'h': accum}
class NodeApplyModule(nn.Module):
    """Node update for the hand-rolled GCN path: h_v <- W h_v + b."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.linear = nn.Linear(in_dim, out_dim)

    def forward(self, node):
        return {'h': self.linear(node.data['h'])}
class GCNLayer(nn.Module):
    """
    Param: [in_dim, out_dim]

    Vanilla GCN layer with two execution paths: a hand-rolled
    mean-aggregation update (dgl_builtin=False) using the module-level
    `msg`/`reduce` pair plus NodeApplyModule, or DGL's built-in GraphConv.
    """

    def __init__(self, in_dim, out_dim, activation, dropout, batch_norm, residual=False, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.batch_norm = batch_norm
        self.residual = residual
        self.dgl_builtin = dgl_builtin

        # Residual connection only when dimensions match.
        if in_dim != out_dim:
            self.residual = False

        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        self.activation = activation
        self.dropout = nn.Dropout(dropout)
        if self.dgl_builtin == False:
            self.apply_mod = NodeApplyModule(in_dim, out_dim)
        else:
            self.conv = GraphConv(in_dim, out_dim)

    def forward(self, g, feature):
        h_in = feature  # to be used for residual connection

        if self.dgl_builtin == False:
            g.ndata['h'] = feature
            # Mean over neighbours, then a linear NodeApplyModule update.
            g.update_all(msg, reduce)
            g.apply_nodes(func=self.apply_mod)
            h = g.ndata['h']  # result of graph convolution
        else:
            h = self.conv(g, feature)

        if self.batch_norm:
            h = self.batchnorm_h(h)  # batch normalization

        if self.activation:
            h = self.activation(h)

        if self.residual:
            h = h_in + h  # residual connection

        h = self.dropout(h)
        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(self.__class__.__name__,
                                                                         self.in_channels,
                                                                         self.out_channels, self.residual)
| 2,561
| 29.86747
| 109
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/layers/ring_gnn_equiv_layer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Ring-GNN equi 2 to 2 layer file
On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al, 2019)
https://arxiv.org/pdf/1905.12560v1.pdf
CODE ADPATED FROM https://github.com/leichen2018/Ring-GNN/
"""
class RingGNNEquivLayer(nn.Module):
    """Ring-GNN 2-to-2 equivariant layer.

    Applies learned combinations of the 15 equivariant basis operations
    (see ``ops_2_to_2``) up to ``radius`` matrix-power terms, with optional
    layer norm, residual connection and dropout.

    NOTE(review): ``input_dim``/``output_dim`` are treated as 0-d tensors
    (``.item()`` / ``.float()`` are called on them) — confirm callers pass
    tensors, not plain ints. ``self.switch`` holds exactly 2 parameters, so
    the forward loop only supports radius <= 2 — TODO confirm intended.
    """
    def __init__(self, device, input_dim, output_dim, layer_norm, residual, dropout,
                 normalization='inf', normalization_val=1.0, radius=2, k2_init = 0.1):
        super().__init__()
        self.device = device
        basis_dimension = 15  # number of equivariant basis ops
        self.radius = radius
        self.layer_norm = layer_norm
        self.residual = residual
        self.dropout = dropout
        # Xavier-like scale: std ~ sqrt(2 / (fan_in + fan_out))
        coeffs_values = lambda i, j, k: torch.randn([i, j, k]) * torch.sqrt(2. / (i + j).float())
        self.diag_bias_list = nn.ParameterList([])
        for i in range(radius):
            for j in range(i+1):
                self.diag_bias_list.append(nn.Parameter(torch.zeros(1, output_dim, 1, 1)))
        self.all_bias = nn.Parameter(torch.zeros(1, output_dim, 1, 1))
        self.coeffs_list = nn.ParameterList([])
        for i in range(radius):
            for j in range(i+1):
                self.coeffs_list.append(nn.Parameter(coeffs_values(input_dim, output_dim, basis_dimension)))
        # Learnable mixing weights for the power terms (k=1 and k=2).
        self.switch = nn.ParameterList([nn.Parameter(torch.FloatTensor([1])), nn.Parameter(torch.FloatTensor([k2_init]))])
        self.output_dim = output_dim
        self.normalization = normalization
        self.normalization_val = normalization_val
        if self.layer_norm:
            self.ln_x = LayerNorm(output_dim.item())
        if self.residual:
            self.res_x = nn.Linear(input_dim.item(), output_dim.item())
    def forward(self, inputs):
        """inputs: 1 x D x m x m; returns 1 x output_dim x m x m."""
        m = inputs.size()[3]
        ops_out = ops_2_to_2(inputs, m, normalization=self.normalization)
        ops_out = torch.stack(ops_out, dim = 2)  # N x D x 15 x m x m
        output_list = []
        for i in range(self.radius):
            for j in range(i+1):
                # Linear combination of the 15 basis ops per output channel.
                output_i = torch.einsum('dsb,ndbij->nsij', self.coeffs_list[i*(i+1)//2 + j], ops_out)
                mat_diag_bias = torch.eye(inputs.size()[3]).unsqueeze(0).unsqueeze(0).to(self.device) * self.diag_bias_list[i*(i+1)//2 + j]
                # mat_diag_bias = torch.eye(inputs.size()[3]).to('cuda:0').unsqueeze(0).unsqueeze(0) * self.diag_bias_list[i*(i+1)//2 + j]
                if j == 0:
                    output = output_i + mat_diag_bias
                else:
                    # matrix product chains the power terms
                    output = torch.einsum('abcd,abde->abce', output_i, output)
            output_list.append(output)
        output = 0
        for i in range(self.radius):
            output += output_list[i] * self.switch[i]
        output = output + self.all_bias
        if self.layer_norm:
            # Now, changing shapes from [1xdxnxn] to [nxnxd] for BN
            output = output.permute(3,2,1,0).squeeze()
            # output = self.bn_x(output.reshape(m*m, self.output_dim.item())) # batch normalization
            output = self.ln_x(output) # layer normalization
            # Returning output back to original shape
            output = output.reshape(m, m, self.output_dim.item())
            output = output.permute(2,1,0).unsqueeze(0)
        output = F.relu(output) # non-linear activation
        if self.residual:
            # Now, changing shapes from [1xdxnxn] to [nxnxd] for Linear() layer
            inputs, output = inputs.permute(3,2,1,0).squeeze(), output.permute(3,2,1,0).squeeze()
            residual_ = self.res_x(inputs)
            output = residual_ + output # residual connection
            # Returning output back to original shape
            output = output.permute(2,1,0).unsqueeze(0)
        output = F.dropout(output, self.dropout, training=self.training)
        return output
def ops_2_to_2(inputs, dim, normalization='inf', normalization_val=1.0): # N x D x m x m
    """Compute the 15 linear equivariant basis operations on 2nd-order tensors.

    :param inputs: tensor of shape N x D x m x m
    :param dim: m, the side length of the square slices
    :param normalization: ``'inf'`` divides sum-based ops by m (or m**2);
        any other value (incl. None) leaves them unscaled
    :param normalization_val: unused; kept for backward compatibility
    :return: list of 15 tensors, each of shape N x D x m x m
    """
    diag_part = torch.diagonal(inputs, dim1 = 2, dim2 = 3)   # N x D x m
    sum_diag_part = torch.sum(diag_part, dim=2, keepdim = True)  # N x D x 1
    sum_of_rows = torch.sum(inputs, dim=3)  # N x D x m
    sum_of_cols = torch.sum(inputs, dim=2)  # N x D x m
    sum_all = torch.sum(sum_of_rows, dim=2)  # N x D
    # op1 - (1234) - extract diag
    op1 = torch.diag_embed(diag_part)  # N x D x m x m
    # op2 - (1234) + (12)(34) - place sum of diag on diag
    op2 = torch.diag_embed(sum_diag_part.repeat(1, 1, dim))
    # op3 - (1234) + (123)(4) - place sum of row i on diag ii
    op3 = torch.diag_embed(sum_of_rows)
    # op4 - (1234) + (124)(3) - place sum of col i on diag ii
    op4 = torch.diag_embed(sum_of_cols)
    # op5 - (1234) + (124)(3) + (123)(4) + (12)(34) + (12)(3)(4) - place sum of all entries on diag
    op5 = torch.diag_embed(sum_all.unsqueeze(2).repeat(1, 1, dim))
    # op6 - (14)(23) + (13)(24) + (24)(1)(3) + (124)(3) + (1234) - place sum of col i on row i
    op6 = sum_of_cols.unsqueeze(3).repeat(1, 1, 1, dim)
    # op7 - (14)(23) + (23)(1)(4) + (234)(1) + (123)(4) + (1234) - place sum of row i on row i
    op7 = sum_of_rows.unsqueeze(3).repeat(1, 1, 1, dim)
    # op8 - (14)(2)(3) + (134)(2) + (14)(23) + (124)(3) + (1234) - place sum of col i on col i
    op8 = sum_of_cols.unsqueeze(2).repeat(1, 1, dim, 1)
    # op9 - (13)(24) + (13)(2)(4) + (134)(2) + (123)(4) + (1234) - place sum of row i on col i
    op9 = sum_of_rows.unsqueeze(2).repeat(1, 1, dim, 1)
    # op10 - (1234) + (14)(23) - identity
    op10 = inputs
    # op11 - (1234) + (13)(24) - transpose
    op11 = torch.transpose(inputs, -2, -1)
    # op12 - (1234) + (234)(1) - place ii element in row i
    op12 = diag_part.unsqueeze(3).repeat(1, 1, 1, dim)
    # op13 - (1234) + (134)(2) - place ii element in col i
    op13 = diag_part.unsqueeze(2).repeat(1, 1, dim, 1)
    # op14 - (34)(1)(2) + (234)(1) + (134)(2) + (1234) + (12)(34) - place sum of diag in all entries
    op14 = sum_diag_part.unsqueeze(3).repeat(1, 1, dim, dim)
    # op15 - sum of all ops - place sum of all entries in all entries
    op15 = sum_all.unsqueeze(2).unsqueeze(3).repeat(1, 1, dim, dim)
    if normalization is not None:
        float_dim = float(dim)
        # BUG FIX: the original used `normalization is 'inf'`, an identity
        # comparison that only works because CPython interns the literal;
        # it fails for runtime-built strings and raises a SyntaxWarning.
        if normalization == 'inf':
            op2 = torch.div(op2, float_dim)
            op3 = torch.div(op3, float_dim)
            op4 = torch.div(op4, float_dim)
            op5 = torch.div(op5, float_dim**2)
            op6 = torch.div(op6, float_dim)
            op7 = torch.div(op7, float_dim)
            op8 = torch.div(op8, float_dim)
            op9 = torch.div(op9, float_dim)
            op14 = torch.div(op14, float_dim)
            op15 = torch.div(op15, float_dim**2)
    return [op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15]
class LayerNorm(nn.Module):
    """Layer normalization over the first two axes of an n x n x d tensor,
    with a learnable per-feature scale ``a`` and shift ``b``."""
    def __init__(self, d):
        super().__init__()
        # scale / shift broadcast over both leading axes: shape 1 x 1 x d
        self.a = nn.Parameter(torch.ones(1, 1, d))
        self.b = nn.Parameter(torch.zeros(1, 1, d))
    def forward(self, x):
        # x tensor of the shape n x n x d
        mu = x.mean(dim=(0, 1), keepdim=True)
        sigma2 = x.var(dim=(0, 1), keepdim=True, unbiased=False)
        return self.a * (x - mu) / torch.sqrt(sigma2 + 1e-6) + self.b
| 8,076
| 39.385
| 139
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/layers/three_wl_gnn_layers.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Layers used for
3WLGNN
Provably Powerful Graph Networks (Maron et al., 2019)
https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
"""
class RegularBlock(nn.Module):
    """
    Inputs: N x input_depth x m x m
    Run the input through two parallel MLP routes, multiply the results,
    and merge with the input via a skip connection that reduces the
    dimension back to out_features.
    """
    def __init__(self, depth_of_mlp, in_features, out_features, residual=False):
        super().__init__()
        self.residual = residual
        self.mlp1 = MlpBlock(in_features, out_features, depth_of_mlp)
        self.mlp2 = MlpBlock(in_features, out_features, depth_of_mlp)
        self.skip = SkipConnection(in_features + out_features, out_features)
        if self.residual:
            self.res_x = nn.Linear(in_features, out_features)
    def forward(self, inputs):
        mult = torch.matmul(self.mlp1(inputs), self.mlp2(inputs))
        out = self.skip(in1=inputs, in2=mult)
        if self.residual:
            # Reshape [1 x d x n x n] -> [n x n x d] so nn.Linear acts on channels.
            flat_in = inputs.permute(3, 2, 1, 0).squeeze()
            flat_out = out.permute(3, 2, 1, 0).squeeze()
            merged = self.res_x(flat_in) + flat_out  # residual connection
            # back to the original [1 x d x n x n] layout
            out = merged.permute(2, 1, 0).unsqueeze(0)
        return out
class MlpBlock(nn.Module):
    """
    Stack of 1x1 convolution layers, each followed by the activation.
    """
    def __init__(self, in_features, out_features, depth_of_mlp, activation_fn=nn.functional.relu):
        super().__init__()
        self.activation = activation_fn
        self.convs = nn.ModuleList()
        n_in = in_features
        for _ in range(depth_of_mlp):
            conv = nn.Conv2d(n_in, out_features, kernel_size=1, padding=0, bias=True)
            _init_weights(conv)
            self.convs.append(conv)
            n_in = out_features  # subsequent layers map out -> out
    def forward(self, inputs):
        out = inputs
        for conv in self.convs:
            out = self.activation(conv(out))
        return out
class SkipConnection(nn.Module):
    """
    Connects the two given inputs with concatenation
    :param in1: earlier input tensor of shape N x d1 x m x m
    :param in2: later input tensor of shape N x d2 x m x m
    :param in_features: d1+d2
    :param out_features: output num of features
    :return: Tensor of shape N x output_depth x m x m
    """
    def __init__(self, in_features, out_features):
        super().__init__()
        self.conv = nn.Conv2d(in_features, out_features, kernel_size=1, padding=0, bias=True)
        _init_weights(self.conv)
    def forward(self, in1, in2):
        # concatenate along channels: N x (d1+d2) x m x m, then project down
        stacked = torch.cat((in1, in2), dim=1)
        return self.conv(stacked)
class FullyConnected(nn.Module):
    """Linear layer with an optional activation applied to its output."""
    def __init__(self, in_features, out_features, activation_fn=nn.functional.relu):
        super().__init__()
        self.fc = nn.Linear(in_features, out_features)
        _init_weights(self.fc)
        self.activation = activation_fn
    def forward(self, input):
        out = self.fc(input)
        return out if self.activation is None else self.activation(out)
def diag_offdiag_maxpool(input):
    """Pool a B x S x N x N tensor to B x 2S: per-channel max over the
    diagonal concatenated with the max over off-diagonal entries."""
    N = input.shape[-1]
    max_diag = torch.max(torch.diagonal(input, dim1=-2, dim2=-1), dim=2)[0]  # BxS
    # Shift the diagonal below every entry so it cannot win the off-diag max.
    val = torch.abs(torch.max(max_diag) + torch.max(-1 * input))
    min_mat = (val * torch.eye(N, device=input.device)).view(1, 1, N, N)
    masked = input - min_mat
    max_offdiag = torch.max(torch.max(masked, dim=3)[0], dim=2)[0]  # BxS
    return torch.cat((max_diag, max_offdiag), dim=1)  # output Bx2S
def _init_weights(layer):
"""
Init weights of the layer
:param layer:
:return:
"""
nn.init.xavier_uniform_(layer.weight)
# nn.init.xavier_normal_(layer.weight)
if layer.bias is not None:
nn.init.zeros_(layer.bias)
class LayerNorm(nn.Module):
    """Normalize an n x n x d tensor over its first two axes, with a
    learnable per-feature affine transform (scale ``a``, shift ``b``)."""
    def __init__(self, d):
        super().__init__()
        shape = (1, 1, d)  # broadcast over both leading axes
        self.a = nn.Parameter(torch.ones(shape))
        self.b = nn.Parameter(torch.zeros(shape))
    def forward(self, x):
        # x tensor of the shape n x n x d
        centered = x - x.mean(dim=(0, 1), keepdim=True)
        denom = torch.sqrt(x.var(dim=(0, 1), keepdim=True, unbiased=False) + 1e-6)
        return self.a * centered / denom + self.b
| 4,983
| 31.154839
| 108
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/train/train_Planetoid_node_classification.py
|
"""
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
from train.metrics import accuracy_TU as accuracy
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, dataset, train_idx):
    """One full-graph training step on a Planetoid citation graph.

    :param dataset: wrapper exposing ``dataset[0]`` (PyG Data) and ``edge_attr``
    :param train_idx: node indices used for the loss/accuracy
    :return: (loss, train accuracy, optimizer)
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    nb_data = 0   # unused, kept from the mini-batch template below
    gpu_mem = 0   # unused
    # for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
    batch_x = dataset.dataset[0].x.to(device)
    batch_e = dataset.edge_attr.to(device)
    batch_labels = dataset.dataset[0].y.long().to(device)
    edge_index = dataset.dataset[0].edge_index.long().to(device)
    train_idx = train_idx.to(device)
    optimizer.zero_grad()
    # Score all nodes, then restrict to the training split.
    batch_scores = model.forward(batch_x, edge_index, batch_e)[train_idx]
    loss = model.loss(batch_scores, batch_labels[train_idx]).to(torch.float)
    loss.backward()
    optimizer.step()
    epoch_loss = loss.detach().item()
    # accuracy() returns a raw correct count; normalize by split size.
    epoch_train_acc = accuracy(batch_scores, batch_labels[train_idx]) / train_idx.size(0)
    return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_sparse(model, device, dataset, val_idx):
    """Full-graph evaluation on a Planetoid citation graph.

    :param val_idx: node indices of the evaluation split
    :return: (loss, accuracy) over ``val_idx``
    """
    model.eval()
    epoch_test_loss = 0
    epoch_test_acc = 0
    nb_data = 0  # unused, kept from the mini-batch template
    with torch.no_grad():
        batch_x = dataset.dataset[0].x.to(device)
        batch_e = dataset.edge_attr.to(device)
        batch_labels = dataset.dataset[0].y.long().to(device)
        edge_index = dataset.dataset[0].edge_index.long().to(device)
        val_idx = val_idx.to(device)
        batch_scores = model.forward(batch_x, edge_index, batch_e)[val_idx]
        loss = model.loss(batch_scores, batch_labels[val_idx]).to(torch.float)
        epoch_test_loss = loss.detach().item()
        # accuracy() returns a raw correct count; normalize by split size.
        epoch_test_acc = accuracy(batch_scores, batch_labels[val_idx]) / val_idx.size(0)
    return epoch_test_loss, epoch_test_acc
# """
# For WL-GNNs
# """
# def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
# model.train()
# epoch_loss = 0
# epoch_train_acc = 0
# nb_data = 0
# gpu_mem = 0
# optimizer.zero_grad()
# for iter, (x_with_node_feat, labels) in enumerate(data_loader):
# x_with_node_feat = x_with_node_feat.to(device)
# labels = labels.to(device)
#
# scores = model.forward(x_with_node_feat)
# loss = model.loss(scores, labels)
# loss.backward()
#
# if not (iter%batch_size):
# optimizer.step()
# optimizer.zero_grad()
#
# epoch_loss += loss.detach().item()
# epoch_train_acc += accuracy(scores, labels)
# nb_data += labels.size(0)
# epoch_loss /= (iter + 1)
# epoch_train_acc /= nb_data
#
# return epoch_loss, epoch_train_acc, optimizer
#
# def evaluate_network_dense(model, device, data_loader, epoch):
# model.eval()
# epoch_test_loss = 0
# epoch_test_acc = 0
# nb_data = 0
# with torch.no_grad():
# for iter, (x_with_node_feat, labels) in enumerate(data_loader):
# x_with_node_feat = x_with_node_feat.to(device)
# labels = labels.to(device)
#
# scores = model.forward(x_with_node_feat)
# loss = model.loss(scores, labels)
# epoch_test_loss += loss.detach().item()
# epoch_test_acc += accuracy(scores, labels)
# nb_data += labels.size(0)
# epoch_test_loss /= (iter + 1)
# epoch_test_acc /= nb_data
#
# return epoch_test_loss, epoch_test_acc
def check_patience(all_losses, best_loss, best_epoch, curr_loss, curr_epoch, counter):
    """Early-stopping bookkeeping: reset the patience counter when the loss
    improves, otherwise increment it.

    ``all_losses`` is unused but kept for interface compatibility.
    :return: (best_loss, best_epoch, counter)
    """
    if curr_loss < best_loss:
        return curr_loss, curr_epoch, 0
    return best_loss, best_epoch, counter + 1
| 3,774
| 30.991525
| 89
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/train/metrics.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
import numpy as np
def MAE(scores, targets):
    """Mean absolute error between predictions and targets, as a float."""
    return F.l1_loss(scores, targets).detach().item()
# it is the original one to calculate the value, have found the ogb evaluator use the same way. There we use this one to calculate.
def accuracy_TU(scores, targets):
    """Number of correct argmax predictions (raw count, not a fraction)."""
    preds = scores.detach().argmax(dim=1)
    return preds.eq(targets).float().sum().item()
def accuracy_MNIST_CIFAR(scores, targets):
    """Count of samples whose argmax class matches the target."""
    return (scores.detach().argmax(dim=1) == targets).float().sum().item()
def accuracy_CITATION_GRAPH(scores, targets):
    """Fraction of samples whose argmax class matches the target."""
    correct = (scores.detach().argmax(dim=1) == targets).float().sum().item()
    return correct / len(targets)
# it takes into account the case of each class, is the sum of the accuracy of each class(the right divide the total in each class) then
# divided by the total number of classes
def accuracy_SBM(scores, targets):
    """Class-balanced accuracy in percent: per-class recall (diagonal of the
    confusion matrix over the true-class count) averaged over ALL classes;
    classes with no samples contribute 0."""
    S = targets.cpu().numpy()
    C = np.argmax( torch.nn.Softmax(dim=1)(scores).cpu().detach().numpy() , axis=1 )
    CM = confusion_matrix(S,C).astype(np.float32)
    nb_classes = CM.shape[0]
    targets = targets.cpu().detach().numpy()
    nb_non_empty_classes = 0  # counted but not used in the final average
    pr_classes = np.zeros(nb_classes)
    for r in range(nb_classes):
        cluster = np.where(targets==r)[0]  # samples whose true class is r
        if cluster.shape[0] != 0:
            pr_classes[r] = CM[r,r]/ float(cluster.shape[0])  # recall of class r
            if CM[r,r]>0:
                nb_non_empty_classes += 1
        else:
            pr_classes[r] = 0.0
    acc = 100.* np.sum(pr_classes)/ float(nb_classes)
    return acc
def accuracy_ogb(y_pred, y_true):
    """Count of correct argmax predictions (same convention as accuracy_TU).

    :param y_pred: raw class scores, shape (N, C)
    :param y_true: integer class labels, shape (N,)
    :return: number of correct predictions as a float
    """
    # Cleanup: dropped the unused ``acc_list`` accumulator and the
    # commented-out per-task ogb loop the function never executed.
    y_pred = y_pred.detach().argmax(dim=1)
    acc = (y_pred == y_true).float().sum().item()
    return acc
def binary_f1_score(scores, targets):
    """Computes the F1 score using scikit-learn for binary class labels.
    Returns the F1 score for the positive class, i.e. labelled '1'.
    """
    predicted = scores.argmax(dim=1).cpu().numpy()
    actual = targets.cpu().numpy()
    return f1_score(actual, predicted, average='binary')
def accuracy_VOC(scores, targets):
    """Weighted F1 score over classes.

    BUG FIX: sklearn's ``f1_score(y_true, y_pred, ...)`` expects the ground
    truth first. The original call passed predictions first; since the
    weighted average is not symmetric in its arguments, that skewed the
    reported score. Arguments are now in the documented order.
    """
    preds = scores.detach().argmax(dim=1).cpu()
    targets = targets.cpu().detach().numpy()
    acc = f1_score(targets, preds, average='weighted')
    return acc
| 2,754
| 31.797619
| 135
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/train/train_ogb_node_classification.py
|
"""
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
import dgl
from tqdm import tqdm
from train.metrics import accuracy_SBM as accuracy
from train.metrics import accuracy_ogb
from ogb.nodeproppred import Evaluator
"""
For GCNs
"""
def train_epoch(model, optimizer, device, train_loader, epoch=None):
    """Train one epoch over mini-batched subgraphs (ogb node classification).

    :param train_loader: yields PyG Data objects carrying ``train_mask``
    :return: loss averaged over all training nodes seen this epoch
    """
    model.train()
    # pbar = tqdm(total=len(train_loader))
    # pbar.set_description(f'Training epoch: {epoch:04d}')
    total_loss = total_examples = 0
    for data in train_loader:
        optimizer.zero_grad()
        data = data.to(device)
        try:
            # Laplacian PE sign-flip augmentation: each eigenvector's sign
            # is ambiguous, so flip each column randomly per step.
            batch_pos_enc = data.pos_enc.to(device)
            sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
            sign_flip[sign_flip >= 0.5] = 1.0
            sign_flip[sign_flip < 0.5] = -1.0
            batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
            batch_scores = model.forward(data.x, data.edge_index, data.edge_attr,batch_pos_enc)
        except:
            # NOTE(review): bare except is the fallback for models/data
            # without pos_enc, but it also hides unrelated errors —
            # consider narrowing to (AttributeError, TypeError).
            batch_scores = model(data.x, data.edge_index, data.edge_attr)
        loss = model.loss(batch_scores[data.train_mask], data.y.view(-1)[data.train_mask]).to(torch.float)
        loss.backward()
        optimizer.step()
        # Weight the running loss by the number of training nodes in the batch.
        total_loss += float(loss) * int(data.train_mask.sum())
        total_examples += int(data.train_mask.sum())
        # pbar.update(1)
    #
    # pbar.close()
    return total_loss / total_examples
# model.train()
#
# # for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
# batch_x = dataset.x.to(device)
# batch_e = dataset.edge_attr.to(device)
# # batch_e = dataset.edge_attr
# batch_labels = dataset.y.long().to(device)
# edge_index = dataset.edge_index.long().to(device)
# train_idx = train_idx.to(device)
#
# optimizer.zero_grad()
# batch_scores = model.forward(batch_x, edge_index, batch_e)[train_idx]
# loss = model.loss(batch_scores, batch_labels.view(-1)[train_idx]).to(torch.float)
# loss.backward()
# optimizer.step()
# epoch_loss = loss.detach().item()
#
# return epoch_loss
def train_epoch_arxiv(model, optimizer, device, dataset, train_idx):
    """One full-graph training step on ogbn-arxiv.

    :param dataset: PyG data object (x, y, edge_index, edge_attr, optional pos_enc)
    :param train_idx: node indices of the training split
    :return: training loss for this step
    """
    model.train()
    # for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
    batch_x = dataset.x.to(device)
    batch_e = dataset.edge_attr.to(device)
    # batch_e = dataset.edge_attr
    batch_labels = dataset.y.long().to(device)
    edge_index = dataset.edge_index.long().to(device)
    train_idx = train_idx.to(device)
    optimizer.zero_grad()
    try:
        # Laplacian PE sign-flip augmentation (eigenvector sign ambiguity).
        batch_pos_enc = dataset.pos_enc.to(device)
        sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
        sign_flip[sign_flip >= 0.5] = 1.0
        sign_flip[sign_flip < 0.5] = -1.0
        batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
        batch_scores = model.forward(batch_x, edge_index, batch_e, batch_pos_enc)[train_idx]
    except:
        # NOTE(review): bare except falls back when pos_enc is absent, but
        # also hides unrelated failures; consider narrowing.
        batch_scores = model.forward(batch_x, edge_index, batch_e)[train_idx]
    loss = model.loss(batch_scores, batch_labels.view(-1)[train_idx]).to(torch.float)
    loss.backward()
    optimizer.step()
    epoch_loss = loss.detach().item()
    return epoch_loss
def train_epoch_proteins(model, optimizer, device, train_loader, epoch=None):
    """Train one epoch on ogbn-proteins (multi-label, uses loss_proteins).

    :param train_loader: yields PyG Data objects carrying ``train_mask``
    :return: loss averaged over all training nodes seen this epoch
    """
    model.train()
    # pbar = tqdm(total=len(train_loader))
    # pbar.set_description(f'Training epoch: {epoch:04d}')
    total_loss = total_examples = 0
    for data in train_loader:
        optimizer.zero_grad()
        data = data.to(device)
        try:
            # Laplacian PE sign-flip augmentation (eigenvector sign ambiguity).
            batch_pos_enc = data.pos_enc.to(device)
            sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
            sign_flip[sign_flip >= 0.5] = 1.0
            sign_flip[sign_flip < 0.5] = -1.0
            batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
            out = model.forward(data.x, data.edge_index, data.edge_attr,batch_pos_enc)
        except:
            # NOTE(review): bare except hides unrelated errors; narrow if possible.
            out = model(data.x, data.edge_index, data.edge_attr)
        # multi-label target: y is kept 2-D, no view(-1)
        loss = model.loss_proteins(out[data.train_mask], data.y[data.train_mask])
        loss.backward()
        optimizer.step()
        total_loss += float(loss) * int(data.train_mask.sum())
        total_examples += int(data.train_mask.sum())
        # pbar.update(1)
    #
    # pbar.close()
    return total_loss / total_examples
@torch.no_grad()
def evaluate_network(model, device, test_loader, evaluator, epoch):
    """Evaluate over mini-batched subgraphs with the ogb evaluator.

    :return: (train_acc, valid_acc, test_acc, mean valid-mask loss per node)
    """
    model.eval()
    y_true = {'train': [], 'valid': [], 'test': []}
    y_pred = {'train': [], 'valid': [], 'test': []}
    # pbar = tqdm(total=len(test_loader))
    # pbar.set_description(f'Evaluating epoch: {epoch:04d}')
    total_loss = total_examples = 0
    for data in test_loader:
        data = data.to(device)
        try:
            # Eval path: use pos_enc when available, no sign flip at eval time.
            batch_pos_enc = data.pos_enc.to(device)
            out = model.forward(data.x, data.edge_index.long(), data.edge_attr, batch_pos_enc)
        except:
            out = model.forward(data.x, data.edge_index.long(), data.edge_attr)
        # out = model(data.x, data.edge_index.long(), data.edge_attr)
        # Collect per-split labels/predictions for the ogb evaluator.
        for split in y_true.keys():
            mask = data[f'{split}_mask']
            y_true[split].append(data.y[mask].cpu())
            y_pred[split].append(out[mask].argmax(dim=-1, keepdim=True).cpu())
        # Validation loss is the reported loss (used e.g. for LR scheduling).
        loss = model.loss(out[data.valid_mask], data.y.view(-1)[data.valid_mask])
        total_loss += float(loss) * int(data.valid_mask.sum())
        total_examples += int(data.valid_mask.sum())
        # pbar.update(1)
    # pbar.close()
    train_acc = evaluator.eval({
        'y_true': torch.cat(y_true['train'], dim=0),
        'y_pred': torch.cat(y_pred['train'], dim=0),
    })['acc']
    valid_acc = evaluator.eval({
        'y_true': torch.cat(y_true['valid'], dim=0),
        'y_pred': torch.cat(y_pred['valid'], dim=0),
    })['acc']
    test_acc = evaluator.eval({
        'y_true': torch.cat(y_true['test'], dim=0),
        'y_pred': torch.cat(y_pred['test'], dim=0),
    })['acc']
    return train_acc, valid_acc, test_acc, total_loss / total_examples
# model.train()
#
# # for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
# batch_x = dataset.x.to(device)
# batch_e = dataset.edge_attr.to(device)
# batch_labels = dataset.y.long().to(device)
# edge_index = dataset.edge_index.long().to(device)
# train_idx = train_idx.to(device)
#
# optimizer.zero_grad()
# batch_scores = model.forward(batch_x, edge_index, batch_e)[train_idx]
# loss = model.loss_proteins(batch_scores, batch_labels[train_idx]).to(torch.float)
# loss.backward()
# optimizer.step()
# epoch_loss = loss.detach().item()
#
# return epoch_loss
@torch.no_grad()
def evaluate_network_arxiv(model, device, dataset, evaluator):
    """Full-graph evaluation on ogbn-arxiv with the ogb evaluator.

    :param dataset: wrapper exposing ``dataset[0]`` (PyG Data) and ``split_idx``
    :return: (train_acc, valid_acc, test_acc, validation loss)
    """
    model.eval()
    batch_x = dataset.dataset[0].x.to(device)
    y_true = dataset.dataset[0].y.long().to(device)
    split_idx = dataset.split_idx
    batch_e = dataset.dataset[0].edge_attr.to(device)
    edge_index = dataset.dataset[0].edge_index.long().to(device)
    try:
        # Use pos_enc when the dataset/model provides it (no sign flip at eval).
        batch_pos_enc = dataset.dataset[0].pos_enc.to(device)
        batch_scores = model.forward(batch_x, edge_index, batch_e, batch_pos_enc)
    except:
        batch_scores = model.forward(batch_x, edge_index, batch_e)
    # batch_scores = model.forward(batch_x, edge_index, batch_e)
    loss = model.loss(batch_scores[split_idx['valid']], y_true.view(-1)[split_idx['valid']]).to(torch.float)
    epoch_valid_loss = loss.detach().item()
    # y_pred = batch_scores
    # ogb evaluator expects hard predictions of shape (N, 1).
    y_pred = batch_scores.argmax(dim=-1, keepdim=True)
    # y_true = y_true.view(-1, 1)
    y_true = y_true.view(-1, 1)
    train_acc = evaluator.eval({
        'y_true': y_true[split_idx['train']],
        'y_pred': y_pred[split_idx['train']],
    })['acc']
    valid_acc = evaluator.eval({
        'y_true': y_true[split_idx['valid']],
        'y_pred': y_pred[split_idx['valid']],
    })['acc']
    test_acc = evaluator.eval({
        'y_true': y_true[split_idx['test']],
        'y_pred': y_pred[split_idx['test']],
    })['acc']
    return train_acc, valid_acc, test_acc, epoch_valid_loss
@torch.no_grad()
def evaluate_network_proteins(model, device, test_loader, evaluator, epoch = None):
    """Evaluate ogbn-proteins over mini-batched subgraphs (ROC-AUC metric).

    :return: (train_rocauc, valid_rocauc, test_rocauc, mean valid-mask loss)
    """
    model.eval()
    y_true = {'train': [], 'valid': [], 'test': []}
    y_pred = {'train': [], 'valid': [], 'test': []}
    # pbar = tqdm(total=len(test_loader))
    # pbar.set_description(f'Evaluating epoch: {epoch:04d}')
    total_loss = total_examples = 0
    for data in test_loader:
        data = data.to(device)
        try:
            batch_pos_enc = data.pos_enc.to(device)
            out = model.forward(data.x, data.edge_index, data.edge_attr, batch_pos_enc)
        except:
            out = model.forward(data.x, data.edge_index, data.edge_attr)
        # out = model(data.x, data.edge_index, data.edge_attr)
        for split in y_true.keys():
            mask = data[f'{split}_mask']
            y_true[split].append(data.y[mask].cpu())
            # ROC-AUC needs raw scores, not argmax predictions.
            y_pred[split].append(out[mask].cpu())
        loss = model.loss_proteins(out[data.valid_mask], data.y[data.valid_mask])
        total_loss += float(loss) * int(data.valid_mask.sum())
        total_examples += int(data.valid_mask.sum())
        # pbar.update(1)
    # pbar.close()
    train_rocauc = evaluator.eval({
        'y_true': torch.cat(y_true['train'], dim=0),
        'y_pred': torch.cat(y_pred['train'], dim=0),
    })['rocauc']
    valid_rocauc = evaluator.eval({
        'y_true': torch.cat(y_true['valid'], dim=0),
        'y_pred': torch.cat(y_pred['valid'], dim=0),
    })['rocauc']
    test_rocauc = evaluator.eval({
        'y_true': torch.cat(y_true['test'], dim=0),
        'y_pred': torch.cat(y_pred['test'], dim=0),
    })['rocauc']
    return train_rocauc, valid_rocauc, test_rocauc, total_loss / total_examples
#
# model.eval()
# batch_x = dataset.dataset[0].x.to(device)
# y_true = dataset.dataset[0].y.long().to(device)
# split_idx = dataset.split_idx
# batch_e = dataset.dataset[0].edge_attr.to(device)
# edge_index = dataset.dataset[0].edge_index.long().to(device)
#
# batch_scores = model.forward(batch_x, edge_index, batch_e)
# loss = model.loss_proteins(batch_scores[split_idx['valid']], y_true[split_idx['valid']]).to(torch.float)
# epoch_valid_loss = loss.detach().item()
# y_pred = batch_scores
# # y_pred = batch_scores.argmax(dim=-1, keepdim=True)
# # y_true = y_true.view(-1, 1)
# train_acc = evaluator.eval({
# 'y_true': y_true[split_idx['train']],
# 'y_pred': y_pred[split_idx['train']],
# })['rocauc']
# valid_acc = evaluator.eval({
# 'y_true': y_true[split_idx['valid']],
# 'y_pred': y_pred[split_idx['valid']],
# })['rocauc']
# test_acc = evaluator.eval({
# 'y_true': y_true[split_idx['test']],
# 'y_pred': y_pred[split_idx['test']],
# })['rocauc']
#
# return train_acc, valid_acc, test_acc, epoch_valid_loss
| 11,004
| 35.440397
| 110
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/train/train_SBMs_node_classification.py
|
"""
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
import dgl
from train.metrics import accuracy_SBM as accuracy
from train.metrics import accuracy_ogb
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, data_loader, epoch, framework = 'pyg'):
    """Train one epoch on SBM node classification.

    :param data_loader: yields PyG Data objects ('pyg') or (graph, labels)
        pairs ('dgl'), depending on ``framework``
    :return: (mean loss per batch, mean balanced accuracy per batch, optimizer)
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    epoch_train_acc_ogb = 0  # unused; kept from an earlier experiment
    nb_data = 0              # unused
    gpu_mem = 0              # unused
    if framework == 'pyg':
        for iter, batch_graphs in enumerate(data_loader):
            batch_x = batch_graphs.x.to(device)  # num x feat
            edge_index = batch_graphs.edge_index.to(device)
            batch_e = batch_graphs.edge_attr.to(device)
            batch_labels = batch_graphs.y.long().to(device)
            # batch_graphs = batch_graphs.to(device) # add to satisfy the version to put the graph to the cuda
            optimizer.zero_grad()
            try:
                # Laplacian PE sign-flip augmentation (eigenvector sign ambiguity).
                batch_pos_enc = batch_graphs.pos_enc.to(device)
                sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
                sign_flip[sign_flip >= 0.5] = 1.0
                sign_flip[sign_flip < 0.5] = -1.0
                batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
                batch_scores = model.forward(batch_x, edge_index, batch_e, batch_pos_enc)
            except:
                # NOTE(review): bare except hides unrelated errors; narrow if possible.
                # batch_scores = model.forward(batch_graphs.x, batch_graphs.edge_index)
                batch_scores = model.forward(batch_x, edge_index, batch_e)
            loss = model.loss(batch_scores, batch_labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.detach().item()
            epoch_train_acc += accuracy(batch_scores, batch_labels)
            # epoch_train_acc_ogb += accuracy_ogb(batch_scores, batch_labels)
            # nb_data += batch_labels.size(0)
            # print("Number: ", iter)
        epoch_loss /= (iter + 1)
        epoch_train_acc /= (iter + 1)
        # epoch_train_acc_ogb /= nb_data
        return epoch_loss, epoch_train_acc, optimizer
    elif framework == 'dgl':
        for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
            batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_labels = batch_labels.to(device)
            batch_graphs = batch_graphs.to(device) #add to satisfy the version to put the graph to the cuda
            optimizer.zero_grad()
            try:
                # Same sign-flip augmentation on the DGL path.
                batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
                sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
                sign_flip[sign_flip>=0.5] = 1.0; sign_flip[sign_flip<0.5] = -1.0
                batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
                batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
            except:
                batch_scores = model.forward(batch_graphs, batch_x, batch_e)
            loss = model.loss(batch_scores, batch_labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.detach().item()
            epoch_train_acc += accuracy(batch_scores, batch_labels)
            # print("Number: ", iter)
        epoch_loss /= (iter + 1)
        epoch_train_acc /= (iter + 1)
        return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_sparse(model, device, data_loader, epoch, framework = 'pyg'):
    """Evaluate `model` on `data_loader`; return (mean loss, mean accuracy).

    Supports two loader conventions, selected by `framework`:
      * 'pyg': each batch is a single PyG Data object carrying
        x / edge_index / edge_attr / y and an optional `pos_enc`.
      * 'dgl': each batch is a (batched_graph, labels) pair with features
        stored in ndata/edata.
    `epoch` is unused but kept so the signature mirrors the training loop.
    """
    model.eval()
    epoch_test_loss = 0
    epoch_test_acc = 0
    if framework == 'pyg':
        with torch.no_grad():
            for iter, batch_graphs in enumerate(data_loader):
                batch_x = batch_graphs.x.to(device)  # num_nodes x feat_dim
                edge_index = batch_graphs.edge_index.to(device)
                batch_e = batch_graphs.edge_attr.to(device)
                batch_labels = batch_graphs.y.long().to(device)
                # Only AttributeError ("no pos_enc on this batch") and
                # TypeError ("model.forward does not accept pos_enc") mean we
                # should fall back; a bare `except:` would also hide genuine
                # failures inside model.forward().
                try:
                    batch_pos_enc = batch_graphs.pos_enc.to(device)
                    batch_scores = model.forward(batch_x, edge_index, batch_e, batch_pos_enc)
                except (AttributeError, TypeError):
                    batch_scores = model.forward(batch_x, edge_index, batch_e)
                loss = model.loss(batch_scores, batch_labels)
                epoch_test_loss += loss.detach().item()
                epoch_test_acc += accuracy(batch_scores, batch_labels)
            epoch_test_loss /= (iter + 1)
            epoch_test_acc /= (iter + 1)
        return epoch_test_loss, epoch_test_acc
    elif framework == 'dgl':
        with torch.no_grad():
            for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
                batch_x = batch_graphs.ndata['feat'].to(device)
                batch_e = batch_graphs.edata['feat'].to(device)
                batch_labels = batch_labels.to(device)
                batch_graphs = batch_graphs.to(device)  # newer DGL requires moving the graph itself
                # KeyError: no 'pos_enc' in ndata; TypeError: model takes no pos_enc.
                try:
                    batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
                    batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
                except (KeyError, TypeError):
                    batch_scores = model.forward(batch_graphs, batch_x, batch_e)
                loss = model.loss(batch_scores, batch_labels)
                epoch_test_loss += loss.detach().item()
                epoch_test_acc += accuracy(batch_scores, batch_labels)
            epoch_test_loss /= (iter + 1)
            epoch_test_acc /= (iter + 1)
        return epoch_test_loss, epoch_test_acc
"""
For WL-GNNs
"""
# def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
#
# model.train()
# epoch_loss = 0
# epoch_train_acc = 0
# nb_data = 0
# gpu_mem = 0
# optimizer.zero_grad()
# for iter, (x_with_node_feat, labels) in enumerate(data_loader):
# x_with_node_feat = x_with_node_feat.to(device)
# labels = labels.to(device)
#
# scores = model.forward(x_with_node_feat)
# loss = model.loss(scores, labels)
# loss.backward()
#
# if not (iter%batch_size):
# optimizer.step()
# optimizer.zero_grad()
#
# epoch_loss += loss.detach().item()
# epoch_train_acc += accuracy(scores, labels)
# epoch_loss /= (iter + 1)
# epoch_train_acc /= (iter + 1)
#
# return epoch_loss, epoch_train_acc, optimizer
#
#
#
# def evaluate_network_dense(model, device, data_loader, epoch):
#
# model.eval()
# epoch_test_loss = 0
# epoch_test_acc = 0
# nb_data = 0
# with torch.no_grad():
# for iter, (x_with_node_feat, labels) in enumerate(data_loader):
# x_with_node_feat = x_with_node_feat.to(device)
# labels = labels.to(device)
#
# scores = model.forward(x_with_node_feat)
# loss = model.loss(scores, labels)
# epoch_test_loss += loss.detach().item()
# epoch_test_acc += accuracy(scores, labels)
# epoch_test_loss /= (iter + 1)
# epoch_test_acc /= (iter + 1)
#
# return epoch_test_loss, epoch_test_acc
| 7,186
| 38.489011
| 113
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/utils/cleaner_main.py
|
# Clean the main.py file after conversion from notebook.
# Any notebook code is removed from the main.py file.
import subprocess
def cleaner_main(filename):
    """Convert `<filename>.ipynb` to `<filename>.py` and strip notebook-only code.

    Runs `jupyter nbconvert`, then rewrites the generated .py in place:
    removes cell markers, comment lines, the in_ipynb() helper, all
    notebook_mode branches and notebook-only variables, and unindents the
    terminal-mode block so main() runs at module level.
    """
    # file names
    file_notebook = filename + '.ipynb'
    file_python = filename + '.py'
    # convert notebook to python file; argument-list form (no shell) is safe
    # with spaces or shell metacharacters in the filename
    print('Convert ' + file_notebook + ' to ' + file_python)
    subprocess.check_output(['jupyter', 'nbconvert', '--to', 'script', file_notebook])
    print('Clean ' + file_python)
    # open file
    with open(file_python, "r") as f_in:
        lines_in = f_in.readlines()
    # remove cell indices
    lines_in = [line for line in lines_in if '# In[' not in line]
    # remove comments
    lines_in = [line for line in lines_in if line[0] != '#']
    # remove "in_ipynb()" function (from its `def` line to the first
    # subsequent line that is neither blank nor indented)
    idx_start_fnc = next((i for i, x in enumerate(lines_in) if 'def in_ipynb' in x), None)
    if idx_start_fnc is not None:
        idx_end_fnc = idx_start_fnc + next((i for i, x in enumerate(lines_in[idx_start_fnc+1:]) if x[:4] not in ['\n', '    ']), None)
        lines_in = [line for i, line in enumerate(lines_in) if i not in range(idx_start_fnc, idx_end_fnc+1)]
    list_elements_to_remove = ['in_ipynb()', 'print(notebook_mode)']
    for elem in list_elements_to_remove:
        lines_in = [line for line in lines_in if elem not in line]
    # unindent "if notebook_mode==False" block
    idx_start_fnc = next((i for i, x in enumerate(lines_in) if 'if notebook_mode==False' in x), None)
    if idx_start_fnc is not None:
        idx_end_fnc = idx_start_fnc + next((i for i, x in enumerate(lines_in[idx_start_fnc+1:]) if x[:8] not in ['\n', '        ']), None)
        for i in range(idx_start_fnc, idx_end_fnc+1):
            lines_in[i] = lines_in[i][4:]
        lines_in.pop(idx_start_fnc)
    list_elements_to_remove = ['# notebook mode', '# terminal mode']
    for elem in list_elements_to_remove:
        lines_in = [line for line in lines_in if elem not in line]
    # remove remaining "if notebook_mode==True" blocks - single indent
    run = True
    while run:
        idx_start_fnc = next((i for i, x in enumerate(lines_in) if x[:16] == 'if notebook_mode'), None)
        if idx_start_fnc is not None:
            idx_end_fnc = idx_start_fnc + next((i for i, x in enumerate(lines_in[idx_start_fnc+1:]) if x[:4] not in ['\n', '    ']), None)
            lines_in = [line for i, line in enumerate(lines_in) if i not in range(idx_start_fnc, idx_end_fnc+1)]
        else:
            run = False
    # remove "if notebook_mode==True" block - double indents
    idx_start_fnc = next((i for i, x in enumerate(lines_in) if x[:20] == '    if notebook_mode'), None)
    if idx_start_fnc is not None:
        idx_end_fnc = idx_start_fnc + next((i for i, x in enumerate(lines_in[idx_start_fnc+1:]) if x[:8] not in ['\n', '        ']), None)
        lines_in = [line for i, line in enumerate(lines_in) if i not in range(idx_start_fnc, idx_end_fnc+1)]
    # prepare main() for terminal mode; replacement lines need the trailing
    # newline, otherwise they fuse with the following source line on write
    idx = next((i for i, x in enumerate(lines_in) if 'def main' in x), None)
    if idx is not None: lines_in[idx] = 'def main():\n'
    idx = next((i for i, x in enumerate(lines_in) if x[:5] == 'else:'), None)
    if idx is not None: lines_in.pop(idx)
    idx = next((i for i, x in enumerate(lines_in) if x[:10] == '    main()'), None)
    if idx is not None: lines_in[idx] = 'main()\n'
    # remove notebook variables
    for marker in ('use_gpu = True', 'gpu_id = -1', 'device = None'):
        idx = next((i for i, x in enumerate(lines_in) if marker in x), None)
        if idx is not None: lines_in.pop(idx)
    run = True
    while run:
        idx = next((i for i, x in enumerate(lines_in) if x[:10] == 'MODEL_NAME'), None)
        if idx is not None:
            lines_in.pop(idx)
        else:
            run = False
    # save clean file (''.join is linear, unlike repeated string +=)
    with open(file_python, 'w') as f_out:
        f_out.write(''.join(lines_in))
    print('Done. ')
| 3,939
| 37.252427
| 136
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/ogbn.py
|
import time
import os
import pickle
import numpy as np
import os.path as osp
import dgl
import torch
from torch_scatter import scatter
from scipy import sparse as sp
import numpy as np
from tqdm import tqdm
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Data
from scipy.sparse import csr_matrix
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
from torch_geometric.utils import get_laplacian
# to_undirected
from torch_geometric.utils.num_nodes import maybe_num_nodes
from torch_sparse import coalesce
class load_SBMsDataSetDGL(torch.utils.data.Dataset):
    """One split ('train'/'val'/'test') of a pickled SBM dataset as DGL graphs.

    Reads `<name>_<split>.pkl` from `data_dir` and eagerly converts every
    sample into a DGLGraph (stored in `graph_lists`) plus its per-node label
    tensor (stored in `node_labels`).
    """

    def __init__(self,
                 data_dir,
                 name,
                 split):
        self.split = split
        # flag kept for parity with sibling loaders; not used below
        self.is_test = split.lower() in ['test', 'val']
        with open(os.path.join(data_dir, name + '_%s.pkl' % self.split), 'rb') as f:
            self.dataset = pickle.load(f)
        self.node_labels = []
        self.graph_lists = []
        self.n_samples = len(self.dataset)
        self._prepare()

    def _prepare(self):
        # Convert each raw sample (dense adjacency `W` + node feats/labels)
        # into a DGLGraph with dummy 1-dim edge features.
        print("preparing %d graphs for the %s set..." % (self.n_samples, self.split.upper()))

        for data in self.dataset:
            node_features = data.node_feat
            edge_list = (data.W != 0).nonzero()  # converting adj matrix to edge_list

            # Create the DGL Graph
            g = dgl.DGLGraph()
            g.add_nodes(node_features.size(0))
            g.ndata['feat'] = node_features.long()
            # one add_edges call per edge; slow for big graphs but faithful
            # to the original adjacency (both directions appear in edge_list)
            for src, dst in edge_list:
                g.add_edges(src.item(), dst.item())

            # adding edge features for Residual Gated ConvNet
            #edge_feat_dim = g.ndata['feat'].size(1) # dim same as node feature dim
            edge_feat_dim = 1 # dim same as node feature dim
            g.edata['feat'] = torch.ones(g.number_of_edges(), edge_feat_dim)

            self.graph_lists.append(g)
            self.node_labels.append(data.node_label)

    def __len__(self):
        """Return the number of graphs in the dataset."""
        return self.n_samples

    def __getitem__(self, idx):
        """
        Get the idx^th sample.
        Parameters
        ---------
        idx : int
            The sample index.
        Returns
        -------
        (dgl.DGLGraph, int)
            DGLGraph with node feature stored in `feat` field
            And its label.
        """
        return self.graph_lists[idx], self.node_labels[idx]
class PygNodeSBMsDataset(InMemoryDataset):
    """PyG InMemoryDataset wrapper around one split of a pickled SBM dataset.

    On first use, `process()` converts the raw pickle
    (`<name>_<split>.pkl` under `data_dir`) into PyG Data objects and caches
    the collated result at `processed_paths[0]`; later instantiations just
    torch.load that cache.
    """

    def __init__(self,
                 data_dir,
                 name,
                 split,
                 transform = None,
                 pre_transform = None,
                 meta_dict = None
                 ):
        # split: one of 'train' / 'val' / 'test'
        self.split = split
        self.root = data_dir
        self.name = name
        self.is_test = split.lower() in ['test', 'val']  # kept for parity; unused below

        self.node_labels = []
        self.graph_lists = []
        # NOTE(review): pre_transform and meta_dict are accepted but never
        # forwarded to the base class — confirm this is intentional.
        super(PygNodeSBMsDataset, self).__init__(self.root, transform)
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_dir(self):
        # raw pickles live directly under the dataset root
        return osp.join(self.root)

    @property
    def num_classes(self):
        # assumes __num_classes__ is set elsewhere — TODO confirm with callers
        return self.__num_classes__

    @property
    def raw_file_names(self):
        return [os.path.join(self.name + '_%s.pkl' % self.split)]

    @property
    def processed_file_names(self):
        # cache file is keyed on dataset name and split
        return 'geometric_data_processed' + self.name + self.split + '.pt'

    def download(self):
        r"""Downloads the dataset to the :obj:`self.raw_dir` folder."""
        # data must already be present on disk; nothing to download
        raise NotImplementedError

    def process(self):
        """Convert the raw pickle into collated PyG Data and cache to disk."""
        with open(os.path.join(self.root, self.name + '_%s.pkl' % self.split), 'rb') as f:
            self.dataset = pickle.load(f)
        # self.n_samples = len(self.dataset)
        print("preparing graphs for the %s set..." % (self.split.upper()))
        print('Converting graphs into PyG objects...')
        pyg_graph_list = []
        for data in tqdm(self.dataset):
            node_features = data.node_feat
            edge_list = (data.W != 0).nonzero()  # converting adj matrix to edge_list
            g = Data()
            g.__num_nodes__ = node_features.size(0)
            # nonzero() yields (num_edges, 2); transpose to PyG's (2, num_edges)
            g.edge_index = edge_list.T
            #g.edge_index = torch.from_numpy(edge_list)
            g.x = node_features.long()

            # adding edge features for Residual Gated ConvNet
            edge_feat_dim = 1
            g.edge_attr = torch.ones(g.num_edges, edge_feat_dim)
            g.y = data.node_label.to(torch.float32)
            pyg_graph_list.append(g)
        del self.dataset
        data, slices = self.collate(pyg_graph_list)
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
# def size_repr(key, item, indent=0):
# indent_str = ' ' * indent
# if torch.is_tensor(item) and item.dim() == 0:
# out = item.item()
# elif torch.is_tensor(item):
# out = str(list(item.size()))
# elif isinstance(item, list) or isinstance(item, tuple):
# out = str([len(item)])
# elif isinstance(item, dict):
# lines = [indent_str + size_repr(k, v, 2) for k, v in item.items()]
# out = '{\n' + ',\n'.join(lines) + '\n' + indent_str + '}'
# elif isinstance(item, str):
# out = f'"{item}"'
# else:
# out = str(item)
#
# return f'{indent_str}{key}={out}'
#
# class PygNodeSBMsDataset(InMemoryDataset):
#
# def __init__(self,
# data_dir,
# name,
# split,
# transform = None,
# pre_transform = None,
# meta_dict = None
# ):
#
# self.split = split
# self.root = data_dir
# self.name = name
# self.is_test = split.lower() in ['test', 'val']
#
# self.node_labels = []
# self.graph_lists = []
# super(PygNodeSBMsDataset, self).__init__(self.root, transform)
# self.data, self.slices = torch.load(self.processed_paths[0])
#
#
# @property
# def raw_dir(self):
# return osp.join(self.root)
#
# @property
# def num_classes(self):
# return self.__num_classes__
#
# @property
# def raw_file_names(self):
# return [os.path.join(self.name + '_%s.pkl' % self.split)]
#
# @property
# def processed_file_names(self):
# return 'geometric_data_processed' + self.name + self.split + '.pt'
#
# def download(self):
# r"""Downloads the dataset to the :obj:`self.raw_dir` folder."""
# raise NotImplementedError
#
# def process(self):
# with open(os.path.join(self.root, self.name + '_%s.pkl' % self.split), 'rb') as f:
# self.dataset = pickle.load(f)
# # self.n_samples = len(self.dataset)
# print("preparing graphs for the %s set..." % (self.split.upper()))
# print('Converting graphs into PyG objects...')
# pyg_graph_list = []
# for data in tqdm(self.dataset):
# node_features = data.node_feat
# edge_list = (data.W != 0).nonzero() # converting adj matrix to edge_list
# g = Data()
# g.__num_nodes__ = node_features.size(0)
# g.edge_index = edge_list.T
# #g.edge_index = torch.from_numpy(edge_list)
# g.x = node_features.long()
# # adding edge features for Residual Gated ConvNet
# edge_feat_dim = 1
# g.edge_attr = torch.ones(g.num_edges, edge_feat_dim)
# g.y = data.node_label.to(torch.float32)
# pyg_graph_list.append(g)
# del self.dataset
# data, slices = self.collate(pyg_graph_list)
# print('Saving...')
# torch.save((data, slices), self.processed_paths[0])
#
# def __repr__(self):
# return '{}()'.format(self.__class__.__name__)
class Data_idx(object):
    """Minimal single-graph container mimicking a one-item dataset.

    Stores the usual PyG fields (x, edge_index, edge_attr, y, pos_enc) and
    materialises them into a fresh `Data` object on every __getitem__ call.
    """

    def __init__(self, x=None, edge_index=None, edge_attr=None, y=None, pos_enc=None,
                 **kwargs):
        self.x = x
        self.edge_index = edge_index
        self.y = y
        self.edge_attr = edge_attr
        self.pos_enc = pos_enc

    def __len__(self):
        r"""The number of examples in the dataset."""
        return 1

    def __getitem__(self, idx):
        """Return the single stored graph as a PyG `Data` object.

        `idx` is ignored: the container always holds exactly one graph.
        """
        # Use `is None`, not `== None`: when pos_enc is a tensor, `==`
        # performs an (elementwise) comparison instead of an identity check.
        fields = dict(x=self.x,
                      edge_index=self.edge_index,
                      y=self.y,
                      edge_attr=self.edge_attr)
        if self.pos_enc is not None:
            fields['pos_enc'] = self.pos_enc
        return Data(**fields)
#
# def __repr__(self):
# cls = str(self.__class__.__name__)
# has_dict = any([isinstance(item, dict) for _, item in self])
#
# if not has_dict:
# info = [size_repr(key, item) for key, item in self]
# return '{}({})'.format(cls, ', '.join(info))
# else:
# info = [size_repr(key, item, indent=2) for key, item in self]
# return '{}(\n{}\n)'.format(cls, ',\n'.join(info))
def to_undirected(edge_index, num_nodes=None):
    r"""Make the graph given by :attr:`edge_index` undirected.

    Every edge :math:`(i,j)` gains a mirrored edge :math:`(j,i)`. A weight
    column is returned alongside: original-direction edges carry value 2,
    mirrored copies carry value 1 (summed where duplicates coalesce).

    Args:
        edge_index (LongTensor): The edge indices.
        num_nodes (int, optional): The number of nodes, *i.e.*
            :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
    :rtype: (:class:`LongTensor`, :class:`FloatTensor`)
    """
    n = maybe_num_nodes(edge_index, num_nodes)
    src, dst = edge_index
    # weights distinguish original (2) from mirrored (1) edges
    fwd_weight = torch.full([src.size(0)], 2)
    rev_weight = torch.full([dst.size(0)], 1)
    weights = torch.cat([fwd_weight, rev_weight], dim=0)
    doubled = torch.stack([torch.cat([src, dst], dim=0),
                           torch.cat([dst, src], dim=0)], dim=0)
    # coalesce sorts indices and sums weights of duplicate edges
    doubled, weights = coalesce(doubled, weights, n, n)
    return doubled, weights.view(-1, 1).type(torch.float32)
class ogbnDatasetpyg(InMemoryDataset):
    """Loader normalising OGB node-property datasets to this repo's layout.

    Per-dataset handling:
      * ogbn-arxiv: citation edges made undirected via the module-level
        to_undirected() (edge weight 2 for original direction, 1 mirrored).
      * ogbn-proteins: node features built by mean-pooling incident edge
        features; dummy all-ones 1-dim edge features substituted.
      * ogbn-mag: restricted to paper<->paper citation relations, wrapped
        in Data_idx.
      * ogbn-products: dummy all-ones 1-dim edge features added.
    Optionally concatenates a precomputed node embedding to `x`.
    """

    def __init__(self, name, use_node_embedding = False):
        """Load OGB dataset `name` and apply the per-dataset normalisation."""
        start = time.time()
        print("[I] Loading data ...")
        self.name = name
        data_dir = 'data/ogbn'
        # edge_index = self.dataset[0].edge_index
        # dataset = PygNodePropPredDataset(name=name, root=data_dir)
        if name == 'ogbn-arxiv':
            self.dataset = PygNodePropPredDataset(name=name, root=data_dir)
            self.split_idx = self.dataset.get_idx_split()
            # symmetrise the citation graph; also produces per-edge weights
            self.dataset.data.edge_index, self.dataset.data.edge_attr = to_undirected(self.dataset[0].edge_index, self.dataset[0].num_nodes)
            self.dataset.data.y = self.dataset.data.y.squeeze(1)
            # re-slice so the single graph spans all (new) undirected edges
            self.dataset.slices['edge_index'] = torch.tensor([0,self.dataset.data.edge_index.size(1)],dtype=torch.long)
            self.dataset.slices['edge_attr'] = torch.tensor([0, self.dataset.data.edge_index.size(1)],
                                                            dtype=torch.long)
        if name == 'ogbn-proteins':
            self.dataset = PygNodePropPredDataset(name=name, root=data_dir)
            self.split_idx = self.dataset.get_idx_split()
            # node features = mean of incident edge features (scatter over src)
            self.dataset.data.x = scatter(self.dataset[0].edge_attr, self.dataset[0].edge_index[0], dim=0,
                                          dim_size=self.dataset[0].num_nodes, reduce='mean').to('cpu')
            self.dataset.slices['x'] = torch.tensor([0, self.dataset.data.y.size(0)],
                                                    dtype=torch.long)
            # self.edge_attr = self.dataset[0].edge_attr
            edge_feat_dim = 1
            # dummy scalar edge features replace the raw ones for the models here
            self.edge_attr = torch.ones(self.dataset[0].num_edges, edge_feat_dim).type(torch.float32)
        if name == 'ogbn-mag':
            dataset = PygNodePropPredDataset(name=name, root=data_dir)
            self.split_idx = dataset.get_idx_split()
            rel_data = dataset[0]
            edge_index, self.edge_attr = to_undirected(rel_data.edge_index_dict[('paper', 'cites', 'paper')],
                                                       rel_data.num_nodes['paper'])
            # We are only interested in paper <-> paper relations.
            self.dataset = Data_idx(
                x=rel_data.x_dict['paper'],
                edge_index=edge_index,
                y=rel_data.y_dict['paper'],
                edge_attr = self.edge_attr)
        if name == 'ogbn-products':
            self.dataset = PygNodePropPredDataset(name=name, root=data_dir)
            self.split_idx = self.dataset.get_idx_split()
            self.dataset.slices['edge_attr'] = torch.tensor([0, self.dataset.data.edge_index.size(1)],
                                                            dtype=torch.long)
            edge_feat_dim = 1
            self.dataset.edge_attr = torch.ones(self.dataset[0].num_edges, edge_feat_dim).type(torch.float32)
        if use_node_embedding:
            # concatenate a precomputed embedding (e.g. node2vec) onto x;
            # file must exist at data/ogbn/embedding_<suffix>.pt
            print("use_node_embedding ...")
            embedding = torch.load('data/ogbn/embedding_' + name[5:] + '.pt', map_location='cpu')
            self.dataset.data.x = torch.cat([self.dataset.data.x, embedding], dim=-1)
            # self.dataset.data.embedding = embedding#torch.cat([self.dataset.data.x, embedding], dim=-1)
            # self.dataset.slices['embedding'] = torch.tensor([0, self.dataset.data.x.size(0)],
            #                                                 dtype=torch.long)
        # edge_attr.type =
        # edge_feat_dim = 1
        # self.edge_attr = torch.ones(self.dataset[0].num_edges, edge_feat_dim)
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    def _add_positional_encodings(self, pos_enc_dim, DATASET_NAME = None):
        # Graph positional encoding v/ Laplacian eigenvectors.
        # NOTE(review): relies on self.collate inherited from InMemoryDataset;
        # the whole (single) graph is re-collated with pos_enc attached.
        # self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
        # iter(self.train)
        self.graph_lists = [positional_encoding(self.dataset[0], pos_enc_dim, DATASET_NAME = DATASET_NAME)]
        self.dataset.data, self.dataset.slices = self.collate(self.graph_lists)

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class SBMsDatasetDGL(torch.utils.data.Dataset):
    """Bundle of the train/test/val SBM splits, each a load_SBMsDataSetDGL."""

    def __init__(self, name):
        """Load all three pickled splits of SBM dataset `name` from disk."""
        t_start = time.time()
        print("[I] Loading data ...")
        self.name = name
        base_dir = 'data/SBMs'
        self.train = load_SBMsDataSetDGL(base_dir, name, split='train')
        self.test = load_SBMsDataSetDGL(base_dir, name, split='test')
        self.val = load_SBMsDataSetDGL(base_dir, name, split='val')
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-t_start))
def self_loop(g):
    """
    Utility function only, to be used only when necessary as per user self_loop flag.

    Builds a fresh DGLGraph equal to `g` but with exactly one self-loop per
    node (pre-existing self-loops are dropped first), keeping ndata['feat']
    and writing a dummy zero edata['feat']. Replaces
    dgl.transform.add_self_loop(), which would lose the 'feat' fields.
    This function is called inside a function in SBMsDataset class.
    """
    out = dgl.DGLGraph()
    n_nodes = g.number_of_nodes()
    out.add_nodes(n_nodes)
    out.ndata['feat'] = g.ndata['feat']

    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    keep = src != dst  # strip any existing self-loops before re-adding them
    out.add_edges(src[keep], dst[keep])
    all_nodes = np.arange(n_nodes)
    out.add_edges(all_nodes, all_nodes)

    # This new edata is not used since this function gets called only for GCN, GAT
    # However, we need this for the generic requirement of ndata and edata
    out.edata['feat'] = torch.zeros(out.number_of_edges())
    return out
def positional_encoding(g, pos_enc_dim, DATASET_NAME = None):
    """
    Graph positional encoding v/ Laplacian eigenvectors (PyG graph `g`).

    Attaches `g.pos_enc` with the `pos_enc_dim` non-trivial eigenvectors of
    the symmetric-normalized Laplacian. The eigenvector matrix is cached at
    data/ogbn/laplacian_<suffix>.pt and reused when present with the right
    width; otherwise it is (re)computed and re-cached.
    """
    cache_path = 'data/ogbn/laplacian_' + DATASET_NAME[5:] + '.pt'

    def _compute_and_cache():
        # Symmetric normalized Laplacian in CSR form.
        L = get_laplacian(g.edge_index, normalization='sym', dtype=torch.float64)
        L = csr_matrix((L[1], (L[0][0], L[0][1])), shape=(g.num_nodes, g.num_nodes))
        # k smallest-real eigenpairs; loose tol keeps this tractable for ~40 PEs.
        EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2)
        EigVec = EigVec[:, EigVal.argsort()]  # increasing order
        # astype discards the imaginary part (needed since pytorch 1.5.0);
        # the first (trivial) eigenvector is skipped.
        g.pos_enc = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
        torch.save(g.pos_enc.cpu(), cache_path)

    # `except Exception` (not a bare `except:`) so Ctrl-C / SystemExit still
    # propagate; any failure to load the cache falls back to recomputation,
    # matching the original behavior without the duplicated compute block.
    try:
        g.pos_enc = torch.load(cache_path, map_location='cpu')
        if g.pos_enc.size(1) != pos_enc_dim:
            os.remove(cache_path)  # cached width mismatch: rebuild
            _compute_and_cache()
    except Exception:
        _compute_and_cache()
    return g
class SBMsDataset(torch.utils.data.Dataset):
    """Pre-pickled SBM dataset (train/val/test DGL splits in one .pkl file),
    with collate helpers for sparse GNNs and dense WL-GNNs."""

    def __init__(self, name):
        """
        Loading SBM datasets
        """
        start = time.time()
        print("[I] Loading dataset %s..." % (name))
        self.name = name
        data_dir = 'data/SBMs/'
        # the pickle holds (train, val, test) split objects in that order
        with open(data_dir+name+'.pkl',"rb") as f:
            f = pickle.load(f)
            self.train = f[0]
            self.val = f[1]
            self.test = f[2]
        print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        # per-node labels are concatenated across the batch
        labels = torch.cat(labels).long()
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = torch.cat(tab_snorm_n).sqrt()
        #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
        #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
        #snorm_e = torch.cat(tab_snorm_e).sqrt()
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels

    # prepare dense tensors for GNNs which use; such as RingGNN and 3WLGNN
    def collate_dense_gnn(self, samples):
        # The input samples is a list of pairs (graph, label).
        # Only the FIRST graph of the batch is used (dense models take batch size 1).
        graphs, labels = map(list, zip(*samples))
        labels = torch.cat(labels).long()
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = tab_snorm_n[0][0].sqrt()
        #batched_graph = dgl.batch(graphs)

        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
        Adapted from https://github.com/leichen2018/Ring-GNN/
        Assigning node and edge feats::
        we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
        Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
        The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
        The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        # one-hot node types are placed on the diagonal channels
        if self.name == 'SBM_CLUSTER':
            self.num_node_type = 7
        elif self.name == 'SBM_PATTERN':
            self.num_node_type = 3
        # use node feats to prepare adj
        adj_node_feat = torch.stack([zero_adj for j in range(self.num_node_type)])
        adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
        for node, node_label in enumerate(g.ndata['feat']):
            adj_node_feat[node_label.item()+1][node][node] = 1
        x_node_feat = adj_node_feat.unsqueeze(0)
        return x_node_feat, labels

    def _sym_normalize_adj(self, adj):
        # D^{-1/2} A D^{-1/2}; zero-degree nodes get a 0 inverse-degree
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
        self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
        self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]

    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors.
        # NOTE(review): the positional_encoding() defined in this module takes
        # (g, pos_enc_dim, DATASET_NAME=None) and expects a PyG graph — the
        # `framework='dgl'` kwarg below would raise TypeError; this looks
        # intended for the DGL variant of positional_encoding — confirm.
        self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.train.graph_lists]
        self.val.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.val.graph_lists]
        self.test.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.test.graph_lists]
| 22,737
| 38.407279
| 140
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/molecules.py
|
import torch
import pickle
import torch.utils.data
import time
import os
import numpy as np
import csv
import dgl
from scipy import sparse as sp
import numpy as np
from torch_geometric.data import Data
from torch_geometric.data import InMemoryDataset
from tqdm import tqdm
# *NOTE
# The dataset pickle and index files are in ./zinc_molecules/ dir
# [<split>.pickle and <split>.index; for split 'train', 'val' and 'test']
class MoleculeDGL(torch.utils.data.Dataset):
    """One split of the ZINC molecule dataset as DGL graphs.

    Loads `<split>.pickle`, keeps only the molecules listed in
    `<split>.index`, and eagerly converts each one into a DGLGraph
    (graph_lists) with its regression target (graph_labels).
    """

    def __init__(self, data_dir, split, num_graphs):
        self.data_dir = data_dir
        self.split = split
        self.num_graphs = num_graphs

        with open(data_dir + "/%s.pickle" % self.split,"rb") as f:
            self.data = pickle.load(f)

        # loading the sampled indices from file ./zinc_molecules/<split>.index
        with open(data_dir + "/%s.index" % self.split,"r") as f:
            data_idx = [list(map(int, idx)) for idx in csv.reader(f)]
            self.data = [ self.data[i] for i in data_idx[0] ]

        assert len(self.data)==num_graphs, "Sample num_graphs again; available idx: train/val/test => 10k/1k/1k"

        """
        data is a list of Molecule dict objects with following attributes

          molecule = data[idx]
        ; molecule['num_atom'] : nb of atoms, an integer (N)
        ; molecule['atom_type'] : tensor of size N, each element is an atom type, an integer between 0 and num_atom_type
        ; molecule['bond_type'] : tensor of size N x N, each element is a bond type, an integer between 0 and num_bond_type
        ; molecule['logP_SA_cycle_normalized'] : the chemical property to regress, a float variable
        """

        self.graph_lists = []
        self.graph_labels = []
        self.n_samples = len(self.data)
        self._prepare()

    def _prepare(self):
        # Convert each molecule dict into a DGLGraph with atom-type node
        # features and bond-type edge features.
        print("preparing %d graphs for the %s set..." % (self.num_graphs, self.split.upper()))

        for molecule in self.data:
            node_features = molecule['atom_type'].long()

            adj = molecule['bond_type']
            edge_list = (adj != 0).nonzero()  # converting adj matrix to edge_list
            edge_idxs_in_adj = edge_list.split(1, dim=1)
            # bond type of every nonzero adjacency entry, in edge_list order
            edge_features = adj[edge_idxs_in_adj].reshape(-1).long()

            # Create the DGL Graph
            g = dgl.DGLGraph()
            g.add_nodes(molecule['num_atom'])
            g.ndata['feat'] = node_features

            for src, dst in edge_list:
                g.add_edges(src.item(), dst.item())
            g.edata['feat'] = edge_features

            self.graph_lists.append(g)
            self.graph_labels.append(molecule['logP_SA_cycle_normalized'])

    def __len__(self):
        """Return the number of graphs in the dataset."""
        return self.n_samples

    def __getitem__(self, idx):
        """
        Get the idx^th sample.
        Parameters
        ---------
        idx : int
            The sample index.
        Returns
        -------
        (dgl.DGLGraph, int)
            DGLGraph with node feature stored in `feat` field
            And its label.
        """
        return self.graph_lists[idx], self.graph_labels[idx]
class Moleculepyg(InMemoryDataset):
    """One split of the ZINC molecule dataset as a PyG InMemoryDataset.

    Loads the raw pickle + index (like MoleculeDGL), then relies on the
    InMemoryDataset machinery: `process()` converts molecules to Data objects
    once and caches the collated result at `processed_paths[0]`.
    """

    def __init__(self, data_dir, split, num_graphs, name, root = 'dataset', transform=None, pre_transform=None, meta_dict = None):
        self.data_dir = data_dir
        self.split = split
        self.num_graphs = num_graphs
        self.root = root
        self.name = name

        with open(data_dir + "/%s.pickle" % self.split, "rb") as f:
            self.data = pickle.load(f)

        # loading the sampled indices from file ./zinc_molecules/<split>.index
        with open(data_dir + "/%s.index" % self.split, "r") as f:
            data_idx = [list(map(int, idx)) for idx in csv.reader(f)]
            self.data = [self.data[i] for i in data_idx[0]]

        assert len(self.data) == num_graphs, "Sample num_graphs again; available idx: train/val/test => 10k/1k/1k"

        """
        data is a list of Molecule dict objects with following attributes

          molecule = data[idx]
        ; molecule['num_atom'] : nb of atoms, an integer (N)
        ; molecule['atom_type'] : tensor of size N, each element is an atom type, an integer between 0 and num_atom_type
        ; molecule['bond_type'] : tensor of size N x N, each element is a bond type, an integer between 0 and num_bond_type
        ; molecule['logP_SA_cycle_normalized'] : the chemical property to regress, a float variable
        """
        self.n_samples = len(self.data)
        # NOTE(review): self.data holds the raw molecule list here; after
        # super().__init__ (which may trigger process()), it is overwritten
        # with the collated tensor cache below. This relies on older PyG
        # where `data` is a plain attribute — confirm against the pinned
        # torch_geometric version.
        super(Moleculepyg, self).__init__(self.root, transform)
        self.data, self.slices = torch.load(self.processed_paths[0])
        #self.process()

    @property
    def processed_file_names(self):
        # cache file is keyed on dataset name and split
        return 'geometric_data_processed' + self.name + self.split + '.pt'

    def process(self):
        """Convert raw molecule dicts into collated PyG Data and cache to disk."""
        print("preparing %d graphs for the %s set..." % (self.num_graphs, self.split.upper()))
        print('Converting graphs into PyG objects...')
        pyg_graph_list = []
        graph_labels = []
        for graph in tqdm(self.data):
            node_features = graph['atom_type'].long()
            adj = graph['bond_type']
            edge_list = (adj != 0).nonzero()  # converting adj matrix to edge_list
            edge_idxs_in_adj = edge_list.split(1, dim=1)
            edge_features = adj[edge_idxs_in_adj].reshape(-1).long()
            g = Data()
            g.__num_nodes__ = graph['num_atom']
            # nonzero() yields (num_edges, 2); transpose to PyG's (2, num_edges)
            g.edge_index = edge_list.T
            #g.edge_index = torch.from_numpy(edge_list)
            g.edge_attr = edge_features
            # free the dense adjacency as we go to bound peak memory
            del graph['bond_type']

            if graph['atom_type'] is not None:
                g.x = graph['atom_type'].long()
            del graph['atom_type']
            graph_labels.append(graph['logP_SA_cycle_normalized'])
            pyg_graph_list.append(g)

        # attach regression targets (float) after all graphs are built
        for i, g in enumerate(pyg_graph_list):
            # if 'classification' in self.task_type:
            #     if has_nan:
            g.y = graph_labels[i].to(torch.float32)
            #     else:
            #         g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.long)
            # else:
            #     g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.float32)
        data, slices = self.collate(pyg_graph_list)
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])
class MoleculeDatasetDGL(torch.utils.data.Dataset):
    """Bundle of the ZINC train/val/test splits backed by MoleculeDGL.

    `framework` is accepted for signature compatibility but unused here.
    """

    def __init__(self, name='Zinc', framework = 'pyg'):
        started = time.time()
        self.name = name
        # known meta-info about the zinc dataset; can be calculated as well
        self.num_atom_type = 28
        self.num_bond_type = 4
        base_dir = './data/molecules'
        for split_name, count in (('train', 10000), ('val', 1000), ('test', 1000)):
            setattr(self, split_name, MoleculeDGL(base_dir, split_name, num_graphs=count))
        print("Time taken: {:.4f}s".format(time.time()-started))
class MoleculeDatasetpyg(InMemoryDataset):
    """Bundle of the ZINC train/val/test splits backed by Moleculepyg."""

    def __init__(self, name='ZINC'):
        started = time.time()
        self.name = name
        # known meta-info about the zinc dataset; can be calculated as well
        self.num_atom_type = 28
        self.num_bond_type = 4
        base_dir = './data/molecules'
        self.train = Moleculepyg(base_dir, 'train', num_graphs=10000, name='ZINC')
        self.val = Moleculepyg(base_dir, 'val', num_graphs=1000, name='ZINC')
        self.test = Moleculepyg(base_dir, 'test', num_graphs=1000, name='ZINC')
        print("Time taken: {:.4f}s".format(time.time() - started))
def self_loop(g):
    """
    Rebuild `g` with exactly one self loop per node (DGL graphs).

    Replacement for dgl.transform.add_self_loop() that preserves
    ndata['feat'] and re-creates a zeroed edata['feat'], since the stock
    transform drops those fields. Used from the MoleculeDataset class.
    """
    result = dgl.DGLGraph()
    result.add_nodes(g.number_of_nodes())
    result.ndata['feat'] = g.ndata['feat']

    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    # Copy every edge except pre-existing self loops, then add one per node.
    keep = src != dst
    result.add_edges(src[keep], dst[keep])
    all_nodes = np.arange(g.number_of_nodes())
    result.add_edges(all_nodes, all_nodes)

    # Dummy edge features: only GCN/GAT reach this path, but the pipeline
    # generically expects ndata['feat'] and edata['feat'] to exist.
    result.edata['feat'] = torch.zeros(result.number_of_edges())
    return result
def positional_encoding(g, pos_enc_dim):
    """
    Attach Laplacian-eigenvector positional encodings to g.ndata['pos_enc'].
    """
    # Symmetrically normalised Laplacian: L = I - D^{-1/2} A D^{-1/2}.
    adj = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
    deg_inv_sqrt = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
    lap = sp.eye(g.number_of_nodes()) - deg_inv_sqrt * adj * deg_inv_sqrt

    # Dense eigendecomposition; order eigenvectors by increasing eigenvalue
    # and drop the first (trivial, constant) one.
    eigval, eigvec = np.linalg.eig(lap.toarray())
    order = eigval.argsort()
    eigvec = np.real(eigvec[:, order])
    g.ndata['pos_enc'] = torch.from_numpy(eigvec[:, 1:pos_enc_dim + 1]).float()

    # # Eigenvectors with scipy
    # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
    # EigVec = EigVec[:, EigVal.argsort()] # increasing order
    # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
    return g
class MoleculeDataset(torch.utils.data.Dataset):
    # Wrapper around pre-pickled train/val/test splits of a molecule dataset
    # (e.g. ZINC), plus collate helpers for sparse (DGL) models and dense
    # (RingGNN / 3WLGNN) models.

    def __init__(self, name):
        """
            Load the pickled molecule dataset `data/molecules/<name>.pkl`,
            a 5-tuple of (train, val, test, num_atom_type, num_bond_type).
        """
        start = time.time()
        print("[I] Loading dataset %s..." % (name))
        self.name = name
        data_dir = 'data/molecules/'
        with open(data_dir+name+'.pkl',"rb") as f:
            # NOTE: `f` is rebound from the file handle to the unpickled tuple.
            f = pickle.load(f)
            self.train = f[0]
            self.val = f[1]
            self.test = f[2]
            self.num_atom_type = f[3]
            self.num_bond_type = f[4]
        print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        # Scalar regression targets stacked into a (batch, 1) tensor.
        labels = torch.tensor(np.array(labels)).unsqueeze(1)
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = torch.cat(tab_snorm_n).sqrt()
        #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
        #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
        #snorm_e = torch.cat(tab_snorm_e).sqrt()
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels

    # prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
    def collate_dense_gnn(self, samples, edge_feat):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels)).unsqueeze(1)
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = tab_snorm_n[0][0].sqrt()
        #batched_graph = dgl.batch(graphs)
        # Only the first graph of the batch is densified; presumably this
        # collate is used with batch size 1 — TODO confirm against callers.
        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
            Adapted from https://github.com/leichen2018/Ring-GNN/
            Assigning node and edge feats::
            we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
            Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
            The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
            The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        if edge_feat:
            # use edge feats also to prepare adj
            adj_with_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type + self.num_bond_type)])
            adj_with_edge_feat = torch.cat([adj.unsqueeze(0), adj_with_edge_feat], dim=0)
            us, vs = g.edges()
            # One-hot encode the bond type of each edge at its (u, v) position.
            for idx, edge_label in enumerate(g.edata['feat']):
                adj_with_edge_feat[edge_label.item()+1+self.num_atom_type][us[idx]][vs[idx]] = 1
            # One-hot encode the atom type of each node on the diagonal.
            for node, node_label in enumerate(g.ndata['feat']):
                adj_with_edge_feat[node_label.item()+1][node][node] = 1
            x_with_edge_feat = adj_with_edge_feat.unsqueeze(0)
            return None, x_with_edge_feat, labels
        else:
            # use only node feats to prepare adj
            adj_no_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type)])
            adj_no_edge_feat = torch.cat([adj.unsqueeze(0), adj_no_edge_feat], dim=0)
            for node, node_label in enumerate(g.ndata['feat']):
                adj_no_edge_feat[node_label.item()+1][node][node] = 1
            x_no_edge_feat = adj_no_edge_feat.unsqueeze(0)
            return x_no_edge_feat, None, labels

    def _sym_normalize_adj(self, adj):
        # Symmetric normalisation D^{-1/2} A D^{-1/2}; zero-degree nodes map to 0.
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
        self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
        self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]

    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors
        self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
        self.val.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val.graph_lists]
        self.test.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test.graph_lists]
| 14,989
| 39.404313
| 130
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/data.py
|
"""
File to load dataset based on user control from main file
"""
from data.molecules import *
from data.SBMs import SBMsDataset, SBMsDatasetpyg
from data.planetoids import PlanetoidDataset
from data.ogbn import ogbnDatasetpyg
def LoadData(DATASET_NAME, use_node_embedding = False, framework = 'dgl'):
    """
    Dispatch a dataset name to its loader; called from the main.py files.

    Parameters:
        DATASET_NAME: one of 'ZINC', the SBM names, the Planetoid names, or
            an 'ogbn-*' name.
        use_node_embedding: forwarded to Planetoid / OGB loaders.
        framework: 'dgl' or 'pyg'; selects the backend for ZINC and SBMs.

    Returns:
        The dataset object for the requested name.

    Raises:
        ValueError: if DATASET_NAME is not recognized (previously the
        function fell through and silently returned None).
    """
    # handling for (ZINC) molecule dataset
    if DATASET_NAME == 'ZINC':
        return MoleculeDataset(DATASET_NAME) if 'dgl' == framework else MoleculeDatasetpyg(DATASET_NAME)

    # handling for SBM datasets
    SBM_DATASETS = ['SBM_CLUSTER', 'SBM_PATTERN']
    if DATASET_NAME in SBM_DATASETS:
        return SBMsDatasetpyg(DATASET_NAME) if 'pyg' == framework else SBMsDataset(DATASET_NAME)

    # handling for Planetoid citation datasets (pyg backend only)
    if DATASET_NAME in ['Cora', 'Citeseer', 'Pubmed']:
        return PlanetoidDataset(DATASET_NAME, use_node_embedding = use_node_embedding)

    # handling for OGB node-property-prediction datasets (pyg backend only)
    if DATASET_NAME in ['ogbn-arxiv', 'ogbn-proteins', 'ogbn-mag', 'ogbn-products']:
        return ogbnDatasetpyg(name=DATASET_NAME, use_node_embedding = use_node_embedding)

    raise ValueError("Unknown dataset name: %r" % (DATASET_NAME,))
| 1,193
| 33.114286
| 104
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/node2vec_citeseer.py
|
import argparse
import torch
from torch_geometric.nn import Node2Vec
from torch_geometric.utils import to_undirected
import torch_geometric as pyg
from ogb.nodeproppred import PygNodePropPredDataset
import os.path as osp
def save_embedding(model):
    """Persist the learned Node2Vec embedding table to disk as a CPU tensor."""
    weights = model.embedding.weight.data.cpu()
    torch.save(weights, 'data/planetoid/embedding_Citeseer.pt')
def main():
    """Train a Node2Vec embedding on the (undirected) Citeseer graph and save it."""
    parser = argparse.ArgumentParser(description='OGBN-citeseer (Node2Vec)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--embedding_dim', type=int, default=256)
    parser.add_argument('--walk_length', type=int, default=80)
    parser.add_argument('--context_size', type=int, default=20)
    parser.add_argument('--walks_per_node', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--log_steps', type=int, default=1)
    args = parser.parse_args()

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    # root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'arxiv')
    data_dir = 'planetoid'
    dataset = pyg.datasets.Planetoid(name='Citeseer',root = data_dir)
    data = dataset[0]
    # Random walks treat the citation graph as undirected.
    data.edge_index = to_undirected(data.edge_index, data.num_nodes)

    model = Node2Vec(data.edge_index, args.embedding_dim, args.walk_length,
                     args.context_size, args.walks_per_node,
                     sparse=True).to(device)

    loader = model.loader(batch_size=args.batch_size, shuffle=True,
                          num_workers=4)
    # Sparse embedding gradients require SparseAdam rather than plain Adam.
    optimizer = torch.optim.SparseAdam(model.parameters(), lr=args.lr)

    model.train()
    for epoch in range(1, args.epochs + 1):
        for i, (pos_rw, neg_rw) in enumerate(loader):
            optimizer.zero_grad()
            # Skip-gram style loss over positive and negative random walks.
            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
            loss.backward()
            optimizer.step()

            if (i + 1) % args.log_steps == 0:
                print(f'Epoch: {epoch:02d}, Step: {i+1:03d}/{len(loader)}, '
                      f'Loss: {loss:.4f}')

            if (i + 1) % 100 == 0:  # Save model every 100 steps.
                save_embedding(model)
        save_embedding(model)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 2,380
| 36.793651
| 89
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/SBMs.py
|
import time
import os
import pickle
import numpy as np
import os.path as osp
import dgl
import torch
from ogb.utils.url import decide_download, download_url, extract_zip
from scipy import sparse as sp
import numpy as np
from tqdm import tqdm
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Data
from scipy.sparse import csr_matrix
from torch_geometric.utils import get_laplacian
class load_SBMsDataSetDGL(torch.utils.data.Dataset):
    # Loads one pickled SBM split and converts every sample into a DGLGraph
    # with node labels in ndata['feat'] and dummy edge features in edata['feat'].

    def __init__(self,
                 data_dir,
                 name,
                 split):

        self.split = split
        self.is_test = split.lower() in ['test', 'val']  # NOTE(review): computed but unused in this class — confirm
        with open(os.path.join(data_dir, name + '_%s.pkl' % self.split), 'rb') as f:
            self.dataset = pickle.load(f)
        self.node_labels = []
        self.graph_lists = []
        self.n_samples = len(self.dataset)
        self._prepare()

    def _prepare(self):
        # Convert each raw sample (dense adjacency W + node_feat/node_label)
        # into a DGLGraph; edges are added one by one from the nonzero entries.
        print("preparing %d graphs for the %s set..." % (self.n_samples, self.split.upper()))

        for data in self.dataset:
            node_features = data.node_feat
            edge_list = (data.W != 0).nonzero()  # converting adj matrix to edge_list

            # Create the DGL Graph
            g = dgl.DGLGraph()
            g.add_nodes(node_features.size(0))
            g.ndata['feat'] = node_features.long()
            for src, dst in edge_list:
                g.add_edges(src.item(), dst.item())

            # adding edge features for Residual Gated ConvNet
            #edge_feat_dim = g.ndata['feat'].size(1) # dim same as node feature dim
            edge_feat_dim = 1 # dim same as node feature dim
            g.edata['feat'] = torch.ones(g.number_of_edges(), edge_feat_dim)

            self.graph_lists.append(g)
            self.node_labels.append(data.node_label)

    def __len__(self):
        """Return the number of graphs in the dataset."""
        return self.n_samples

    def __getitem__(self, idx):
        """
            Get the idx^th sample.
            Parameters
            ---------
            idx : int
                The sample index.
            Returns
            -------
            (dgl.DGLGraph, int)
                DGLGraph with node feature stored in `feat` field
                And its label.
        """
        return self.graph_lists[idx], self.node_labels[idx]
class PygNodeSBMsDataset(InMemoryDataset):
    """
    PyG InMemoryDataset for one split (train/val/test) of an SBM dataset.

    Reads `<name>_<split>.pkl` from `data_dir`, converts each raw graph into
    a `torch_geometric.data.Data` object, and caches the collated tensors in
    the processed directory.
    """
    def __init__(self,
                 data_dir,
                 name,
                 split,
                 transform = None,
                 pre_transform = None,
                 meta_dict = None
                 ):
        self.url = ''
        self.split = split
        self.root = data_dir
        self.original_root = data_dir
        self.name = name
        self.is_test = split.lower() in ['test', 'val']
        self.node_labels = []
        self.graph_lists = []
        # Fix: forward pre_transform to the parent class instead of silently
        # dropping it (meta_dict remains unused; kept for API compatibility).
        super(PygNodeSBMsDataset, self).__init__(self.root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_dir(self):
        return osp.join(self.root)

    @property
    def num_classes(self):
        # NOTE(review): self.__num_classes__ is never assigned in this class,
        # so accessing this property raises AttributeError unless a caller
        # sets it first — confirm intended usage.
        return self.__num_classes__

    @property
    def raw_file_names(self):
        return [os.path.join(self.name + '_%s.pkl' % self.split)]

    @property
    def processed_file_names(self):
        # One processed file per (dataset, split) pair.
        return 'geometric_data_processed' + self.name + self.split + '.pt'

    def download(self):
        """Download and unpack the dataset archive (self.url is empty by default)."""
        # Fix: shutil was used below but never imported at module level,
        # which made every call to download() raise NameError.
        import shutil
        url = self.url
        if decide_download(url):
            path = download_url(url, self.original_root)
            extract_zip(path, self.original_root)
            os.unlink(path)
            shutil.rmtree(self.root)
            # NOTE(review): self.download_name is never defined in this class;
            # this line would raise AttributeError if reached — confirm.
            shutil.move(osp.join(self.original_root, self.download_name), self.root)
        else:
            print('Stop downloading.')
            shutil.rmtree(self.root)
            exit(-1)

    def process(self):
        """Convert the pickled split into PyG Data objects and save collated tensors."""
        with open(os.path.join(self.root, self.name + '_%s.pkl' % self.split), 'rb') as f:
            self.dataset = pickle.load(f)
        print("preparing graphs for the %s set..." % (self.split.upper()))
        print('Converting graphs into PyG objects...')
        pyg_graph_list = []
        for data in tqdm(self.dataset):
            node_features = data.node_feat
            edge_list = (data.W != 0).nonzero()  # converting adj matrix to edge_list
            g = Data()
            g.__num_nodes__ = node_features.size(0)
            g.edge_index = edge_list.T
            g.x = node_features.long()
            # adding edge features for Residual Gated ConvNet
            edge_feat_dim = 1
            g.edge_attr = torch.ones(g.num_edges, edge_feat_dim)
            # Per-node class labels.
            g.y = data.node_label.to(torch.float32)
            pyg_graph_list.append(g)
        # Free the raw pickle before collating to reduce peak memory.
        del self.dataset
        data, slices = self.collate(pyg_graph_list)
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class SBMsDatasetpyg(InMemoryDataset):
    # Bundles the PyG train/val/test splits of an SBM dataset and offers an
    # in-place Laplacian positional-encoding step.

    def __init__(self, name):
        """
            Load the PyG train/test/val splits of the named SBM dataset
            from data/SBMs.
        """
        start = time.time()
        print("[I] Loading data ...")
        self.name = name
        data_dir = 'data/SBMs'
        self.train = PygNodeSBMsDataset(data_dir, name, split='train')
        self.test = PygNodeSBMsDataset(data_dir, name, split='test')
        self.val = PygNodeSBMsDataset(data_dir, name, split='val')
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors.
        # Each split is re-materialised as a list of graphs with `pos_enc`
        # attached, then re-collated into the InMemoryDataset storage.
        # self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
        # iter(self.train)
        self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'pyg') for _, g in enumerate(self.train)]
        self.val.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'pyg') for _, g in enumerate(self.val)]
        self.test.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'pyg') for _, g in enumerate(self.test)]
        # self.train.data.pos_enc = [torch.cat(g.pos_enc,dim=0) for _, g in enumerate(self.train.graph_lists)]
        self.train.data, self.train.slices = self.collate(self.train.graph_lists)
        self.val.data, self.val.slices = self.collate(self.val.graph_lists)
        self.test.data, self.test.slices = self.collate(self.test.graph_lists)
class SBMsDatasetDGL(torch.utils.data.Dataset):
    def __init__(self, name):
        """Bundle the DGL train/test/val splits of the named SBM dataset."""
        t0 = time.time()
        print("[I] Loading data ...")
        self.name = name
        data_dir = 'data/SBMs'
        for split in ('train', 'test', 'val'):
            setattr(self, split, load_SBMsDataSetDGL(data_dir, name, split=split))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-t0))
def self_loop(g):
    """
    Rebuild `g` with exactly one self loop per node (DGL graphs).

    Replacement for dgl.transform.add_self_loop() that preserves
    ndata['feat'] and re-creates a zeroed edata['feat'], since the stock
    transform drops those fields. Called from the SBMsDataset class.
    """
    looped = dgl.DGLGraph()
    n = g.number_of_nodes()
    looped.add_nodes(n)
    looped.ndata['feat'] = g.ndata['feat']

    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    # Drop any pre-existing self loops, then add one per node.
    mask = src != dst
    looped.add_edges(src[mask], dst[mask])
    node_ids = np.arange(n)
    looped.add_edges(node_ids, node_ids)

    # Dummy edge features: only GCN/GAT reach this path, but the pipeline
    # generically expects ndata['feat'] and edata['feat'] to exist.
    looped.edata['feat'] = torch.zeros(looped.number_of_edges())
    return looped
def positional_encoding(g, pos_enc_dim, framework = 'dgl'):
    """
    Attach Laplacian-eigenvector positional encodings to the graph.

    For framework='pyg' the encoding is stored on g.pos_enc; for
    framework='dgl' it is stored in g.ndata['pos_enc'].
    """
    if framework == 'pyg':
        # Symmetrically normalised Laplacian as a scipy CSR matrix.
        lap = get_laplacian(g.edge_index, normalization='sym', dtype=torch.float64)
        lap = csr_matrix((lap[1], (lap[0][0], lap[0][1])), shape=(g.num_nodes, g.num_nodes))
        # Partial eigendecomposition: smallest-real eigenpairs (tol relaxed for 40 PEs).
        eigval, eigvec = sp.linalg.eigs(lap, k=pos_enc_dim + 1, which='SR', tol=1e-2)
        eigvec = eigvec[:, eigval.argsort()]  # increasing order
        # astype discards the imaginary part (needed since pytorch 1.5.0).
        g.pos_enc = torch.from_numpy(eigvec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
    elif framework == 'dgl':
        # L = I - D^{-1/2} A D^{-1/2}, built from sparse DGL views.
        adj = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
        deg_inv_sqrt = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
        lap = sp.eye(g.number_of_nodes()) - deg_inv_sqrt * adj * deg_inv_sqrt
        eigval, eigvec = sp.linalg.eigs(lap, k=pos_enc_dim + 1, which='SR', tol=1e-2)
        eigvec = eigvec[:, eigval.argsort()]  # increasing order
        # astype discards the imaginary part (needed since pytorch 1.5.0).
        g.ndata['pos_enc'] = torch.from_numpy(eigvec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
    return g
class SBMsDataset(torch.utils.data.Dataset):
    # Wrapper around a single pickled file holding the train/val/test splits
    # of an SBM dataset, plus collate helpers for sparse (DGL) and dense
    # (RingGNN / 3WLGNN) models.

    def __init__(self, name):
        """
            Load the pickled SBM dataset `data/SBMs/<name>.pkl`,
            a 3-tuple of (train, val, test) splits.
        """
        start = time.time()
        print("[I] Loading dataset %s..." % (name))
        self.name = name
        data_dir = 'data/SBMs/'
        with open(data_dir+name+'.pkl',"rb") as f:
            # NOTE: `f` is rebound from the file handle to the unpickled tuple.
            f = pickle.load(f)
            self.train = f[0]
            self.val = f[1]
            self.test = f[2]
        print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        # Per-node labels concatenated across the batch.
        labels = torch.cat(labels).long()
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = torch.cat(tab_snorm_n).sqrt()
        #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
        #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
        #snorm_e = torch.cat(tab_snorm_e).sqrt()
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels

    # prepare dense tensors for GNNs which use; such as RingGNN and 3WLGNN
    def collate_dense_gnn(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.cat(labels).long()
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = tab_snorm_n[0][0].sqrt()
        #batched_graph = dgl.batch(graphs)
        # Only the first graph of the batch is densified; presumably this
        # collate is used with batch size 1 — TODO confirm against callers.
        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
            Adapted from https://github.com/leichen2018/Ring-GNN/
            Assigning node and edge feats::
            we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
            Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
            The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
            The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        # Number of node classes is fixed per dataset variant.
        if self.name == 'SBM_CLUSTER':
            self.num_node_type = 7
        elif self.name == 'SBM_PATTERN':
            self.num_node_type = 3
        # use node feats to prepare adj
        adj_node_feat = torch.stack([zero_adj for j in range(self.num_node_type)])
        adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
        # One-hot encode each node's label on the diagonal planes.
        for node, node_label in enumerate(g.ndata['feat']):
            adj_node_feat[node_label.item()+1][node][node] = 1
        x_node_feat = adj_node_feat.unsqueeze(0)
        return x_node_feat, labels

    def _sym_normalize_adj(self, adj):
        # Symmetric normalisation D^{-1/2} A D^{-1/2}; zero-degree nodes map to 0.
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
        self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
        self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]

    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors
        self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.train.graph_lists]
        self.val.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.val.graph_lists]
        self.test.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.test.graph_lists]
| 14,591
| 37.70557
| 127
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/node2vec_proteins.py
|
import argparse
import torch
from torch_geometric.nn import Node2Vec
from ogb.nodeproppred import PygNodePropPredDataset
def save_embedding(model):
    """Persist the learned Node2Vec embedding table to disk as a CPU tensor."""
    weights = model.embedding.weight.data.cpu()
    torch.save(weights, 'ogbn/embedding_proteins.pt')
def main():
    """Train a Node2Vec embedding on ogbn-proteins and save it periodically."""
    parser = argparse.ArgumentParser(description='OGBN-Proteins (Node2Vec)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--embedding_dim', type=int, default=128)
    parser.add_argument('--walk_length', type=int, default=80)
    parser.add_argument('--context_size', type=int, default=20)
    parser.add_argument('--walks_per_node', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--log_steps', type=int, default=1)
    args = parser.parse_args()

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = PygNodePropPredDataset(name='ogbn-proteins',root = 'ogbn')
    data = dataset[0]

    model = Node2Vec(data.edge_index, args.embedding_dim, args.walk_length,
                     args.context_size, args.walks_per_node,
                     sparse=True).to(device)

    loader = model.loader(batch_size=args.batch_size, shuffle=True,
                          num_workers=4)
    # Sparse embedding gradients require SparseAdam rather than plain Adam.
    optimizer = torch.optim.SparseAdam(model.parameters(), lr=args.lr)

    model.train()
    for epoch in range(1, args.epochs + 1):
        for i, (pos_rw, neg_rw) in enumerate(loader):
            optimizer.zero_grad()
            # Skip-gram style loss over positive and negative random walks.
            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
            loss.backward()
            optimizer.step()

            if (i + 1) % args.log_steps == 0:
                print(f'Epoch: {epoch:02d}, Step: {i+1:03d}/{len(loader)}, '
                      f'Loss: {loss:.4f}')

            if (i + 1) % 100 == 0:  # Save model every 100 steps.
                save_embedding(model)
        save_embedding(model)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 2,096
| 34.542373
| 79
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/CSL.py
|
import numpy as np, time, pickle, random, csv
import torch
from torch.utils.data import DataLoader, Dataset
import os
import pickle
import numpy as np
import dgl
from sklearn.model_selection import StratifiedKFold, train_test_split
random.seed(42)
from scipy import sparse as sp
class DGLFormDataset(torch.utils.data.Dataset):
    """
    Wraps parallel lists (graphs, labels, ...) as a PyTorch Dataset.

    *lists (list): lists of 'graphs' and 'labels'; all lists must share
    the same length. Indexing returns the tuple of i-th entries.
    """
    def __init__(self, *lists):
        reference = lists[0]
        assert all(len(reference) == len(other) for other in lists)
        self.lists = lists
        self.graph_lists = lists[0]
        self.graph_labels = lists[1]

    def __getitem__(self, index):
        return tuple(sequence[index] for sequence in self.lists)

    def __len__(self):
        return len(self.lists[0])
def format_dataset(dataset):
    """
    Utility that converts a list of (graph, label) samples into a
    DGLFormDataset in dgl/pytorch compatible format: node feats are cast to
    float and dummy edge feats are added when missing.
    """
    graph_list = [sample[0] for sample in dataset]
    label_list = [sample[1] for sample in dataset]

    for g in graph_list:
        #graph.ndata['feat'] = torch.FloatTensor(graph.ndata['feat'])
        g.ndata['feat'] = g.ndata['feat'].float()  # dgl 4.0
        # adding edge features for Residual Gated ConvNet, if not there
        if 'feat' not in g.edata.keys():
            feat_dim = g.ndata['feat'].shape[1]  # dim same as node feature dim
            g.edata['feat'] = torch.ones(g.number_of_edges(), feat_dim)

    return DGLFormDataset(graph_list, label_list)
def get_all_split_idx(dataset):
    """
    Build (or load cached) 5-fold stratified train/val/test index splits.

    - Split total number of graphs into 3 (train, val and test) in 3:1:1
    - Stratified split proportionate to original distribution of data with respect to classes
    - Using sklearn to perform the split and then save the indexes
    - Preparing 5 such combinations of indexes split to be used in Graph NNs
    - As with KFold, each of the 5 folds has a unique test set.

    Returns a dict mapping 'train'/'val'/'test' to a list of 5 index lists.
    """
    root_idx_dir = './data/CSL/'
    if not os.path.exists(root_idx_dir):
        os.makedirs(root_idx_dir)
    all_idx = {}

    # If there are no idx files, do the split and store the files
    if not (os.path.exists(root_idx_dir + dataset.name + '_train.index')):
        print("[!] Splitting the data into train/val/test ...")

        # Using 5-fold cross val as used in RP-GNN paper
        k_splits = 5
        cross_val_fold = StratifiedKFold(n_splits=k_splits, shuffle=True)

        # Temporary per-graph attribute recording each sample's original
        # position, used below to recover indices after the stratified splits.
        for i in range(len(dataset.graph_lists)):
            dataset[i][0].a = lambda: None
            setattr(dataset[i][0].a, 'index', i)

        # Fix: open the three index files once under context managers so the
        # handles are flushed and closed before the read-back phase below
        # (the original leaked three unclosed file objects per fold).
        with open(root_idx_dir + dataset.name + '_train.index', 'a+') as f_train, \
                open(root_idx_dir + dataset.name + '_val.index', 'a+') as f_val, \
                open(root_idx_dir + dataset.name + '_test.index', 'a+') as f_test:
            f_train_w = csv.writer(f_train)
            f_val_w = csv.writer(f_val)
            f_test_w = csv.writer(f_test)

            for indexes in cross_val_fold.split(dataset.graph_lists, dataset.graph_labels):
                remain_index, test_index = indexes[0], indexes[1]

                remain_set = format_dataset([dataset[index] for index in remain_index])

                # Gets final 'train' and 'val'
                train, val, _, __ = train_test_split(remain_set,
                                                     range(len(remain_set.graph_lists)),
                                                     test_size=0.25,
                                                     stratify=remain_set.graph_labels)

                train, val = format_dataset(train), format_dataset(val)
                test = format_dataset([dataset[index] for index in test_index])

                # Extracting only idxs
                idx_train = [item[0].a.index for item in train]
                idx_val = [item[0].a.index for item in val]
                idx_test = [item[0].a.index for item in test]

                f_train_w.writerow(idx_train)
                f_val_w.writerow(idx_val)
                f_test_w.writerow(idx_test)

        print("[!] Splitting done!")

    # reading idx from the files
    for section in ['train', 'val', 'test']:
        with open(root_idx_dir + dataset.name + '_'+ section + '.index', 'r') as f:
            reader = csv.reader(f)
            all_idx[section] = [list(map(int, idx)) for idx in reader]
    return all_idx
class CSL(torch.utils.data.Dataset):
    """
        Circular Skip Link Graphs:
        Source: https://github.com/PurdueMINDS/RelationalPooling/
    """

    def __init__(self, path="data/CSL/"):
        self.name = "CSL"
        # Pickled list of scipy sparse adjacency matrices + tensor of labels.
        self.adj_list = pickle.load(open(os.path.join(path, 'graphs_Kary_Deterministic_Graphs.pkl'), 'rb'))
        self.graph_labels = torch.load(os.path.join(path, 'y_Kary_Deterministic_Graphs.pt'))
        self.graph_lists = []

        self.n_samples = len(self.graph_labels)
        self.num_node_type = 1 #41
        self.num_edge_type = 1 #164
        self._prepare()

    def _prepare(self):
        t0 = time.time()
        print("[I] Preparing Circular Skip Link Graphs v4 ...")
        for sample in self.adj_list:
            # Build a DGL graph from the sparse adjacency, then drop self loops.
            _g = dgl.DGLGraph()
            _g.from_scipy_sparse_matrix(sample)
            g = dgl.transform.remove_self_loop(_g)
            # All-zero (featureless) node and edge labels.
            g.ndata['feat'] = torch.zeros(g.number_of_nodes()).long()
            #g.ndata['feat'] = torch.arange(0, g.number_of_nodes()).long() # v1
            #g.ndata['feat'] = torch.randperm(g.number_of_nodes()).long() # v3
            # adding edge features as generic requirement
            g.edata['feat'] = torch.zeros(g.number_of_edges()).long()
            #g.edata['feat'] = torch.arange(0, g.number_of_edges()).long() # v1
            #g.edata['feat'] = torch.ones(g.number_of_edges()).long() # v2
            # NOTE: come back here, to define edge features as distance between the indices of the edges
            ###################################################################
            # srcs, dsts = new_g.edges()
            # edge_feat = []
            # for edge in range(len(srcs)):
            #     a = srcs[edge].item()
            #     b = dsts[edge].item()
            #     edge_feat.append(abs(a-b))
            # g.edata['feat'] = torch.tensor(edge_feat, dtype=torch.int).long()
            ###################################################################
            self.graph_lists.append(g)
        # NOTE(review): these are assigned the node/edge COUNT of the first
        # graph, not a number of distinct types — confirm this is intended.
        self.num_node_type = self.graph_lists[0].ndata['feat'].size(0)
        self.num_edge_type = self.graph_lists[0].edata['feat'].size(0)
        print("[I] Finished preparation after {:.4f}s".format(time.time()-t0))

    def __len__(self):
        # Number of graphs in the dataset.
        return self.n_samples

    def __getitem__(self, idx):
        # Returns the (graph, label) pair at position idx.
        return self.graph_lists[idx], self.graph_labels[idx]
def self_loop(g):
    """
    Rebuild `g` with exactly one self loop per node (DGL graphs).

    Replacement for dgl.transform.add_self_loop() that preserves
    ndata['feat'] and re-creates a zeroed edata['feat'], since the stock
    transform drops those fields. Called from the CSLDataset class.
    """
    out = dgl.DGLGraph()
    node_count = g.number_of_nodes()
    out.add_nodes(node_count)
    out.ndata['feat'] = g.ndata['feat']

    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    # Keep only non-self edges from the input, then add one loop per node.
    not_loop = src != dst
    out.add_edges(src[not_loop], dst[not_loop])
    node_ids = np.arange(node_count)
    out.add_edges(node_ids, node_ids)

    # Dummy edge features: only GCN/GAT reach this path, but the pipeline
    # generically expects ndata['feat'] and edata['feat'] to exist.
    out.edata['feat'] = torch.zeros(out.number_of_edges())
    return out
def positional_encoding(g, pos_enc_dim):
    """
    Store the first pos_enc_dim non-trivial Laplacian eigenvectors in
    g.ndata['pos_enc'].
    """
    num_nodes = g.number_of_nodes()
    # Symmetrically normalised Laplacian: I - D^{-1/2} A D^{-1/2}.
    adj = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
    deg_inv_sqrt = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
    lap = sp.eye(num_nodes) - deg_inv_sqrt * adj * deg_inv_sqrt
    # Dense eigendecomposition, eigenvectors ordered by increasing eigenvalue;
    # the first (trivial) eigenvector is skipped.
    eigval, eigvec = np.linalg.eig(lap.toarray())
    order = eigval.argsort()
    eigvec = np.real(eigvec[:, order])
    g.ndata['pos_enc'] = torch.from_numpy(eigvec[:, 1:pos_enc_dim + 1]).float()
    return g
class CSLDataset(torch.utils.data.Dataset):
    def __init__(self, name='CSL'):
        """Load CSL, build 5-fold stratified splits, and materialise per-fold datasets."""
        t0 = time.time()
        self.name = name
        dataset = CSL()
        print("[!] Dataset: ", self.name)

        # this function splits data into train/val/test and returns the indices
        self.all_idx = get_all_split_idx(dataset)

        self.all = dataset
        # One DGLFormDataset per fold (5-fold cross validation).
        self.train = [self.format_dataset([dataset[idx] for idx in self.all_idx['train'][split_num]]) for split_num in range(5)]
        self.val = [self.format_dataset([dataset[idx] for idx in self.all_idx['val'][split_num]]) for split_num in range(5)]
        self.test = [self.format_dataset([dataset[idx] for idx in self.all_idx['test'][split_num]]) for split_num in range(5)]

        print("Time taken: {:.4f}s".format(time.time()-t0))
def format_dataset(self, dataset):
"""
Utility function to recover data,
INTO-> dgl/pytorch compatible format
"""
graphs = [data[0] for data in dataset]
labels = [data[1] for data in dataset]
return DGLFormDataset(graphs, labels)
# form a mini batch from a given list of samples = [(graph, label) pairs]
def collate(self, samples):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
labels = torch.tensor(np.array(labels))
batched_graph = dgl.batch(graphs)
return batched_graph, labels
    # prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
    def collate_dense_gnn(self, samples, pos_enc):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        # Only the first graph of the batch is densified; presumably this
        # collate is used with batch size 1 — TODO confirm against callers.
        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
            Adapted from https://github.com/leichen2018/Ring-GNN/
            Assigning node and edge feats::
            we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
            Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
            The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
            The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        if pos_enc:
            # Diagonal planes carry the Laplacian positional encodings.
            in_dim = g.ndata['pos_enc'].shape[1]
            # use node feats to prepare adj
            adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
            adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
            for node, node_feat in enumerate(g.ndata['pos_enc']):
                adj_node_feat[1:, node, node] = node_feat
            x_node_feat = adj_node_feat.unsqueeze(0)
            return x_node_feat, labels
        else: # no node features here
            in_dim = 1
            # use node feats to prepare adj
            adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
            adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
            for node, node_feat in enumerate(g.ndata['feat']):
                adj_node_feat[1:, node, node] = node_feat
            x_no_node_feat = adj_node_feat.unsqueeze(0)
            return x_no_node_feat, labels
def _sym_normalize_adj(self, adj):
deg = torch.sum(adj, dim = 0)#.squeeze()
deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
deg_inv = torch.diag(deg_inv)
return torch.mm(deg_inv, torch.mm(adj, deg_inv))
def _add_self_loops(self):
# function for adding self loops
# this function will be called only if self_loop flag is True
for split_num in range(5):
self.train[split_num].graph_lists = [self_loop(g) for g in self.train[split_num].graph_lists]
self.val[split_num].graph_lists = [self_loop(g) for g in self.val[split_num].graph_lists]
self.test[split_num].graph_lists = [self_loop(g) for g in self.test[split_num].graph_lists]
for split_num in range(5):
self.train[split_num] = DGLFormDataset(self.train[split_num].graph_lists, self.train[split_num].graph_labels)
self.val[split_num] = DGLFormDataset(self.val[split_num].graph_lists, self.val[split_num].graph_labels)
self.test[split_num] = DGLFormDataset(self.test[split_num].graph_lists, self.test[split_num].graph_labels)
def _add_positional_encodings(self, pos_enc_dim):
# Graph positional encoding v/ Laplacian eigenvectors
for split_num in range(5):
self.train[split_num].graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train[split_num].graph_lists]
self.val[split_num].graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val[split_num].graph_lists]
self.test[split_num].graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test[split_num].graph_lists]
| 13,727
| 40.101796
| 128
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/node2vec-products.py
|
import argparse
import torch
from torch_geometric.nn import Node2Vec
from ogb.nodeproppred import PygNodePropPredDataset
def save_embedding(model):
    """Checkpoint the Node2Vec embedding matrix (on CPU) to ogbn/embedding_products.pt."""
    torch.save(model.embedding.weight.data.cpu(), 'ogbn/embedding_products.pt')
def main():
    """
    Train a Node2Vec embedding on ogbn-products.

    Parses hyperparameters from the command line, optimises the Node2Vec
    skip-gram objective with SparseAdam, and checkpoints the embedding matrix
    every 100 steps and again after each epoch.
    """
    parser = argparse.ArgumentParser(description='OGBN-Products (Node2Vec)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--embedding_dim', type=int, default=128)
    parser.add_argument('--walk_length', type=int, default=40)
    parser.add_argument('--context_size', type=int, default=20)
    parser.add_argument('--walks_per_node', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--log_steps', type=int, default=1)
    args = parser.parse_args()

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = PygNodePropPredDataset(name='ogbn-products',root='ogbn')
    data = dataset[0]

    # sparse=True stores the embedding as a sparse gradient tensor, required
    # by the SparseAdam optimizer below
    model = Node2Vec(data.edge_index, args.embedding_dim, args.walk_length,
                     args.context_size, args.walks_per_node,
                     sparse=True).to(device)

    loader = model.loader(batch_size=args.batch_size, shuffle=True,
                          num_workers=4) #change from 4 to 0 for convient debug
    optimizer = torch.optim.SparseAdam(model.parameters(), lr=args.lr)

    model.train()
    for epoch in range(1, args.epochs + 1):
        for i, (pos_rw, neg_rw) in enumerate(loader):
            optimizer.zero_grad()
            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
            loss.backward()
            optimizer.step()

            if (i + 1) % args.log_steps == 0:
                print(f'Epoch: {epoch:02d}, Step: {i+1:03d}/{len(loader)}, '
                      f'Loss: {loss:.4f}')

            if (i + 1) % 100 == 0:  # Save model every 100 steps.
                save_embedding(model)
        save_embedding(model)
if __name__ == "__main__":
main()
| 2,133
| 35.169492
| 79
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/planetoids.py
|
import torch
import pickle
import torch.utils.data
import time
import os
import numpy as np
from torch_geometric.utils import get_laplacian
import csv
from scipy import sparse as sp
import dgl
from dgl.data import TUDataset
from dgl.data import LegacyTUDataset
import torch_geometric as pyg
from scipy.sparse import csr_matrix
import random
random.seed(42)
from sklearn.model_selection import StratifiedKFold, train_test_split
from torch_geometric.data import InMemoryDataset
import csv
import json
class pygFormDataset(torch.utils.data.Dataset):
    """
    Thin Dataset adapter over parallel lists.

    *lists (list): parallel lists — nodes first, labels second — all of
    identical length. Indexing returns the tuple of aligned elements.
    """
    def __init__(self, *lists):
        expected = len(lists[0])
        assert all(expected == len(other) for other in lists)
        self.lists = lists
        self.node_lists = lists[0]
        self.node_labels = lists[1]

    def __getitem__(self, index):
        # one aligned element from each underlying list
        return tuple(component[index] for component in self.lists)

    def __len__(self):
        return len(self.node_lists)
def format_dataset(dataset):
    """
    Utility function to recover data,
    INTO-> dgl/pytorch compatible format
    """
    node_list = [sample[0] for sample in dataset]
    label_list = [sample[1] for sample in dataset]
    return pygFormDataset(node_list, label_list)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serialises numpy arrays as plain Python lists."""
    def default(self, obj):
        # numpy arrays are not natively JSON-serialisable; convert them
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # defer to the base class (raises TypeError for unsupported types)
        return super().default(obj)
def get_all_split_idx(dataset):
    """
    Build (or load cached) stratified 10-fold train/val/test *node* splits.

    - Splits the node indices of dataset[0] roughly 80:10:10 per fold,
      stratified on the node labels dataset[0].y
    - Uses sklearn StratifiedKFold for the 10 test folds, then
      train_test_split (test_size=0.111 of the remaining 90% ~= 10% overall)
      to carve validation out of each fold's training portion
    - Saves the indices to ./data/planetoid/<name>_splits.json on first run
      and reloads them from that file on subsequent runs

    Returns:
        dict with keys 'train', 'val', 'test', each a list of 10 index lists.
    """
    root_idx_dir = './data/planetoid/'
    if not os.path.exists(root_idx_dir):
        os.makedirs(root_idx_dir)

    # If there are no idx files, do the split and store the files
    if not os.path.exists(root_idx_dir + f"{dataset.name}_splits.json"):
        print("[!] Splitting the data into train/val/test ...")
        all_idxs = np.arange(dataset[0].num_nodes)
        # Using 10-fold cross val to compare with benchmark papers
        k_splits = 10
        cross_val_fold = StratifiedKFold(n_splits=k_splits, shuffle=True)
        k_data_splits = []

        split = {"train": [], "val": [], "test": []}
        for train_ok_split, test_ok_split in cross_val_fold.split(X = all_idxs, y = dataset[0].y):
            train_ok_targets = dataset[0].y[train_ok_split]
            # Gets final 'train' and 'val' (stratified on the train-fold labels)
            train_i_split, val_i_split = train_test_split(train_ok_split,
                                                          test_size=0.111,
                                                          stratify=train_ok_targets)
            # Extracting only idxs
            split['train'].append(train_i_split)
            split['val'].append(val_i_split)
            split['test'].append(all_idxs[test_ok_split])
        filename = root_idx_dir + f"{dataset.name}_splits.json"
        with open(filename, "w") as f:
            # NumpyEncoder turns the numpy index arrays into JSON lists
            json.dump(split, f, cls=NumpyEncoder)
        print("[!] Splitting done!")

    # reading idx from the files
    with open(root_idx_dir + f"{dataset.name}_splits.json", "r") as fp:
        all_idx = json.load(fp)
    return all_idx
class DGLFormDataset(torch.utils.data.Dataset):
    """
    Thin Dataset wrapper over parallel lists of graphs and labels.

    *lists (list): parallel lists — graphs first, labels second — all of
    identical length. Indexing returns the tuple of aligned elements.
    """
    def __init__(self, *lists):
        expected = len(lists[0])
        assert all(expected == len(other) for other in lists)
        self.lists = lists
        self.graph_lists = lists[0]
        self.graph_labels = lists[1]

    def __getitem__(self, index):
        # one aligned element from each underlying list
        return tuple(component[index] for component in self.lists)

    def __len__(self):
        return len(self.graph_lists)
def self_loop(g):
    """
    Utility function only, to be used only when necessary as per user self_loop flag
    : Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
    This function is called inside a function in TUsDataset class.

    Builds a new graph with the same nodes/features, drops any existing
    self-loops, then adds exactly one self-loop per node.
    """
    new_g = dgl.DGLGraph()
    new_g.add_nodes(g.number_of_nodes())
    new_g.ndata['feat'] = g.ndata['feat']

    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    # mask out existing self-loops so they are not duplicated below
    non_self_edges_idx = src != dst
    nodes = np.arange(g.number_of_nodes())
    new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
    # one fresh self-loop per node
    new_g.add_edges(nodes, nodes)

    # This new edata is not used since this function gets called only for GCN, GAT
    # However, we need this for the generic requirement of ndata and edata
    new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
    return new_g
def positional_encoding(g, pos_enc_dim, framework = 'pyg'):
    """
    Graph positional encoding v/ Laplacian eigenvectors.

    Computes the eigenvectors of the symmetric-normalised graph Laplacian for
    the pos_enc_dim smallest non-trivial eigenvalues (the trivial constant
    eigenvector at index 0 is skipped).

    Args:
        g: the graph — a PyG data object (needs .edge_index / .num_nodes)
           when framework == 'pyg', a DGLGraph when framework == 'dgl'.
        pos_enc_dim (int): number of eigenvector components per node.
        framework (str): 'pyg' or 'dgl'.

    Returns:
        framework == 'pyg': FloatTensor of shape (num_nodes, pos_enc_dim).
        framework == 'dgl': the graph g, with the encoding stored in
        g.ndata['pos_enc'].
    """
    if framework == 'pyg':
        # Symmetric-normalised Laplacian as a scipy CSR matrix
        L = get_laplacian(g.edge_index,normalization='sym',dtype = torch.float64)
        L = csr_matrix((L[1], (L[0][0], L[0][1])), shape=(g.num_nodes, g.num_nodes))
        # Sparse eigensolver; loose tol trades accuracy for speed (for 40 PEs)
        EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2)
        EigVec = EigVec[:, EigVal.argsort()]  # increasing order
        # astype discards the imaginary part that eigs may return
        pos_enc = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
        return pos_enc
    elif framework == 'dgl':
        # L = I - D^{-1/2} A D^{-1/2}; degrees clipped to >= 1 for isolated nodes
        A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
        N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
        L = sp.eye(g.number_of_nodes()) - N * A * N
        EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2)
        EigVec = EigVec[:, EigVal.argsort()]  # increasing order
        g.ndata['pos_enc'] = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
        # Bug fix: this branch previously fell off the end and returned None;
        # callers (and the sibling CSL implementation) expect the graph back.
        return g
class PlanetoidDataset(InMemoryDataset):
    """
    Planetoid (Cora/Citeseer/Pubmed) node-classification dataset wrapper.

    Loads the PyG Planetoid dataset, optionally concatenates precomputed
    node2vec embeddings to the node features, and prepares 10 stratified
    train/val/test node-index splits plus constant edge features.
    """
    def __init__(self, name, use_node_embedding = False):
        t0 = time.time()
        self.name = name
        data_dir = 'data/planetoid'
        # 'full' split: all nodes outside val/test are used for training
        self.dataset = pyg.datasets.Planetoid(root=data_dir, name= name ,split = 'full')
        print("[!] Dataset: ", self.name)
        if use_node_embedding:
            # precomputed node2vec embedding, appended to the raw features
            embedding = torch.load(data_dir + '/embedding_'+name + '.pt', map_location='cpu')
            self.dataset.data.x = torch.cat([self.dataset.data.x, embedding], dim=-1)
        # this function splits data into train/val/test and returns the indices
        self.all_idx = get_all_split_idx(self.dataset)
        # constant scalar edge features (models requiring edata get ones)
        edge_feat_dim = 1
        self.edge_attr = torch.ones(self.dataset[0].num_edges, edge_feat_dim)
        # one index tensor per split for each of the 10 folds
        self.train_idx = [torch.tensor(self.all_idx['train'][split_num], dtype=torch.long) for split_num in range(10)]
        self.val_idx = [torch.tensor(self.all_idx['val'][split_num], dtype=torch.long) for split_num in range(10)]
        self.test_idx = [torch.tensor(self.all_idx['test'][split_num], dtype=torch.long) for split_num in range(10)]
        print("Time taken: {:.4f}s".format(time.time()-t0))

    def format_dataset(self, dataset):
        """
        Utility function to recover data,
        INTO-> dgl/pytorch compatible format
        """
        graphs = [data[0] for data in dataset]
        labels = [data[1] for data in dataset]
        for graph in graphs:
            graph.ndata['feat'] = graph.ndata['feat'].float()  # dgl 4.0
            # adding edge features for Residual Gated ConvNet, if not there
            if 'feat' not in graph.edata.keys():
                edge_feat_dim = graph.ndata['feat'].shape[1]  # dim same as node feature dim
                graph.edata['feat'] = torch.ones(graph.number_of_edges(), edge_feat_dim)
        return DGLFormDataset(graphs, labels)

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels

    # prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
    def collate_dense_gnn(self, samples):
        # The input samples is a list of pairs (graph, label).
        # NOTE: only the first graph of the batch is used — dense models here
        # are fed one graph at a time.
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
        Adapted from https://github.com/leichen2018/Ring-GNN/
        Assigning node and edge feats::
        we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
        Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
        The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
        The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        in_dim = g.ndata['feat'].shape[1]
        # use node feats to prepare adj
        adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
        adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
        for node, node_feat in enumerate(g.ndata['feat']):
            adj_node_feat[1:, node, node] = node_feat
        x_node_feat = adj_node_feat.unsqueeze(0)
        return x_node_feat, labels

    def _sym_normalize_adj(self, adj):
        # Symmetric normalisation D^{-1/2} A D^{-1/2}; zero-degree nodes get 0
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        # NOTE(review): __init__ only defines train_idx/val_idx/test_idx — not
        # self.train/self.val/self.test — so calling this as-is would raise
        # AttributeError; looks copied from the graph-level dataset. Verify.
        for split_num in range(10):
            self.train[split_num].graph_lists = [self_loop(g) for g in self.train[split_num].graph_lists]
            self.val[split_num].graph_lists = [self_loop(g) for g in self.val[split_num].graph_lists]
            self.test[split_num].graph_lists = [self_loop(g) for g in self.test[split_num].graph_lists]

        for split_num in range(10):
            self.train[split_num] = DGLFormDataset(self.train[split_num].graph_lists, self.train[split_num].graph_labels)
            self.val[split_num] = DGLFormDataset(self.val[split_num].graph_lists, self.val[split_num].graph_labels)
            self.test[split_num] = DGLFormDataset(self.test[split_num].graph_lists, self.test[split_num].graph_labels)
| 13,158
| 43.60678
| 131
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/node2vec_arxiv.py
|
import argparse
import torch
from torch_geometric.nn import Node2Vec
from torch_geometric.utils import to_undirected
from ogb.nodeproppred import PygNodePropPredDataset
import os.path as osp
def save_embedding(model):
    """Checkpoint the Node2Vec embedding matrix (on CPU) to ogbn/embedding_arxiv.pt."""
    torch.save(model.embedding.weight.data.cpu(), 'ogbn/embedding_arxiv.pt')
def main():
    """
    Train a Node2Vec embedding on ogbn-arxiv.

    Parses hyperparameters from the command line, makes the citation graph
    undirected, optimises the Node2Vec skip-gram objective with SparseAdam,
    and checkpoints the embedding every 100 steps and after each epoch.
    """
    parser = argparse.ArgumentParser(description='OGBN-Arxiv (Node2Vec)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--embedding_dim', type=int, default=128)
    parser.add_argument('--walk_length', type=int, default=80)
    parser.add_argument('--context_size', type=int, default=20)
    parser.add_argument('--walks_per_node', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--log_steps', type=int, default=1)
    args = parser.parse_args()

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = PygNodePropPredDataset(name='ogbn-arxiv',root = 'ogbn')
    data = dataset[0]
    # arxiv is a directed citation graph; random walks need both directions
    data.edge_index = to_undirected(data.edge_index, data.num_nodes)

    # sparse=True is required by the SparseAdam optimizer below
    model = Node2Vec(data.edge_index, args.embedding_dim, args.walk_length,
                     args.context_size, args.walks_per_node,
                     sparse=True).to(device)

    loader = model.loader(batch_size=args.batch_size, shuffle=True,
                          num_workers=4)
    optimizer = torch.optim.SparseAdam(model.parameters(), lr=args.lr)

    model.train()
    for epoch in range(1, args.epochs + 1):
        for i, (pos_rw, neg_rw) in enumerate(loader):
            optimizer.zero_grad()
            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
            loss.backward()
            optimizer.step()

            if (i + 1) % args.log_steps == 0:
                print(f'Epoch: {epoch:02d}, Step: {i+1:03d}/{len(loader)}, '
                      f'Loss: {loss:.4f}')

            if (i + 1) % 100 == 0:  # Save model every 100 steps.
                save_embedding(model)
        save_embedding(model)
if __name__ == "__main__":
main()
| 2,306
| 36.819672
| 81
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/data/molecules/prepare_molecules.py
|
#!/usr/bin/env python
# coding: utf-8
# # Notebook for preparing and saving MOLECULAR graphs
# In[1]:
import numpy as np
import torch
import pickle
import time
import os
from IPython import get_ipython
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
# In[2]:
print(torch.__version__)
# # Download ZINC dataset
# In[1]:
#!unzip molecules.zip -d ../
# In[3]:
if not os.path.isfile('molecules.zip'):
print('downloading..')
os.system('curl https://www.dropbox.com/s/feo9qle74kg48gy/molecules.zip?dl=1 -o molecules.zip -J -L -k')
os.system('unzip molecules.zip -d ../')
# !tar -xvf molecules.zip -C ../
else:
print('File already downloaded')
# # Convert to DGL format and save with pickle
# In[4]:
import os
os.chdir('../../') # go to root folder of the project
print(os.getcwd())
# In[5]:
import pickle
# get_ipython().run_line_magic('load_ext', 'autoreload')
# get_ipython().run_line_magic('autoreload', '2')
from data.molecules import MoleculeDatasetDGL ,MoleculeDatasetpyg
from data.data import LoadData
from torch.utils.data import DataLoader
from data.molecules import MoleculeDataset
# In[6]:
framwork = 'pyg'
DATASET_NAME = 'ZINC'
dataset = MoleculeDatasetDGL(DATASET_NAME) if 'dgl' == framwork else MoleculeDatasetpyg(DATASET_NAME)
# In[7]:
def plot_histo_graphs(dataset, title):
    """
    Plot a histogram of graph sizes (node counts) in *dataset* and print
    the min/max size.

    Relies on the module-level global `framwork` ('pyg' or 'dgl') to decide
    how to read the node count of each sample.
    """
    # histogram of graph sizes
    graph_sizes = []
    for graph in dataset:
        # pyg sample: Data object with .num_nodes; dgl sample: (graph, label) tuple
        graph_sizes.append(graph.num_nodes) if framwork == 'pyg' else graph_sizes.append(graph[0].number_of_nodes())
    plt.figure(1)
    plt.hist(graph_sizes, bins=20)
    plt.title(title)
    plt.show()
    graph_sizes = torch.Tensor(graph_sizes)
    print('min/max :',graph_sizes.min().long().item(),graph_sizes.max().long().item())
#plot_histo_graphs(dataset.train,'trainset')
plot_histo_graphs(dataset.val,'valset')
plot_histo_graphs(dataset.test,'testset')
# In[8]:
#print(len(dataset.train))
print(len(dataset.val))
print(len(dataset.test))
#print(dataset.train[0])
print(dataset.val[0])
print(dataset.test[0])
# In[9]:
num_atom_type = 28
num_bond_type = 4
# In[10]:
# start = time.time()
#
# with open('data/molecules/ZINC_dgl.pkl','wb') as f:
# pickle.dump([dataset.train,dataset.val,dataset.test,num_atom_type,num_bond_type],f)
# print('Time (sec):',time.time() - start)
# # Test load function
# In[11]:
# DATASET_NAME = 'ZINC'
# dataset = LoadData(DATASET_NAME, framwork)
# trainset, valset, testset = dataset.train, dataset.val, dataset.test
# In[12]:
from torch_geometric.data import DataLoader
loader = DataLoader(dataset.val, batch_size=32, shuffle=True)
for batch in loader:
print(batch)
print(batch.y)
print(len(batch.y))
# batch_size = 10
# collate = MoleculeDataset.collate
# print(MoleculeDataset)
# train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True, collate_fn=collate)
# In[ ]:
# In[ ]:
| 2,951
| 17.110429
| 116
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/out/log2csv-planetoid.py
|
import os
import re
import numpy as np
import csv
def write2csv(path):
    """
    Aggregate experiment logs under *path* into 'results.csv'.

    Walks every *.txt log file, extracts dataset / model / layer count /
    parameter count and the averaged train/val/test accuracies plus the
    convergence epochs, writes one CSV row per log, and after every group of
    4 logs (4 seeds of the same configuration) appends a mean±std summary row.

    Args:
        path (str): root directory containing the result .txt files.

    Side effects:
        Creates/overwrites 'results.csv' in the current working directory.
    """
    totals = []
    # 'with' closes the CSV and every log promptly; the original version
    # leaked all log file handles except the last one
    with open('results.csv', 'w', encoding='gbk', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['data', 'model', 'L', 'params', 'train', 'val', 'test', 'epoch'])

        for log_path in findAllFile(path):
            print(log_path)
            with open(log_path) as log_file:
                for line in log_file:
                    # a..h are 0 when the line starts with the given marker
                    a = line.find('Dataset:')
                    b = line.find('net_params={\'L\':')
                    c = line.find('Model:')
                    d = line.find('Total Parameters:')
                    e = line.find('TEST ACCURACY averaged:')
                    h = line.find('val ACCURACY averaged:')
                    f = line.find('TRAIN ACCURACY averaged:')
                    g = line.find(' Average Convergence Time (Epochs):')
                    if a == 0:
                        dataset = line[line.index(':') + 2:line.index(',')]
                    if b == 0:
                        net = line[line.index(':') + 2:line.index(',')]
                    if c == 0:
                        model = line[line.index(':') + 2:line.index('_')]
                    if d == 0:
                        Parameters = line[line.index(':') + 2:line.index('\n')]
                    if e == 0:
                        TEST = line[line.index(':') + 2:line.index('w') - 1]
                    if h == 0:
                        val = line[line.index(':') + 2:line.index('w') - 1]
                    if f == 0:
                        TRAIN = line[line.index(':') + 2:line.index('w') - 1]
                    if g == 0:
                        # Epochs is the last field of interest -> stop reading this log
                        Epochs = line[line.index(':') + 2:line.index('w') - 1]
                        totals.append([dataset, model, net, Parameters, TRAIN, val, TEST, Epochs])
                        break

        # sort by dataset name, model name, then layer count (numeric)
        totals.sort(key=lambda x: (x[0], x[1], int(x[2])), reverse=False)

        def _mean_std(column):
            # mean±std (sample std, ddof=1), both rounded to 4 decimals
            values = np.array(column, dtype=np.float32)
            return str(np.around(np.mean(values), decimals=4)) + '±' + str(
                np.around(np.std(values, ddof=1), decimals=4))

        for i in range(len(totals)):
            csv_writer.writerow(totals[i])
            if (i + 1) % 4 == 0:
                # summarise the last 4 rows (same config, different seeds)
                group = np.array(totals[i - 3:i + 1])
                train_acc = _mean_std(group[:, 4])
                val_acc = _mean_std(group[:, 5])
                test_acc = _mean_std(group[:, 6])
                Epochs_acc = _mean_std(group[:, 7])
                # config columns copied from row i-1, as in the original
                csv_writer.writerow([totals[i - 1][0], totals[i - 1][1], totals[i - 1][2],
                                     totals[i - 1][3], train_acc, val_acc, test_acc, Epochs_acc])
def findAllFile(base):
    """Yield the full path of every .txt file found under *base*, recursively."""
    for dirpath, _dirnames, filenames in os.walk(base):
        for filename in filenames:
            if filename.endswith('.txt'):
                yield os.path.join(dirpath, filename)
def main():
    """Entry point: aggregate every result log under the results directory into results.csv."""
    results_dir = 'SBMs_node_classification/results/'
    np.set_printoptions(precision=4)
    write2csv(results_dir)
main()
| 4,018
| 45.732558
| 189
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/out/log2csv-sbm.py
|
import os
import re
import numpy as np
import csv
def write2csv(path):
    """
    Aggregate SBM experiment logs under *path* into 'results.csv'.

    Walks every *.txt log, extracts dataset / model / layer count / parameter
    count and the train/val/test accuracies plus convergence epochs, writes
    one CSV row per log, and after every group of 4 logs (4 seeds of the same
    configuration) appends a mean±std summary row.

    NOTE(review): log files opened inside the loop are only closed via the
    final file.close() (last handle only) — the rest leak until GC.
    """
    csv_file=open('results.csv','w',encoding='gbk',newline='')
    csv_writer=csv.writer(csv_file)
    csv_writer.writerow(['data','model','L','params','train','val','test','epoch'])
    totals = []
    for path in findAllFile(path):
        print(path)
        file=open(path)
        iterf=iter(file)
        for line in iterf:
            # a..h are 0 when the line starts with the given marker
            a = line.find('Dataset:')
            b = line.find('net_params={\'L\':')
            c=line.find('Model:')
            d=line.find('Total Parameters:')
            e=line.find('TEST ACCURACY')
            h = line.find('val ACCURACY')
            f=line.find('TRAIN ACCURACY')
            g=line.find(' Convergence Time (Epochs):')
            if a == 0:
                dataset = line[line.index(':') + 2:line.index(',')]
            if b == 0:
                net = line[line.index(':') + 2:line.index(',')]
            if c == 0:
                model = line[line.index(':')+2:line.index('_')]
            if d == 0:
                Parameters = line[line.index(':')+2:line.index('\n')]
            if e == 0:
                TEST = line[line.index(':')+2:line.index('\n')]
            if h == 0:
                val = line[line.index(':')+2:line.index('\n')]
            if f == 0:
                TRAIN = line[line.index(':') + 2:line.index('\n')]
            if g == 0:
                # Epochs is the last field of interest -> stop reading this log
                Epochs = line[line.index(':') + 2:line.index('\n')]
                totals.append([dataset, model, net, Parameters, TRAIN, val,TEST, Epochs])
                break
    # sort by dataset name, model name, then layer count (numeric)
    totals.sort(key=lambda x: ((x[0]), (x[1]), int(x[2])), reverse=False)
    out = []
    calculate = []
    for i in range(totals.__len__()):
        out.append(totals[i])
        csv_writer.writerow(out[i])
        if (i+1)%4 == 0:
            # summarise the last 4 rows (same config, different seeds): mean±std
            avg_train_acc = np.array(totals[i-3:i+1])[:,4]
            avg_val_acc = np.array(totals[i-3:i+1])[:,5]
            avg_test_acc = np.array(totals[i-3:i+1])[:,6]
            avg_epoch = np.array(totals[i-3:i+1])[:,7]
            train_acc=str(np.around(np.mean(np.array(avg_train_acc, dtype=np.float32)),decimals=4))+'±'+str(np.around(np.std(np.array(avg_train_acc, dtype=np.float32),ddof = 1),decimals=4))
            val_acc = str(np.around(np.mean(np.array(avg_val_acc, dtype=np.float32)),decimals=4)) + '±' + str(np.around(np.std(np.array(avg_val_acc, dtype=np.float32), ddof=1),decimals=4))
            test_acc= str(np.around(np.mean(np.array(avg_test_acc, dtype=np.float32)),decimals=4))+'±'+str(np.around(np.std(np.array(avg_test_acc, dtype=np.float32),ddof = 1),decimals=4))
            Epochs_acc = str(np.around(np.mean(np.array(avg_epoch, dtype=np.float32)),decimals=4))+'±'+str(np.around(np.std(np.array(avg_epoch, dtype=np.float32),ddof = 1),decimals=4))
            calculate.append([out[i-1][0], out[i-1][1], out[i-1][2], out[i-1][3], train_acc, val_acc ,test_acc, Epochs_acc])
            csv_writer.writerow(calculate[int((i+1)/4-1)])
    csv_file.close()
    file.close()
def findAllFile(base):
    """Yield the full path of every .txt file found under *base*, recursively."""
    for dirpath, _dirnames, filenames in os.walk(base):
        for filename in filenames:
            if filename.endswith('.txt'):
                yield os.path.join(dirpath, filename)
def main():
    """Entry point: aggregate every result log under the results directory into results.csv."""
    results_dir = 'SBMs_node_classification/results/'
    np.set_printoptions(precision=4)
    write2csv(results_dir)
main()
| 3,972
| 45.197674
| 189
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/out/log2csv-ogb.py
|
import os
import re
import numpy as np
import csv
def write2csv(path):
    """
    Aggregate OGB experiment logs under *path* into 'results.csv'.

    Walks every *.txt log, extracts dataset / model / layer count / parameter
    count and the train/val/test accuracies plus convergence epochs, writes
    one CSV row per log, and after every group of 4 logs (4 seeds of the same
    configuration) appends a mean±std summary row.

    NOTE(review): log files opened inside the loop are only closed via the
    final file.close() (last handle only) — the rest leak until GC.
    """
    csv_file=open('results.csv','w',encoding='gbk',newline='')
    csv_writer=csv.writer(csv_file)
    csv_writer.writerow(['data','model','L','params','train','val','test','epoch'])
    totals = []
    for path in findAllFile(path):
        print(path)
        file=open(path)
        iterf=iter(file)
        for line in iterf:
            # a..h are 0 when the line starts with the given marker
            a = line.find('Dataset:')
            b = line.find('net_params={\'L\':')
            c=line.find('Model:')
            d=line.find('Total Parameters:')
            e=line.find('TEST ACCURACY:')
            h=line.find('val ACCURACY:')
            f=line.find('TRAIN ACCURACY:')
            g=line.find(' Convergence Time (Epochs):')
            if a == 0:
                dataset = line[line.index(':') + 2:line.index(',')]
            if b == 0:
                net = line[line.index(':') + 2:line.index(',')]
            if c == 0:
                model = line[line.index(':')+2:line.index('_')]
            if d == 0:
                Parameters = line[line.index(':')+2:line.index('\n')]
            if e == 0:
                TEST = line[line.index(':')+2:line.index('\n')]
            if h == 0:
                val = line[line.index(':')+2:line.index('\n')]
            if f == 0:
                TRAIN = line[line.index(':') + 2:line.index('\n')]
            if g == 0:
                # Epochs is the last field of interest -> stop reading this log
                Epochs = line[line.index(':') + 2:line.index('\n')]
                totals.append([dataset, model, net, Parameters, TRAIN, val, TEST, Epochs])
                break
    # sort by dataset name, model name, then layer count (numeric)
    totals.sort(key=lambda x: ((x[0]), (x[1]), int(x[2])), reverse=False)
    out = []
    calculate = []
    for i in range(totals.__len__()):
        out.append(totals[i])
        csv_writer.writerow(out[i])
        if (i+1)%4 == 0:
            # summarise the last 4 rows (same config, different seeds): mean±std
            avg_train_acc = np.array(totals[i-3:i+1])[:,4]
            avg_val_acc = np.array(totals[i-3:i+1])[:,5]
            avg_test_acc = np.array(totals[i - 3:i + 1])[:, 6]
            avg_epoch = np.array(totals[i-3:i+1])[:,7]
            train_acc = str(
                np.around(np.mean(np.array(avg_train_acc, dtype=np.float32)), decimals=4)) + '±' + str(
                np.around(np.std(np.array(avg_train_acc, dtype=np.float32), ddof=1), decimals=4))
            val_acc = str(np.around(np.mean(np.array(avg_val_acc, dtype=np.float32)), decimals=4)) + '±' + str(
                np.around(np.std(np.array(avg_val_acc, dtype=np.float32), ddof=1), decimals=4))
            test_acc = str(
                np.around(np.mean(np.array(avg_test_acc, dtype=np.float32)), decimals=4)) + '±' + str(
                np.around(np.std(np.array(avg_test_acc, dtype=np.float32), ddof=1), decimals=4))
            Epochs_acc = str(np.around(np.mean(np.array(avg_epoch, dtype=np.float32)),decimals=4))+'±'+str(np.around(np.std(np.array(avg_epoch, dtype=np.float32),ddof = 1),decimals=4))
            calculate.append([out[i-1][0], out[i-1][1], out[i-1][2], out[i-1][3], train_acc, val_acc,test_acc , Epochs_acc])
            csv_writer.writerow(calculate[int((i+1)/4-1)])
    csv_file.close()
    file.close()
def findAllFile(base):
    """Yield the full path of every .txt file found under *base*, recursively."""
    for dirpath, _dirnames, filenames in os.walk(base):
        for filename in filenames:
            if filename.endswith('.txt'):
                yield os.path.join(dirpath, filename)
def main():
    """Entry point: aggregate every result log under the results directory into results.csv."""
    results_dir = 'out-input/posenc/results/'
    np.set_printoptions(precision=4)
    write2csv(results_dir)
main()
| 4,751
| 49.021053
| 203
|
py
|
SleePyCo
|
SleePyCo-main/train_mtcl.py
|
import os
import json
import argparse
import warnings
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from utils import *
from loader import EEGDataLoader
from models.main_model import MainModel
class OneFoldTrainer:
    """Supervised trainer for one cross-validation fold.

    Builds the model, train/val/test dataloaders, loss and optimizer from the
    JSON `config`; supports scratch / fullyfinetune / freezefinetune modes and
    runs epoch-based training with periodic validation and early stopping.
    """
    def __init__(self, args, fold, config):
        # args: parsed CLI namespace (only `args.gpu` is used here)
        # fold: 1-based cross-validation fold index
        # config: experiment configuration dict loaded from JSON
        self.args = args
        self.fold = fold
        self.cfg = config
        self.ds_cfg = config['dataset']
        self.fp_cfg = config['feature_pyramid']
        self.tp_cfg = config['training_params']
        self.es_cfg = self.tp_cfg['early_stopping']
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('[INFO] Config name: {}'.format(config['name']))
        self.train_iter = 0
        self.model = self.build_model()
        self.loader_dict = self.build_dataloader()
        self.criterion = nn.CrossEntropyLoss()
        # Freeze/unfreeze must happen BEFORE the optimizer collects the
        # trainable parameters on the next line.
        self.activate_train_mode()
        self.optimizer = optim.Adam([p for p in self.model.parameters() if p.requires_grad], lr=self.tp_cfg['lr'], weight_decay=self.tp_cfg['weight_decay'])
        self.ckpt_path = os.path.join('checkpoints', config['name'])
        self.ckpt_name = 'ckpt_fold-{0:02d}.pth'.format(self.fold)
        self.early_stopping = EarlyStopping(patience=self.es_cfg['patience'], verbose=True, ckpt_path=self.ckpt_path, ckpt_name=self.ckpt_name, mode=self.es_cfg['mode'])
    def build_model(self):
        """Build the DataParallel-wrapped model; when not training from
        scratch, load the matching pretrain checkpoint for this fold."""
        model = MainModel(self.cfg)
        print('[INFO] Number of params of model: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
        model = torch.nn.DataParallel(model, device_ids=list(range(len(self.args.gpu.split(",")))))
        if self.tp_cfg['mode'] != 'scratch':
            print('[INFO] Model loaded for finetune')
            # Derive the pretrain checkpoint name from the current config name
            # (pretraining used seq_len=1 and a single feature-pyramid scale).
            load_name = self.cfg['name'].replace('SL-{:02d}'.format(self.ds_cfg['seq_len']), 'SL-01')
            load_name = load_name.replace('numScales-{}'.format(self.fp_cfg['num_scales']), 'numScales-1')
            load_name = load_name.replace(self.tp_cfg['mode'], 'pretrain')
            load_path = os.path.join('checkpoints', load_name, 'ckpt_fold-{0:02d}.pth'.format(self.fold))
            # strict=False: head keys absent from the pretrain checkpoint stay
            # randomly initialized.
            # NOTE(review): loading happens after the DataParallel wrap, so the
            # checkpoint presumably carries 'module.'-prefixed keys — confirm.
            model.load_state_dict(torch.load(load_path), strict=False)
        model.to(self.device)
        print('[INFO] Model prepared, Device used: {} GPU:{}'.format(self.device, self.args.gpu))
        return model
    def build_dataloader(self):
        """Create train/val/test dataloaders (4 workers per listed GPU)."""
        train_dataset = EEGDataLoader(self.cfg, self.fold, set='train')
        train_loader = DataLoader(dataset=train_dataset, batch_size=self.tp_cfg['batch_size'], shuffle=True, num_workers=4*len(self.args.gpu.split(",")), pin_memory=True)
        val_dataset = EEGDataLoader(self.cfg, self.fold, set='val')
        val_loader = DataLoader(dataset=val_dataset, batch_size=self.tp_cfg['batch_size'], shuffle=False, num_workers=4*len(self.args.gpu.split(",")), pin_memory=True)
        test_dataset = EEGDataLoader(self.cfg, self.fold, set='test')
        test_loader = DataLoader(dataset=test_dataset, batch_size=self.tp_cfg['batch_size'], shuffle=False, num_workers=4*len(self.args.gpu.split(",")), pin_memory=True)
        print('[INFO] Dataloader prepared')
        return {'train': train_loader, 'val': val_loader, 'test': test_loader}
    def activate_train_mode(self):
        """Switch the model to train mode; in 'freezefinetune' freeze the
        backbone except the lateral convs used by the feature pyramid."""
        self.model.train()
        if self.tp_cfg['mode'] == 'freezefinetune':
            print('[INFO] Freeze backone')
            self.model.module.feature.train(False)
            for p in self.model.module.feature.parameters():
                p.requires_grad = False
            # conv_c5 is always trainable; deeper lateral convs are unfrozen
            # only when the pyramid uses more scales.
            print('[INFO] Unfreeze conv_c5')
            self.model.module.feature.conv_c5.train(True)
            for p in self.model.module.feature.conv_c5.parameters(): p.requires_grad = True
            if self.fp_cfg['num_scales'] > 1:
                print('[INFO] Unfreeze conv_c4')
                self.model.module.feature.conv_c4.train(True)
                for p in self.model.module.feature.conv_c4.parameters(): p.requires_grad = True
            if self.fp_cfg['num_scales'] > 2:
                print('[INFO] Unfreeze conv_c3')
                self.model.module.feature.conv_c3.train(True)
                for p in self.model.module.feature.conv_c3.parameters(): p.requires_grad = True
    def train_one_epoch(self, epoch):
        """Run one training epoch; validate every `val_period` iterations.

        The model returns one logit tensor per pyramid scale; losses are summed
        over scales and predictions are taken from the summed logits.
        Note: `epoch` itself is unused in the body (kept for the caller's API).
        """
        correct, total, train_loss = 0, 0, 0
        for i, (inputs, labels) in enumerate(self.loader_dict['train']):
            loss = 0
            total += labels.size(0)
            inputs = inputs.to(self.device)
            labels = labels.view(-1).to(self.device)
            outputs = self.model(inputs)
            outputs_sum = torch.zeros_like(outputs[0])
            for j in range(len(outputs)):
                loss += self.criterion(outputs[j], labels)
                outputs_sum += outputs[j]
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            predicted = torch.argmax(outputs_sum, 1)
            correct += predicted.eq(labels).sum().item()
            self.train_iter += 1
            progress_bar(i, len(self.loader_dict['train']), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                        % (train_loss / (i + 1), 100. * correct / total, correct, total))
            if self.train_iter % self.tp_cfg['val_period'] == 0:
                print('')
                val_acc, val_loss = self.evaluate(mode='val')
                self.early_stopping(val_acc, val_loss, self.model)
                # Re-apply train mode (and freezefinetune freezing) after eval.
                self.activate_train_mode()
                if self.early_stopping.early_stop:
                    break
    @torch.no_grad()
    def evaluate(self, mode):
        """Evaluate on the 'val' or 'test' loader.

        Returns:
            (accuracy_percent, total_loss) for mode='val';
            (y_true, y_pred_logits) numpy arrays for mode='test'.
        Raises:
            NotImplementedError: for any other mode.
        """
        self.model.eval()
        correct, total, eval_loss = 0, 0, 0
        y_true = np.zeros(0)
        y_pred = np.zeros((0, self.cfg['classifier']['num_classes']))
        for i, (inputs, labels) in enumerate(self.loader_dict[mode]):
            loss = 0
            total += labels.size(0)
            inputs = inputs.to(self.device)
            labels = labels.view(-1).to(self.device)
            outputs = self.model(inputs)
            outputs_sum = torch.zeros_like(outputs[0])
            for j in range(len(outputs)):
                loss += self.criterion(outputs[j], labels)
                outputs_sum += outputs[j]
            eval_loss += loss.item()
            predicted = torch.argmax(outputs_sum, 1)
            correct += predicted.eq(labels).sum().item()
            y_true = np.concatenate([y_true, labels.cpu().numpy()])
            y_pred = np.concatenate([y_pred, outputs_sum.cpu().numpy()])
            progress_bar(i, len(self.loader_dict[mode]), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                        % (eval_loss / (i + 1), 100. * correct / total, correct, total))
        if mode == 'val':
            return 100. * correct / total, eval_loss
        elif mode == 'test':
            return y_true, y_pred
        else:
            raise NotImplementedError
    def run(self):
        """Train until early stopping or max_epochs, then test using the best
        (early-stopping) checkpoint. Returns (y_true, y_pred)."""
        for epoch in range(self.tp_cfg['max_epochs']):
            print('\n[INFO] Fold: {}, Epoch: {}'.format(self.fold, epoch))
            self.train_one_epoch(epoch)
            if self.early_stopping.early_stop:
                break
        # Reload the best checkpoint saved by EarlyStopping before testing.
        self.model.load_state_dict(torch.load(os.path.join(self.ckpt_path, self.ckpt_name)))
        y_true, y_pred = self.evaluate(mode='test')
        print('')
        return y_true, y_pred
def main():
    """Entry point: seed RNGs, then train every CV fold and print a cumulative
    evaluation report after each fold."""
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=UserWarning)
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--gpu', type=str, default="0", help='gpu id')
    parser.add_argument('--config', type=str, help='config file path')
    args = parser.parse_args()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # For reproducibility
    set_random_seed(args.seed, use_cuda=True)
    with open(args.config) as config_file:
        config = json.load(config_file)
    # Experiment name = config file name without the .json extension.
    config['name'] = os.path.basename(args.config).replace('.json', '')
    Y_true = np.zeros(0)
    Y_pred = np.zeros((0, config['classifier']['num_classes']))
    for fold in range(1, config['dataset']['num_splits'] + 1):
        trainer = OneFoldTrainer(args, fold, config)
        y_true, y_pred = trainer.run()
        Y_true = np.concatenate([Y_true, y_true])
        Y_pred = np.concatenate([Y_pred, y_pred])
        # Report accumulates over all folds processed so far.
        summarize_result(config, fold, Y_true, Y_pred)
if __name__ == "__main__":
main()
| 8,872
| 40.853774
| 170
|
py
|
SleePyCo
|
SleePyCo-main/test.py
|
import os
import json
import argparse
import warnings
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from utils import *
from loader import EEGDataLoader
from train_mtcl import OneFoldTrainer
from models.main_model import MainModel
class OneFoldEvaluator(OneFoldTrainer):
    """Test-only variant of OneFoldTrainer: builds the model and the test
    loader, loads the fold checkpoint, and reuses the parent's evaluate()."""
    def __init__(self, args, fold, config):
        # Intentionally does NOT call super().__init__(): only the members
        # needed by evaluate(mode='test') are set up here.
        self.args = args
        self.fold = fold
        self.cfg = config
        self.ds_cfg = config['dataset']
        self.tp_cfg = config['training_params']
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('[INFO] Config name: {}'.format(config['name']))
        self.model = self.build_model()
        self.loader_dict = self.build_dataloader()
        self.criterion = nn.CrossEntropyLoss()
        self.ckpt_path = os.path.join('checkpoints', config['name'])
        self.ckpt_name = 'ckpt_fold-{0:02d}.pth'.format(self.fold)
    def build_model(self):
        """Build a freshly initialized DataParallel-wrapped model; trained
        weights are loaded later in run()."""
        model = MainModel(self.cfg)
        print('[INFO] Number of params of model: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
        model = torch.nn.DataParallel(model, device_ids=list(range(len(self.args.gpu.split(",")))))
        model.to(self.device)
        print('[INFO] Model prepared, Device used: {} GPU:{}'.format(self.device, self.args.gpu))
        return model
    def build_dataloader(self):
        """Create the test loader only."""
        test_dataset = EEGDataLoader(self.cfg, self.fold, set='test')
        test_loader = DataLoader(dataset=test_dataset, batch_size=self.tp_cfg['batch_size'], shuffle=False, num_workers=4*len(self.args.gpu.split(",")), pin_memory=True)
        print('[INFO] Dataloader prepared')
        return {'test': test_loader}
    def run(self):
        """Load this fold's checkpoint and return (y_true, y_pred) on the test set."""
        print('\n[INFO] Fold: {}'.format(self.fold))
        self.model.load_state_dict(torch.load(os.path.join(self.ckpt_path, self.ckpt_name)))
        y_true, y_pred = self.evaluate(mode='test')
        print('')
        return y_true, y_pred
def main():
    """Entry point: evaluate saved checkpoints over all CV folds and print a
    cumulative report after each fold."""
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=UserWarning)
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--gpu', type=str, default="0", help='gpu id')
    parser.add_argument('--config', type=str, help='config file path')
    args = parser.parse_args()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    with open(args.config) as config_file:
        config = json.load(config_file)
    # Experiment name = config file name without the .json extension; this
    # selects the checkpoints/<name>/ directory to load from.
    config['name'] = os.path.basename(args.config).replace('.json', '')
    Y_true = np.zeros(0)
    Y_pred = np.zeros((0, config['classifier']['num_classes']))
    for fold in range(1, config['dataset']['num_splits'] + 1):
        evaluator = OneFoldEvaluator(args, fold, config)
        y_true, y_pred = evaluator.run()
        Y_true = np.concatenate([Y_true, y_true])
        Y_pred = np.concatenate([Y_pred, y_pred])
        summarize_result(config, fold, Y_true, Y_pred)
if __name__ == "__main__":
main()
| 3,233
| 34.933333
| 169
|
py
|
SleePyCo
|
SleePyCo-main/train_crl.py
|
import os
import json
import argparse
import warnings
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from utils import *
from loss import SupConLoss
from loader import EEGDataLoader
from models.main_model import MainModel
class OneFoldTrainer:
    """Contrastive (SupCon) pretraining for one cross-validation fold.

    Trains the model with a supervised contrastive loss on two augmented views
    per sample; early stopping monitors the validation loss only.
    """
    def __init__(self, args, fold, config):
        # args: parsed CLI namespace (only `args.gpu` is used here)
        # fold: 1-based cross-validation fold index
        # config: experiment configuration dict loaded from JSON
        self.args = args
        self.fold = fold
        self.cfg = config
        self.tp_cfg = config['training_params']
        self.es_cfg = self.tp_cfg['early_stopping']
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('[INFO] Config name: {}'.format(config['name']))
        self.train_iter = 0
        self.model = self.build_model()
        self.loader_dict = self.build_dataloader()
        self.criterion = SupConLoss(temperature=self.tp_cfg['temperature'])
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.tp_cfg['lr'], weight_decay=self.tp_cfg['weight_decay'])
        self.ckpt_path = os.path.join('checkpoints', config['name'])
        self.ckpt_name = 'ckpt_fold-{0:02d}.pth'.format(self.fold)
        self.early_stopping = EarlyStopping(patience=self.es_cfg['patience'], verbose=True, ckpt_path=self.ckpt_path, ckpt_name=self.ckpt_name, mode=self.es_cfg['mode'])
    def build_model(self):
        """Build the DataParallel-wrapped model on the target device."""
        model = MainModel(self.cfg)
        print('[INFO] Number of params of model: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
        model = torch.nn.DataParallel(model, device_ids=list(range(len(self.args.gpu.split(",")))))
        model.to(self.device)
        print('[INFO] Model prepared, Device used: {} GPU:{}'.format(self.device, self.args.gpu))
        return model
    def build_dataloader(self):
        """Create train/val loaders.

        NOTE(review): both loaders share the same args, so the val loader is
        also shuffled — harmless here since only the loss is averaged.
        """
        dataloader_args = {'batch_size': self.tp_cfg['batch_size'], 'shuffle': True, 'num_workers': 4*len(self.args.gpu.split(",")), 'pin_memory': True}
        train_dataset = EEGDataLoader(self.cfg, self.fold, set='train')
        train_loader = DataLoader(dataset=train_dataset, **dataloader_args)
        val_dataset = EEGDataLoader(self.cfg, self.fold, set='val')
        val_loader = DataLoader(dataset=val_dataset, **dataloader_args)
        print('[INFO] Dataloader prepared')
        return {'train': train_loader, 'val': val_loader}
    def train_one_epoch(self):
        """One SupCon epoch: each batch carries two augmented views per sample."""
        self.model.train()
        train_loss = 0
        for i, (inputs, labels) in enumerate(self.loader_dict['train']):
            loss = 0
            labels = labels.view(-1).to(self.device)
            # inputs is [view_a, view_b]; stack the views along the batch axis.
            inputs = torch.cat([inputs[0], inputs[1]], dim=0).to(self.device)
            outputs = self.model(inputs)[0]
            # Split back into the two views and build [bsz, n_views, dim].
            f1, f2 = torch.split(outputs, [labels.size(0), labels.size(0)], dim=0)
            features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)
            loss += self.criterion(features, labels)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            self.train_iter += 1
            progress_bar(i, len(self.loader_dict['train']), 'Lr: %.4e | Loss: %.3f' %(get_lr(self.optimizer), train_loss / (i + 1)))
            if self.train_iter % self.tp_cfg['val_period'] == 0:
                print('')
                val_loss = self.evaluate(mode='val')
                # val_acc is None: early stopping must be in 'min' (loss) mode.
                self.early_stopping(None, val_loss, self.model)
                self.model.train()
                if self.early_stopping.early_stop:
                    break
    @torch.no_grad()
    def evaluate(self, mode):
        """Compute the total SupCon loss on the given split. The single view is
        duplicated to satisfy the loss's [bsz, n_views, dim] input contract."""
        self.model.eval()
        eval_loss = 0
        for i, (inputs, labels) in enumerate(self.loader_dict[mode]):
            loss = 0
            inputs = inputs.to(self.device)
            labels = labels.view(-1).to(self.device)
            outputs = self.model(inputs)[0]
            features = outputs.unsqueeze(1).repeat(1, 2, 1)
            loss += self.criterion(features, labels)
            eval_loss += loss.item()
            progress_bar(i, len(self.loader_dict[mode]), 'Lr: %.4e | Loss: %.3f' %(get_lr(self.optimizer), eval_loss / (i + 1)))
        return eval_loss
    def run(self):
        """Train until early stopping fires or max_epochs is reached."""
        for epoch in range(self.tp_cfg['max_epochs']):
            print('\n[INFO] Fold: {}, Epoch: {}'.format(self.fold, epoch))
            self.train_one_epoch()
            if self.early_stopping.early_stop:
                break
def main():
    """Entry point: seed RNGs, then run SupCon pretraining for every CV fold.
    Checkpoints are written by EarlyStopping; no test evaluation here."""
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=UserWarning)
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--gpu', type=str, default="0", help='gpu id')
    parser.add_argument('--config', type=str, help='config file path')
    args = parser.parse_args()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # For reproducibility
    set_random_seed(args.seed, use_cuda=True)
    with open(args.config) as config_file:
        config = json.load(config_file)
    # Experiment name = config file name without the .json extension.
    config['name'] = os.path.basename(args.config).replace('.json', '')
    for fold in range(1, config['dataset']['num_splits'] + 1):
        trainer = OneFoldTrainer(args, fold, config)
        trainer.run()
if __name__ == "__main__":
main()
| 5,454
| 36.62069
| 169
|
py
|
SleePyCo
|
SleePyCo-main/transform.py
|
import torch
import random
import numpy as np
from scipy import signal
from scipy.ndimage.interpolation import shift
class TwoTransform:
    """Apply the wrapped transform twice to one input, producing two views
    (used to create the positive pair for contrastive pretraining)."""
    def __init__(self, transform):
        self.transform = transform
    def __call__(self, x):
        first_view = self.transform(x)
        second_view = self.transform(x)
        return [first_view, second_view]
class Compose:
    """Chain a list of transforms.

    mode='full'   : apply every transform in order.
    mode='random' : apply exactly one transform, picked uniformly.
    mode='shuffle': apply every transform once, in random order.
    """
    def __init__(self, transforms, mode='full'):
        self.transforms = transforms
        self.mode = mode
    def __call__(self, x):
        if self.mode == 'full':
            for transform in self.transforms:
                x = transform(x)
        elif self.mode == 'random':
            pick = random.randint(0, len(self.transforms) - 1)
            x = self.transforms[pick](x)
        elif self.mode == 'shuffle':
            shuffled = np.random.choice(self.transforms, len(self.transforms), replace=False)
            for transform in shuffled:
                x = transform(x)
        else:
            raise NotImplementedError
        return x
    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string
class RandomAmplitudeScale:
    """With probability `p`, multiply the signal by a scale drawn uniformly
    from `range`; otherwise return the input untouched."""
    def __init__(self, range=(0.5, 2.0), p=0.5):
        self.range = range
        self.p = p
    def __call__(self, x):
        if torch.rand(1) >= self.p:
            return x
        factor = random.uniform(self.range[0], self.range[1])
        return x * factor
    def __repr__(self):
        return self.__class__.__name__ + '()'
class RandomDCShift:
    """With probability `p`, add a constant DC offset drawn uniformly from
    `range`; otherwise return the input untouched."""
    def __init__(self, range=(-10.0, 10.0), p=0.5):
        self.range = range
        self.p = p
    def __call__(self, x):
        if torch.rand(1) >= self.p:
            return x
        # local renamed from `shift` to avoid shadowing scipy's shift()
        offset = random.uniform(self.range[0], self.range[1])
        return x + offset
    def __repr__(self):
        return self.__class__.__name__ + '()'
class RandomTimeShift:
    """With probability `p`, translate the signal in time by a random integer
    number of samples from `range` (scipy.ndimage.shift semantics: vacated
    samples filled per `mode`/`cval`). The shifted output always carries a
    leading channel axis of size 1."""
    def __init__(self, range=(-300, 300), mode='constant', cval=0.0, p=0.5):
        self.range = range
        self.mode = mode
        self.cval = cval
        self.p = p
    def __call__(self, x):
        if torch.rand(1) >= self.p:
            return x
        n_shift = random.randint(self.range[0], self.range[1])
        # work on the 1-D signal; (1, N) inputs are squeezed first
        signal_1d = x[0] if len(x.shape) == 2 else x
        shifted = shift(input=signal_1d, shift=n_shift, mode=self.mode, cval=self.cval)
        return np.expand_dims(shifted, axis=0)
    def __repr__(self):
        return self.__class__.__name__ + '()'
class RandomZeroMasking:
    """With probability `p`, zero out one random contiguous window whose length
    is drawn from `range`; otherwise return the input untouched."""
    def __init__(self, range=(0, 300), p=0.5):
        self.range = range
        self.p = p
    def __call__(self, x):
        if torch.rand(1) >= self.p:
            return x
        # draw length first, then position — order matters for RNG parity
        mask_len = random.randint(self.range[0], self.range[1])
        start = random.randint(0, x.shape[1] - mask_len)
        mask = np.concatenate([
            np.ones((1, start)),
            np.zeros((1, mask_len)),
            np.ones((1, x.shape[1] - mask_len - start)),
        ], axis=1)
        return x * mask
    def __repr__(self):
        return self.__class__.__name__ + '()'
class RandomAdditiveGaussianNoise:
    """With probability `p`, add zero-mean Gaussian noise whose sigma is drawn
    uniformly from `range`; otherwise return the input untouched."""
    def __init__(self, range=(0.0, 0.2), p=0.5):
        self.range = range
        self.p = p
    def __call__(self, x):
        if torch.rand(1) >= self.p:
            return x
        sigma = random.uniform(self.range[0], self.range[1])
        noise = np.random.normal(0, sigma, x.shape)
        return x + noise
    def __repr__(self):
        return self.__class__.__name__ + '()'
class RandomBandStopFilter:
    """With probability `p`, notch-filter a random `band_width`-Hz band whose
    lower edge is drawn uniformly from `range` (frequencies in Hz at the given
    sampling rate)."""
    def __init__(self, range=(0.5, 30.0), band_width=2.0, sampling_rate=100.0, p=0.5):
        self.range = range
        self.band_width = band_width
        self.sampling_rate = sampling_rate
        self.p = p
    def __call__(self, x):
        if torch.rand(1) < self.p:
            low_edge = random.uniform(self.range[0], self.range[1])
            center = low_edge + self.band_width / 2.0
            # Q factor = center / bandwidth, per scipy.signal.iirnotch
            b, a = signal.iirnotch(center, center / self.band_width, fs=self.sampling_rate)
            x = signal.lfilter(b, a, x)
        return x
    def __repr__(self):
        return self.__class__.__name__ + '()'
| 4,204
| 26.48366
| 144
|
py
|
SleePyCo
|
SleePyCo-main/loss.py
|
import torch
import torch.nn as nn
class SupConLoss(nn.Module):
    """Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
    It also supports the unsupervised contrastive loss in SimCLR"""
    def __init__(self, temperature=0.07, contrast_mode='all',
                 base_temperature=0.07):
        # temperature: softmax temperature for the similarity logits
        # contrast_mode: 'all' = every view is an anchor; 'one' = first view only
        # base_temperature: reference temperature used to rescale the loss
        super(SupConLoss, self).__init__()
        self.temperature = temperature
        self.contrast_mode = contrast_mode
        self.base_temperature = base_temperature
    def forward(self, features, labels=None, mask=None):
        """Compute loss for model. If both `labels` and `mask` are None,
        it degenerates to SimCLR unsupervised loss:
        https://arxiv.org/pdf/2002.05709.pdf
        Args:
            features: hidden vector of shape [bsz, n_views, ...].
            labels: ground truth of shape [bsz].
            mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
                has the same class as sample i. Can be asymmetric.
        Returns:
            A loss scalar.
        """
        device = (torch.device('cuda')
                  if features.is_cuda
                  else torch.device('cpu'))
        if len(features.shape) < 3:
            raise ValueError('`features` needs to be [bsz, n_views, ...],'
                             'at least 3 dimensions are required')
        if len(features.shape) > 3:
            # Flatten any trailing feature dims to one vector per view.
            features = features.view(features.shape[0], features.shape[1], -1)
        batch_size = features.shape[0]
        if labels is not None and mask is not None:
            raise ValueError('Cannot define both `labels` and `mask`')
        elif labels is None and mask is None:
            # Unsupervised (SimCLR): positives are the other views of self.
            mask = torch.eye(batch_size, dtype=torch.float32).to(device)
        elif labels is not None:
            labels = labels.contiguous().view(-1, 1)
            if labels.shape[0] != batch_size:
                raise ValueError('Num of labels does not match num of features')
            # Supervised: positives are samples sharing the same label.
            mask = torch.eq(labels, labels.T).float().to(device)
        else:
            mask = mask.float().to(device)
        contrast_count = features.shape[1]
        # [bsz * n_views, dim]: all views stacked along the batch axis.
        contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
        if self.contrast_mode == 'one':
            anchor_feature = features[:, 0]
            anchor_count = 1
        elif self.contrast_mode == 'all':
            anchor_feature = contrast_feature
            anchor_count = contrast_count
        else:
            raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
        # compute logits
        anchor_dot_contrast = torch.div(
            torch.matmul(anchor_feature, contrast_feature.T),
            self.temperature)
        # for numerical stability
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()
        # tile mask
        mask = mask.repeat(anchor_count, contrast_count)
        # mask-out self-contrast cases
        logits_mask = torch.scatter(
            torch.ones_like(mask),
            1,
            torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
            0
        )
        mask = mask * logits_mask
        # compute log_prob
        exp_logits = torch.exp(logits) * logits_mask
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
        # compute mean of log-likelihood over positive
        # NOTE(review): if an anchor has no positives, mask.sum(1) is 0 and
        # this division yields NaN — batching must guarantee >= 1 positive
        # per anchor (two views per sample satisfy this).
        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
        # loss
        loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
        loss = loss.view(anchor_count, batch_size).mean()
        return loss
| 3,650
| 38.684783
| 80
|
py
|
SleePyCo
|
SleePyCo-main/utils.py
|
import os
import sys
import math
import time
import torch
import random
import numpy as np
import sklearn.metrics as skmet
from terminaltables import SingleTable
from termcolor import colored
# Terminal width for the progress bar; requires a real TTY ('stty size' fails
# when stdin is not a terminal).
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 25.
# Timing state shared across successive progress_bar() calls.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
    """Render a single-line console progress bar with step/total timing.

    Args:
        current: zero-based index of the current step.
        total: total number of steps.
        msg: optional status text appended after the timing info.
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time()  # Reset for new bar.
    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')
    cur_time = time.time()
    step_time = cur_time - last_time
    last_time = cur_time
    tot_time = cur_time - begin_time
    L = []
    L.append('  Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)
    msg = ''.join(L)
    sys.stdout.write(msg)
    # Pad the rest of the line with spaces so stale text is overwritten.
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')
    # Go back to the center of the bar.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))
    # Carriage-return to redraw in place; newline when finished.
    if current < total-1:
        sys.stdout.write('\r')
    else:
        sys.stdout.write('\n')
    sys.stdout.flush()
def format_time(seconds):
    """Render a duration (in seconds) using at most the two most significant
    non-zero units among D/h/m/s/ms, e.g. '1h1m' or '2s500ms'; '0ms' for 0."""
    remaining = seconds
    days = int(remaining / 3600 / 24)
    remaining = remaining - days * 3600 * 24
    hours = int(remaining / 3600)
    remaining = remaining - hours * 3600
    minutes = int(remaining / 60)
    remaining = remaining - minutes * 60
    whole_seconds = int(remaining)
    millis = int((remaining - whole_seconds) * 1000)
    parts = []
    for value, unit in ((days, 'D'), (hours, 'h'), (minutes, 'm'),
                        (whole_seconds, 's'), (millis, 'ms')):
        if value > 0 and len(parts) < 2:
            parts.append('%d%s' % (value, unit))
    return ''.join(parts) if parts else '0ms'
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first param group
    (None if the optimizer has no param groups)."""
    groups = optimizer.param_groups
    return groups[0]['lr'] if groups else None
class EarlyStopping:
    """Early stops the training if the monitored metric doesn't improve after
    a given patience.

    mode='min' monitors a decreasing quantity (e.g. validation loss);
    mode='max' monitors an increasing one (e.g. validation accuracy). The best
    model is checkpointed to ``ckpt_path/ckpt_name`` on every improvement.
    """
    def __init__(self, patience=7, verbose=False, delta=0, ckpt_path='./checkpoints', ckpt_name='checkpoint.pth', mode='min'):
        """
        Args:
            patience (int): How long to wait after the last improvement.
                Default: 7
            verbose (bool): If True, prints a message for each improvement.
                Default: False
            delta (float): Minimum change in the monitored quantity to qualify
                as an improvement. Default: 0
            ckpt_path (str): Directory the checkpoint is saved into (created
                if missing). Default: './checkpoints'
            ckpt_name (str): Checkpoint file name; '.pth' is appended when
                absent. Default: 'checkpoint.pth'
            mode (str): 'min' or 'max'. Default: 'min'
        Raises:
            NotImplementedError: if `mode` is neither 'min' nor 'max'.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.mode = mode
        if mode == 'max':
            self.init_metric = 0
        elif mode == 'min':
            # BUGFIX: was -np.inf, which made the first verbose message read
            # "loss decreased (-inf --> x)"; a minimized metric starts at +inf.
            self.init_metric = np.inf
        else:
            raise NotImplementedError
        self.delta = delta
        self.ckpt_path = ckpt_path
        self.ckpt_name = ckpt_name if '.pth' in ckpt_name else ckpt_name + '.pth'
        os.makedirs(self.ckpt_path, exist_ok=True)
    def __call__(self, val_acc, val_loss, model):
        """Record one validation result; checkpoint on improvement, otherwise
        bump the patience counter and flip `early_stop` when exhausted.

        Args:
            val_acc: validation accuracy (used when mode='max'; may be None otherwise).
            val_loss: validation loss (used when mode='min'; may be None otherwise).
            model: the model whose state_dict is checkpointed.
        """
        if self.mode == 'max':
            score = val_acc
            val_metric = val_acc
        elif self.mode == 'min':
            # Negate so that "higher score is better" holds in both modes.
            score = -val_loss
            val_metric = val_loss
        else:
            raise NotImplementedError
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_metric, model)
        elif score < self.best_score + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}\n')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_metric, model)
            self.counter = 0
    def save_checkpoint(self, val_metric, model):
        '''Saves model when the monitored metric improves.'''
        if self.verbose:
            if self.mode == 'max':
                print(f'[INFO] Validation accuracy increased ({self.init_metric:.6f} --> {val_metric:.6f}). Saving model ...\n')
            elif self.mode == 'min':
                print(f'[INFO] Validation loss decreased ({self.init_metric:.6f} --> {val_metric:.6f}). Saving model ...\n')
            else:
                raise NotImplementedError
        torch.save(model.state_dict(), os.path.join(self.ckpt_path, self.ckpt_name))
        # Remember the last saved metric for the next verbose message.
        self.init_metric = val_metric
def summarize_result(config, fold, y_true, y_pred, save=True):
    """Print overall and per-class sleep-staging metrics; optionally save a
    one-line summary to results/<config name>.txt.

    Args:
        config: experiment configuration dict (uses config['name']).
        fold: number of folds included so far (display/report only).
        y_true: 1-D array of true labels; classes are the floats 0.0..4.0
            mapped to W/N1/N2/N3/R in the per-class table below.
        y_pred: 2-D array of per-class logits/scores; argmax gives the
            predicted class.
        save: when True, overwrite the cumulative results file.
    """
    os.makedirs('results', exist_ok=True)
    y_pred_argmax = np.argmax(y_pred, 1)
    result_dict = skmet.classification_report(y_true, y_pred_argmax, digits=3, output_dict=True)
    cm = skmet.confusion_matrix(y_true, y_pred_argmax)
    accuracy = round(result_dict['accuracy']*100, 1)
    macro_f1 = round(result_dict['macro avg']['f1-score']*100, 1)
    kappa = round(skmet.cohen_kappa_score(y_true, y_pred_argmax), 3)
    # Per-class precision/recall/F1; report keys are the stringified float
    # labels ('0.0'..'4.0' = W, N1, N2, N3, REM).
    wpr = round(result_dict['0.0']['precision']*100, 1)
    wre = round(result_dict['0.0']['recall']*100, 1)
    wf1 = round(result_dict['0.0']['f1-score']*100, 1)
    n1pr = round(result_dict['1.0']['precision']*100, 1)
    n1re = round(result_dict['1.0']['recall']*100, 1)
    n1f1 = round(result_dict['1.0']['f1-score']*100, 1)
    n2pr = round(result_dict['2.0']['precision']*100, 1)
    n2re = round(result_dict['2.0']['recall']*100, 1)
    n2f1 = round(result_dict['2.0']['f1-score']*100, 1)
    n3pr = round(result_dict['3.0']['precision']*100, 1)
    n3re = round(result_dict['3.0']['recall']*100, 1)
    n3f1 = round(result_dict['3.0']['f1-score']*100, 1)
    rpr = round(result_dict['4.0']['precision']*100, 1)
    rre = round(result_dict['4.0']['recall']*100, 1)
    rf1 = round(result_dict['4.0']['f1-score']*100, 1)
    overall_data = [
        ['ACC', 'MF1', '\u03BA'],
        [accuracy, macro_f1, kappa],
    ]
    # Confusion matrix rows = actual class, columns = predicted class.
    perclass_data = [
        [colored('A', 'cyan') + '\\' + colored('P', 'green'), 'W', 'N1', 'N2', 'N3', 'R', 'PR', 'RE', 'F1'],
        ['W', cm[0][0], cm[0][1], cm[0][2], cm[0][3], cm[0][4], wpr, wre, wf1],
        ['N1', cm[1][0], cm[1][1], cm[1][2], cm[1][3], cm[1][4], n1pr, n1re, n1f1],
        ['N2', cm[2][0], cm[2][1], cm[2][2], cm[2][3], cm[2][4], n2pr, n2re, n2f1],
        ['N3', cm[3][0], cm[3][1], cm[3][2], cm[3][3], cm[3][4], n3pr, n3re, n3f1],
        ['R', cm[4][0], cm[4][1], cm[4][2], cm[4][3], cm[4][4], rpr, rre, rf1],
    ]
    overall_dt = SingleTable(overall_data, colored('OVERALL RESULT', 'red'))
    perclass_dt = SingleTable(perclass_data, colored('PER-CLASS RESULT', 'red'))
    print('\n[INFO] Evaluation result from fold 1 to {}'.format(fold))
    print('\n' + overall_dt.table)
    print('\n' + perclass_dt.table)
    print(colored('  A', 'cyan') + ': Actual Class, ' + colored('P', 'green') + ': Predicted Class' + '\n\n')
    if save:
        # Single-line summary: fold ACC MF1 kappa then per-class F1 (W..R).
        with open(os.path.join('results', config['name'] + '.txt'), 'w') as f:
            f.write(
                str(fold) + ' ' +
                str(round(result_dict['accuracy']*100, 1)) + ' ' +
                str(round(result_dict['macro avg']['f1-score']*100, 1)) + ' ' +
                str(round(kappa, 3)) + ' ' +
                str(round(result_dict['0.0']['f1-score']*100, 1)) + ' ' +
                str(round(result_dict['1.0']['f1-score']*100, 1)) + ' ' +
                str(round(result_dict['2.0']['f1-score']*100, 1)) + ' ' +
                str(round(result_dict['3.0']['f1-score']*100, 1)) + ' ' +
                str(round(result_dict['4.0']['f1-score']*100, 1)) + ' '
            )
def set_random_seed(seed_value, use_cuda=True):
    """Seed every RNG used during training (numpy, torch, Python, hashing) for
    reproducibility; with use_cuda also seed the CUDA RNGs and force
    deterministic cuDNN behavior."""
    random.seed(seed_value)                           # Python stdlib RNG
    np.random.seed(seed_value)                        # numpy RNG
    torch.manual_seed(seed_value)                     # torch CPU RNG
    os.environ['PYTHONHASHSEED'] = str(seed_value)    # Python hash builtin
    if use_cuda:
        torch.cuda.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)        # all GPUs
        torch.backends.cudnn.deterministic = True     # needed
        torch.backends.cudnn.benchmark = False
| 8,844
| 34.239044
| 129
|
py
|
SleePyCo
|
SleePyCo-main/loader.py
|
import os
import glob
import torch
import numpy as np
from transform import *
from torch.utils.data import Dataset
class EEGDataLoader(Dataset):
    """Sequence-of-epochs EEG dataset for one cross-validation fold.

    Loads per-subject .npz files (arrays 'x' and 'y') for the requested split
    and serves `seq_len`-epoch windows. In 'pretrain' mode, training samples
    are returned as two independently augmented views for contrastive
    learning; otherwise they are (1, n_samples) float tensors.
    """
    def __init__(self, config, fold, set='train'):
        """
        Args:
            config: experiment configuration dict.
            fold: 1-based cross-validation fold index.
            set: which split to serve — 'train', 'val' or 'test'.
        """
        self.set = set
        self.fold = fold
        self.sr = 100  # sampling rate in Hz (30-s epochs -> 3000 samples)
        self.dset_cfg = config['dataset']
        self.root_dir = self.dset_cfg['root_dir']
        self.dset_name = self.dset_cfg['name']
        self.num_splits = self.dset_cfg['num_splits']
        self.eeg_channel = self.dset_cfg['eeg_channel']
        self.seq_len = self.dset_cfg['seq_len']
        self.target_idx = self.dset_cfg['target_idx']
        self.training_mode = config['training_params']['mode']
        self.dataset_path = os.path.join(self.root_dir, 'dset', self.dset_name, 'npz')
        self.inputs, self.labels, self.epochs = self.split_dataset()
        if self.training_mode == 'pretrain':
            # Stochastic augmentation pipeline used to create the two views.
            self.transform = Compose(
                transforms=[
                    RandomAmplitudeScale(),
                    RandomTimeShift(),
                    RandomDCShift(),
                    RandomZeroMasking(),
                    RandomAdditiveGaussianNoise(),
                    RandomBandStopFilter(),
                ]
            )
            self.two_transform = TwoTransform(self.transform)
    def __len__(self):
        """Number of `seq_len`-epoch windows across all loaded recordings."""
        return len(self.epochs)
    def __getitem__(self, idx):
        """Return (inputs, label) for one window.

        inputs: two augmented views in pretrain training; otherwise a
            (1, n_sample) float tensor (ndarray is passed through unchanged
            for pretrain val/test).
        label: the epoch label at `target_idx` within the window.
        """
        n_sample = 30 * self.sr * self.seq_len
        file_idx, idx, seq_len = self.epochs[idx]
        inputs = self.inputs[file_idx][idx:idx+seq_len]
        if self.set == 'train':
            if self.training_mode == 'pretrain':
                assert seq_len == 1
                input_a, input_b = self.two_transform(inputs)
                input_a = torch.from_numpy(input_a).float()
                input_b = torch.from_numpy(input_b).float()
                inputs = [input_a, input_b]
            elif self.training_mode in ['scratch', 'fullyfinetune', 'freezefinetune']:
                inputs = inputs.reshape(1, n_sample)
                inputs = torch.from_numpy(inputs).float()
            else:
                raise NotImplementedError
        else:
            if not self.training_mode == 'pretrain':
                inputs = inputs.reshape(1, n_sample)
                inputs = torch.from_numpy(inputs).float()
        labels = self.labels[file_idx][idx:idx+seq_len]
        labels = torch.from_numpy(labels).long()
        labels = labels[self.target_idx]
        return inputs, labels
    def split_dataset(self):
        """Load the .npz recordings belonging to this fold's split.

        Returns:
            (inputs, labels, epochs) where inputs/labels are per-file arrays
            and epochs lists [file_idx, start_epoch, seq_len] windows.
        Raises:
            NameError: if the configured dataset name is unknown.
        """
        file_idx = 0
        inputs, labels, epochs = [], [], []
        data_root = os.path.join(self.dataset_path, self.eeg_channel)
        data_fname_list = [os.path.basename(x) for x in sorted(glob.glob(os.path.join(data_root, '*.npz')))]
        data_fname_dict = {'train': [], 'test': [], 'val': []}
        split_idx_list = np.load(os.path.join('./split_idx', 'idx_{}.npy'.format(self.dset_name)), allow_pickle=True)
        assert len(split_idx_list) == self.num_splits
        if self.dset_name == 'Sleep-EDF-2013':
            # Leave-one-subject-out: subject (fold-1) is the test set.
            for i in range(len(data_fname_list)):
                subject_idx = int(data_fname_list[i][3:5])
                if subject_idx == self.fold - 1:
                    data_fname_dict['test'].append(data_fname_list[i])
                elif subject_idx in split_idx_list[self.fold - 1]:
                    data_fname_dict['val'].append(data_fname_list[i])
                else:
                    data_fname_dict['train'].append(data_fname_list[i])
        elif self.dset_name == 'Sleep-EDF-2018':
            for i in range(len(data_fname_list)):
                subject_idx = int(data_fname_list[i][3:5])
                if subject_idx in split_idx_list[self.fold - 1][self.set]:
                    data_fname_dict[self.set].append(data_fname_list[i])
        elif self.dset_name == 'MASS' or self.dset_name == 'Physio2018' or self.dset_name == 'SHHS':
            for i in range(len(data_fname_list)):
                if i in split_idx_list[self.fold - 1][self.set]:
                    data_fname_dict[self.set].append(data_fname_list[i])
        else:
            # BUGFIX: the original formatted `self.dataset`, an attribute that
            # does not exist (AttributeError); the intended one is dset_name.
            raise NameError("dataset '{}' cannot be found.".format(self.dset_name))
        for data_fname in data_fname_dict[self.set]:
            npz_file = np.load(os.path.join(data_root, data_fname))
            inputs.append(npz_file['x'])
            labels.append(npz_file['y'])
            seq_len = self.seq_len
            # MASS subsets SS2/SS4/SS5 use 20-s epochs; stretch the window so
            # it still covers the same wall-clock duration.
            if self.dset_name== 'MASS' and ('-02-' in data_fname or '-04-' in data_fname or '-05-' in data_fname):
                seq_len = int(self.seq_len * 1.5)
            for i in range(len(npz_file['y']) - seq_len + 1):
                epochs.append([file_idx, i, seq_len])
            file_idx += 1
        return inputs, labels, epochs
| 4,913
| 39.278689
| 117
|
py
|
SleePyCo
|
SleePyCo-main/models/iitnet.py
|
import torch.nn as nn
def conv3(in_planes, out_planes, stride=1):
    """Build a bias-free 1-D convolution with kernel size 3 and padding 1."""
    conv = nn.Conv1d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
class Bottleneck(nn.Module):
    """1-D ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand + residual.

    The output has ``planes * expansion`` channels; ``downsample`` (if given)
    projects the input so the residual addition is shape-compatible.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Submodules are created in this exact order on purpose so parameter
        # initialization stays reproducible under a fixed RNG seed.
        self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(planes)
        self.conv2 = nn.Conv1d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm1d(planes)
        self.conv3 = nn.Conv1d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm1d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + shortcut
        return self.relu(y)
class IITNetBackbone(nn.Module):
    """IITNet feature extractor: a 1-D stack of Bottleneck blocks (3-4-6-3,
    the ResNet-50 layout).

    ``forward`` returns a list with one feature map: the deepest feature c5
    in 'pretrain' mode, otherwise c5 projected to the feature-pyramid width.
    """

    def __init__(self, config):
        super(IITNetBackbone, self).__init__()
        block = Bottleneck
        self.training_mode = config['training_params']['mode']
        self.inplanes = 16  # running input width, advanced by _make_layer
        self.layers = [3, 4, 6, 3]  # bottleneck blocks per stage
        self.initial_layer = nn.Sequential(
            nn.Conv1d(1, 16, 7, 2, 3, bias=False),
            nn.BatchNorm1d(16),
            nn.ReLU(),
            nn.MaxPool1d(3, 2, 1))
        self.layer1 = self._make_layer(block, 16, self.layers[0], stride=1, first=True)
        self.layer2 = self._make_layer(block, 16, self.layers[1], stride=2)
        self.layer3 = self._make_layer(block, 32, self.layers[2], stride=2)
        self.layer4 = self._make_layer(block, 32, self.layers[3], stride=2)
        self.maxpool = nn.MaxPool1d(3, 2, 1)
        if self.training_mode == 'freezefinetune':
            # 1x1 projection of the deepest feature onto the pyramid width.
            self.fp_dim = config['feature_pyramid']['dim']
            self.num_scales = config['feature_pyramid']['num_scales']
            self.conv_c5 = nn.Conv1d(128, self.fp_dim, 1, 1, 0)
            assert self.num_scales == 1  # IITNet exposes a single scale
        # NOTE(review): forward() also uses self.conv_c5 in the
        # 'scratch'/'fullyfinetune' modes, but it is only created in the
        # branch above — confirm how those modes are expected to run.
        if config['backbone']['init_weights']:
            self._initialize_weights()

    def _initialize_weights(self):
        """He-init conv weights, zero conv biases, identity-init BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, first=False):
        """Stack ``blocks`` bottlenecks; only the first may stride/downsample."""
        downsample = None
        # A projection shortcut is needed whenever the residual's shape
        # changes (spatial stride, or a channel-width mismatch).
        if (stride != 1 and first is False) or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv1d(self.inplanes, planes * block.expansion, 1, stride, bias=False),
                nn.BatchNorm1d(planes * block.expansion)
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return a one-element list with the deepest feature map.

        Args:
            x: assumed (batch, 1, samples) raw signal — TODO confirm.
        """
        out = []
        c1 = self.initial_layer(x)
        c2 = self.layer1(c1)
        c3 = self.layer2(c2)
        c4 = self.layer3(self.maxpool(c3))
        c5 = self.layer4(c4)
        if self.training_mode == 'pretrain':
            out.append(c5)
        elif self.training_mode in ['scratch', 'fullyfinetune', 'freezefinetune']:
            p5 = self.conv_c5(c5)  # project to feature-pyramid width
            out.append(p5)
        return out
| 4,111
| 31.896
| 96
|
py
|
SleePyCo
|
SleePyCo-main/models/xsleepnet.py
|
import torch.nn as nn
class XSleepNetFeature(nn.Module):
    """XSleepNet-style 1-D CNN feature extractor.

    Nine conv/BN/PReLU stages, each halving the temporal resolution
    (kernel 31, stride 2, padding 15).  ``forward`` returns a list of
    feature maps whose content depends on the training mode.
    """

    def __init__(self, config):
        super(XSleepNetFeature, self).__init__()
        self.training_mode = config['training_params']['mode']
        # Channel plan of the nine stages.  Modules are registered in this
        # exact order so seeded weight initialization stays reproducible.
        widths = [(1, 16), (16, 16), (16, 32), (32, 32), (32, 64),
                  (64, 64), (64, 128), (128, 128), (128, 256)]
        for idx, (cin, cout) in enumerate(widths, start=1):
            setattr(self, 'conv{}'.format(idx), self.make_layers(cin, cout))
        if self.training_mode == 'freezefinetune':
            # 1x1 lateral projections onto the feature-pyramid width.
            self.fp_dim = config['feature_pyramid']['dim']
            self.num_scales = config['feature_pyramid']['num_scales']
            self.conv_c5 = nn.Conv1d(256, self.fp_dim, 1, 1, 0)
            if self.num_scales > 1:
                self.conv_c4 = nn.Conv1d(128, self.fp_dim, 1, 1, 0)
            if self.num_scales > 2:
                self.conv_c3 = nn.Conv1d(128, self.fp_dim, 1, 1, 0)
        if config['backbone']['init_weights']:
            self._initialize_weights()

    def _initialize_weights(self):
        """He-init conv weights, zero conv biases, identity-init BatchNorm."""
        for mod in self.modules():
            if isinstance(mod, nn.Conv1d):
                nn.init.kaiming_normal_(mod.weight, mode='fan_out', nonlinearity='relu')
                if mod.bias is not None:
                    nn.init.constant_(mod.bias, 0)
            elif isinstance(mod, nn.BatchNorm1d):
                nn.init.constant_(mod.weight, 1)
                nn.init.constant_(mod.bias, 0)

    def make_layers(self, in_channels, out_channels):
        """One downsampling stage: conv(k=31, s=2, p=15) -> BN -> PReLU."""
        return nn.Sequential(
            nn.Conv1d(in_channels, out_channels, 31, 2, 15),
            nn.BatchNorm1d(out_channels),
            nn.PReLU(),
        )

    def forward(self, x):
        out = []
        x = self.conv4(self.conv3(self.conv2(self.conv1(x))))
        c1 = self.conv5(x)
        c2 = self.conv6(c1)
        c3 = self.conv7(c2)
        c4 = self.conv8(c3)
        c5 = self.conv9(c4)
        if self.training_mode == 'pretrain':
            out.append(c5)
        elif self.training_mode in ['scratch', 'fullyfinetune', 'freezefinetune']:
            # NOTE(review): num_scales/conv_c5 only exist in 'freezefinetune';
            # the other two modes would fail here, as in the original.
            out.append(self.conv_c5(c5))
            if self.num_scales > 1:
                out.append(self.conv_c4(c4))
            if self.num_scales > 2:
                out.append(self.conv_c3(c3))
        return out
| 2,631
| 32.74359
| 86
|
py
|
SleePyCo
|
SleePyCo-main/models/utils.py
|
import torch.utils.data
from torch.nn import functional as F
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.functional import pad
from torch.nn.modules import Module
from torch.nn.modules.utils import _single, _pair, _triple
class _ConvNd(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding,
dilation, transposed, output_padding, groups, bias, weight=None):
super(_ConvNd, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(
in_channels, out_channels // groups, *kernel_size))
else:
self.weight = Parameter(torch.Tensor(
out_channels, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters(weight)
def reset_parameters(self, weight):
if weight == None:
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
else:
self.weight.data = torch.FloatTensor(weight)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def __repr__(self):
s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__)
class Conv1d(_ConvNd):
    """1-D convolution supporting TensorFlow-style 'SAME'/'VALID' padding.

    ``padding`` may be 'SAME', 'VALID', or an explicit integer; the actual
    padding logic lives in ``conv1d_same_padding``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding='VALID', dilation=1, groups=1, bias=True, weight=None):
        super(Conv1d, self).__init__(
            in_channels,
            out_channels,
            _single(kernel_size),
            _single(stride),
            _single(padding),
            _single(dilation),
            False,
            _pair(0),
            groups,
            bias,
            weight,
        )

    def forward(self, input):
        return conv1d_same_padding(
            input, self.weight, self.bias,
            self.stride, self.padding, self.dilation, self.groups,
        )
# Functional 1-D convolution with TensorFlow-style padding, written before
# PyTorch gained native padding='same' support.
def conv1d_same_padding(input, weight, bias=None, stride=1, padding='VALID', dilation=1, groups=1):
    """Apply F.conv1d with 'SAME' / 'VALID' / integer padding.

    'SAME' left/right zero-pads the input (the right side gets the extra
    sample when the total padding is odd) so that
    out_len == ceil(in_len / stride); 'VALID' applies no padding.

    Raises:
        TypeError: if stride/dilation are neither ints nor sequences.
        ValueError: if padding is not 'SAME', 'VALID' or an int.
    """
    def check_format(*argv):
        # Normalize each argument to a tuple; a bare int is duplicated
        # pair-wise (a leftover of the 2-D helper this was adapted from).
        argv_format = []
        for i in range(len(argv)):
            if type(argv[i]) is int:
                argv_format.append((argv[i], argv[i]))
            elif hasattr(argv[i], "__getitem__"):
                argv_format.append(tuple(argv[i]))
            else:
                raise TypeError('all input should be int or list-type, now is {}'.format(argv[i]))
        return argv_format
    # NOTE(review): an int stride becomes a 2-tuple here yet is forwarded to
    # F.conv1d below; the Conv1d wrapper in this file always passes
    # 1-element tuples — confirm before calling this directly with ints.
    stride, dilation = check_format(stride, dilation)
    if padding == 'SAME':
        padding = 0
        input_rows = input.size(2)
        filter_rows = weight.size(2)
        out_rows = (input_rows + stride[0] - 1) // stride[0]  # ceil division
        padding_rows = max(0, (out_rows - 1) * stride[0] +
                           (filter_rows - 1) * dilation[0] + 1 - input_rows)
        rows_odd = padding_rows % 2  # odd totals put the extra sample on the right
        input = pad(input, [padding_rows // 2, padding_rows // 2 + int(rows_odd)])
    elif padding == 'VALID':
        padding = 0
    elif type(padding) != int:
        raise ValueError('Padding should be SAME, VALID or specific integer, but not {}.'.format(padding))
    return F.conv1d(input, weight, bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
class _MaxPoolNd(Module):
def __init__(self, kernel_size, stride=None, padding='VALID', dilation=1,
return_indices=False, ceil_mode=False):
super(_MaxPoolNd, self).__init__()
self.kernel_size = kernel_size
self.stride = stride or kernel_size
self.padding = padding
self.dilation = dilation
self.return_indices = return_indices
self.ceil_mode = ceil_mode
def extra_repr(self):
return 'kernel_size={kernel_size}, stride={stride}, padding={padding}' \
', dilation={dilation}, ceil_mode={ceil_mode}'.format(**self.__dict__)
class MaxPool1d(_MaxPoolNd):
    """Max pooling over a 1-D signal with 'SAME'/'VALID'/integer padding."""

    def forward(self, input):
        return maxpool1d_same_padding(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            self.return_indices,
            self.ceil_mode,
        )

    def extra_repr(self):
        template = ('kernel_size={kernel_size}, stride={stride}, '
                    'padding={padding}, dilation={dilation}, ceil_mode={ceil_mode}')
        return template.format(**self.__dict__)
def maxpool1d_same_padding(input, kernel_size, stride=None, padding='VALID', dilation=1, return_indices=False, ceil_mode=False):
    """Apply F.max_pool1d with TensorFlow-style 'SAME'/'VALID'/integer padding.

    With 'SAME', the input is left/right zero-padded so that
    out_len == ceil(in_len / stride); with 'VALID', no padding is applied.

    Raises:
        ValueError: if padding is not 'SAME', 'VALID' or an int.
    """
    # Local normalized copies for the padding math; the original ``stride``
    # (possibly None) is still what gets forwarded to F.max_pool1d, which
    # then defaults it to the kernel size.
    if stride is None:
        _stride, dilation = [kernel_size], [dilation]
    else:
        _stride, dilation = [stride], [dilation]
    if padding == 'SAME':
        padding = 0
        input_rows = input.size(2)
        filter_rows = kernel_size
        out_rows = (input_rows + _stride[0] - 1) // _stride[0]  # ceil division
        padding_rows = max(0, (out_rows - 1) * _stride[0] +
                           (filter_rows - 1) * dilation[0] + 1 - input_rows)
        rows_odd = padding_rows % 2  # odd totals put the extra sample on the right
        # NOTE(review): 'SAME' pads with zeros, so all-negative inputs can
        # pool to 0 at the borders — confirm this is acceptable upstream.
        input = pad(input, [padding_rows // 2, padding_rows // 2 + int(rows_odd)])
    elif padding == 'VALID':
        padding = 0
    elif type(padding) != int:
        raise ValueError('Padding should be SAME, VALID or specific integer, but not {}.'.format(padding))
    return F.max_pool1d(input, kernel_size, stride, padding, dilation, return_indices, ceil_mode)
| 7,340
| 37.434555
| 128
|
py
|
SleePyCo
|
SleePyCo-main/models/main_model.py
|
import torch.nn as nn
import torch.nn.functional as F
from .sleepyco import SleePyCoBackbone
from .xsleepnet import XSleepNetFeature
from .iitnet import IITNetBackbone
from .utime import UTimeEncoder
from .deepsleepnet import DeepSleepNetFeature
from .classifiers import get_classifier
# Number of channels in the deepest feature map each backbone produces;
# used below to size the projection head's input layer.
last_chn_dict = dict(
    SleePyCo=256,
    XSleepNet=256,
    IITNet=128,
    UTime=256,
    DeepSleepNet=128,
)
class MainModel(nn.Module):
    """Wire a backbone to either a projection head (pretrain) or a classifier.

    In 'pretrain' mode the deepest backbone feature is pooled, projected and
    L2-normalized (for contrastive training); in the scratch/finetune modes
    each backbone feature map is passed through the sequence classifier.
    """

    def __init__(self, config):
        super(MainModel, self).__init__()
        self.cfg = config
        self.bb_cfg = config['backbone']
        self.training_mode = config['training_params']['mode']

        if self.bb_cfg['name'] == 'SleePyCo':
            self.feature = SleePyCoBackbone(self.cfg)
        elif self.bb_cfg['name'] == 'XSleepNet':
            self.feature = XSleepNetFeature(self.cfg)
        elif self.bb_cfg['name'] == 'UTime':
            self.feature = UTimeEncoder(self.cfg)
        elif self.bb_cfg['name'] == 'IITNet':
            self.feature = IITNetBackbone(self.cfg)
        elif self.bb_cfg['name'] == 'DeepSleepNet':
            self.feature = DeepSleepNetFeature(self.cfg)
        else:
            raise NotImplementedError('backbone not supported: {}'.format(config['backbone']['name']))

        if self.bb_cfg['dropout']:
            self.dropout = nn.Dropout(p=0.5)

        if self.training_mode == 'pretrain':
            proj_dim = self.cfg['proj_head']['dim']
            if config['proj_head']['name'] == 'Linear':
                self.head = nn.Sequential(
                    nn.AdaptiveAvgPool1d(1),
                    nn.Flatten(),
                    nn.Linear(last_chn_dict[config['backbone']['name']], proj_dim)
                )
            elif config['proj_head']['name'] == 'MLP':
                self.head = nn.Sequential(
                    nn.AdaptiveAvgPool1d(1),
                    nn.Flatten(),
                    nn.Linear(last_chn_dict[config['backbone']['name']], proj_dim),
                    nn.ReLU(inplace=True),
                    nn.Linear(proj_dim, proj_dim)
                )
            else:
                raise NotImplementedError('head not supported: {}'.format(config['proj_head']['name']))
            print('[INFO] Number of params of backbone: ', sum(p.numel() for p in self.feature.parameters() if p.requires_grad))
            print('[INFO] Number of params of proj_head: ', sum(p.numel() for p in self.head.parameters() if p.requires_grad))
        elif self._is_finetune_mode():
            self.classifier = get_classifier(config)
            print('[INFO] Number of params of backbone: ', sum(p.numel() for p in self.feature.parameters() if p.requires_grad))
            print('[INFO] Number of params of classifier: ', sum(p.numel() for p in self.classifier.parameters() if p.requires_grad))
        else:
            raise NotImplementedError('head not supported: {}'.format(config['training_params']['mode']))

    def _is_finetune_mode(self):
        # BUGFIX: this class checked for 'fullfinetune' while every backbone
        # in this repo checks 'fullyfinetune'; accept both spellings so
        # either config works.
        return self.training_mode in ['scratch', 'fullfinetune', 'fullyfinetune', 'freezefinetune']

    def get_max_len(self, features):
        """Return the largest size along dim 1 among the given feature maps."""
        len_list = []
        for feature in features:
            len_list.append(feature.shape[1])
        return max(len_list)

    def forward(self, x):
        outputs = []
        features = self.feature(x)
        for feature in features:
            if self.bb_cfg['dropout']:
                feature = self.dropout(feature)
            if self.training_mode == 'pretrain':
                # Pool + project + L2-normalize for contrastive learning.
                outputs.append(F.normalize(self.head(feature)))
            elif self._is_finetune_mode():
                feature = feature.transpose(1, 2)  # (B, C, L) -> (B, L, C)
                output = self.classifier(feature)
                outputs.append(output)  # (B, L, H)
            else:
                raise NotImplementedError
        return outputs
| 3,898
| 36.490385
| 133
|
py
|
SleePyCo
|
SleePyCo-main/models/classifiers.py
|
import math
import torch
import torch.nn as nn
# Length of the feature sequence a backbone emits, indexed as
# feature_len_dict[backbone][seq_len - 1][num_scales - 1];
# PositionalEncoding uses it to size its table of positional codes.
feature_len_dict = {
    'SleePyCo': [
        [5, 24, 120],
        [10, 48, 240],
        [15, 72, 360],
        [20, 96, 480],
        [24, 120, 600],
        [29, 144, 720],
        [34, 168, 840],
        [39, 192, 960],
        [44, 216, 1080],
        [48, 240, 1200]],
}
class PlainRNN(nn.Module):
    """Vanilla RNN sequence classifier: last hidden state -> linear layer.

    For a bidirectional RNN the forward direction's last step and the
    backward direction's first step are concatenated before classification.
    """

    def __init__(self, config):
        super(PlainRNN, self).__init__()
        self.cfg = config['classifier']
        self.num_classes = config['num_classes']
        self.input_dim = config['comp_chn']
        self.hidden_dim = config['hidden_dim']
        self.num_layers = config['num_rnn_layers']
        self.bidirectional = config['bidirectional']
        # architecture
        num_directions = 2 if self.bidirectional else 1
        self.rnn = nn.RNN(
            input_size=self.input_dim,
            hidden_size=self.hidden_dim,
            num_layers=self.num_layers,
            batch_first=True,
            bidirectional=self.bidirectional,
        )
        self.fc = nn.Linear(self.hidden_dim * num_directions, self.num_classes)

    def init_hidden(self, x):
        """Zero-initialized h0 on the GPU for a batch shaped like ``x``."""
        num_directions = 2 if self.bidirectional else 1
        shape = (self.num_layers * num_directions, x.size(0), self.hidden_dim)
        return torch.zeros(shape).cuda()

    def forward(self, x):
        rnn_output, _ = self.rnn(x, self.init_hidden(x))
        if self.bidirectional:
            # Forward direction: last time step; backward direction: first.
            fwd = rnn_output[:, -1, :self.hidden_dim]
            bwd = rnn_output[:, 0, self.hidden_dim:]
            summary = torch.cat((fwd, bwd), dim=1)
        else:
            summary = rnn_output[:, -1, :]
        return self.fc(summary)
class PlainGRU(PlainRNN):
    """PlainRNN with the recurrent core swapped for a GRU."""

    def __init__(self, config):
        super(PlainGRU, self).__init__(config)
        # Replace the plain RNN built by the parent; all other attributes
        # (fc, hidden sizes, pooling) are inherited unchanged.
        rnn_kwargs = dict(
            input_size=self.input_dim,
            hidden_size=self.hidden_dim,
            num_layers=self.num_layers,
            batch_first=True,
            bidirectional=self.bidirectional,
        )
        self.rnn = nn.GRU(**rnn_kwargs)
class PlainLSTM(PlainRNN):
    """PlainRNN with an LSTM core; the hidden state is the (h0, c0) pair."""

    def __init__(self, config):
        super(PlainLSTM, self).__init__(config)
        rnn_kwargs = dict(
            input_size=self.input_dim,
            hidden_size=self.hidden_dim,
            num_layers=self.num_layers,
            batch_first=True,
            bidirectional=self.bidirectional,
        )
        self.rnn = nn.LSTM(**rnn_kwargs)

    def init_hidden(self, x):
        """Zero-initialized (h0, c0) on the GPU for a batch shaped like ``x``."""
        num_directions = 2 if self.bidirectional else 1
        shape = (self.num_layers * num_directions, x.size(0), self.hidden_dim)
        return torch.zeros(shape).cuda(), torch.zeros(shape).cuda()
class AttRNN(PlainRNN):
    """PlainRNN variant that pools RNN outputs with additive attention.

    Each time step is projected to hidden_dim, scored, and the softmax of
    the scores weights a sum over time; the classifier therefore always
    sees hidden_dim features, regardless of directionality.
    """

    def __init__(self, config):
        super(AttRNN, self).__init__(config)
        # architecture (registration order kept: fc, w_ha, w_att)
        rnn_out_dim = self.hidden_dim * 2 if config['bidirectional'] else self.hidden_dim
        self.fc = nn.Linear(self.hidden_dim, self.num_classes)
        self.w_ha = nn.Linear(rnn_out_dim, self.hidden_dim, bias=True)
        self.w_att = nn.Linear(self.hidden_dim, 1, bias=False)

    def forward(self, x):
        rnn_output, _ = self.rnn(x, self.init_hidden(x))
        a_states = self.w_ha(rnn_output)
        scores = self.w_att(a_states)
        alpha = torch.softmax(scores, dim=1).view(x.size(0), 1, x.size(1))
        weighted_sum = torch.bmm(alpha, a_states).view(x.size(0), -1)
        return self.fc(weighted_sum)
class AttGRU(AttRNN):
    """AttRNN with the recurrent core swapped for a GRU."""

    def __init__(self, config):
        super(AttGRU, self).__init__(config)
        rnn_kwargs = dict(
            input_size=self.input_dim,
            hidden_size=self.hidden_dim,
            num_layers=self.num_layers,
            batch_first=True,
            bidirectional=self.bidirectional,
        )
        self.rnn = nn.GRU(**rnn_kwargs)
class AttLSTM(AttRNN):
    """AttRNN with an LSTM core; the hidden state is the (h0, c0) pair."""

    def __init__(self, config):
        super(AttLSTM, self).__init__(config)
        rnn_kwargs = dict(
            input_size=self.input_dim,
            hidden_size=self.hidden_dim,
            num_layers=self.num_layers,
            batch_first=True,
            bidirectional=self.bidirectional,
        )
        self.rnn = nn.LSTM(**rnn_kwargs)

    def init_hidden(self, x):
        """Zero-initialized (h0, c0) on the GPU for a batch shaped like ``x``."""
        num_directions = 2 if self.bidirectional else 1
        shape = (self.num_layers * num_directions, x.size(0), self.hidden_dim)
        return torch.zeros(shape).cuda(), torch.zeros(shape).cuda()
class PositionalEncoding(nn.Module):
    """Project features to the model dimension and add sinusoidal codes.

    A sin/cos table is precomputed for the longest feature sequence this
    backbone/seq_len/num_scales combination can produce (feature_len_dict);
    at run time the table is subsampled to the actual sequence length.
    """

    def __init__(self, config, in_features, out_features, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.cfg = config['classifier']['pos_enc']
        if self.cfg['dropout']:
            self.dropout = nn.Dropout(p=dropout)
        self.fc = nn.Linear(in_features=in_features, out_features=out_features)
        self.act_fn = nn.PReLU()
        # Longest feature-sequence length for this configuration.
        self.max_len = feature_len_dict[config['backbone']['name']][config['dataset']['seq_len'] - 1][config['feature_pyramid']['num_scales'] - 1]
        print('[INFO] Maximum length of pos_enc: {}'.format(self.max_len))
        # Standard transformer sinusoidal table: sin on even columns,
        # cos on odd columns.
        pe = torch.zeros(self.max_len, out_features)
        position = torch.arange(0, self.max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, out_features, 2).float() * (-math.log(10000.0) / out_features))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Shape (max_len, 1, out_features): sequence-first, matching the
        # (L, B, E) layout the Transformer classifier feeds this module.
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x is sequence-first here, so x.size(0) is the sequence length
        # (the Transformer caller transposes before invoking this module).
        x = self.act_fn(self.fc(x))
        # Subsample the precomputed table with an even stride so the codes
        # stay spread over the full range when the actual sequence is
        # shorter than max_len.
        hop = self.max_len // x.size(0)
        pe = self.pe[hop//2::hop, :]
        if pe.shape[0] != x.size(0):
            # The strided slice can overshoot by one entry; trim to match.
            pe = pe[:x.size(0), :]
        x = x + pe
        if self.cfg['dropout']:
            x = self.dropout(x)
        return x
class Transformer(nn.Module):
    """Transformer-encoder sequence classifier with selectable pooling.

    Pooling over the encoded sequence: 'mean', 'last' (final step),
    'attn' (learned additive attention), or None (no pooling; the linear
    head is applied per time step).
    """

    def __init__(self, config, nheads, num_encoder_layers, pool='mean'):
        super(Transformer, self).__init__()
        self.cfg = config['classifier']
        self.model_dim = self.cfg['model_dim']
        self.feedforward_dim = self.cfg['feedforward_dim']
        self.in_features = config['feature_pyramid']['dim']
        self.out_features = self.cfg['model_dim']
        self.pos_encoding = PositionalEncoding(config, self.in_features, self.out_features)
        self.transformer_layer = nn.TransformerEncoderLayer(
            d_model=self.model_dim,
            nhead=nheads,
            dim_feedforward=self.feedforward_dim,
            dropout=0.1 if self.cfg['dropout'] else 0.0
        )
        self.transformer = nn.TransformerEncoder(self.transformer_layer, num_layers=num_encoder_layers)
        self.pool = pool
        if self.cfg['dropout']:
            self.dropout = nn.Dropout(p=0.5)
        if pool == 'attn':
            self.w_ha = nn.Linear(self.model_dim, self.model_dim, bias=True)
            self.w_at = nn.Linear(self.model_dim, 1, bias=False)
        self.fc = nn.Linear(self.model_dim, self.cfg['num_classes'])

    def forward(self, x):
        # nn.Transformer expects (L, B, E); inputs arrive batch-first.
        x = x.transpose(0, 1)
        x = self.pos_encoding(x)
        x = self.transformer(x)
        x = x.transpose(0, 1)
        if self.pool == 'mean':
            x = x.mean(dim=1)
        elif self.pool == 'last':
            x = x[:, -1]
        elif self.pool == 'attn':
            a_states = torch.tanh(self.w_ha(x))
            alpha = torch.softmax(self.w_at(a_states), dim=1).view(x.size(0), 1, x.size(1))
            x = torch.bmm(alpha, a_states).view(x.size(0), -1)
        elif self.pool is None:  # BUGFIX: identity check, not '== None'
            pass  # keep the per-time-step features
        else:
            raise NotImplementedError
        if self.cfg['dropout']:
            x = self.dropout(x)
        return self.fc(x)
def get_classifier(config):
    """Instantiate the classifier named in config['classifier']['name'].

    Raises:
        NotImplementedError: for an unknown classifier name (previously an
            unknown name fell through to an unbound-variable error).
    """
    classifier_name = config['classifier']['name']
    # BUGFIX: the second group of branches started a fresh 'if', splitting
    # the chain in two; unify it and fail loudly on unknown names.
    if classifier_name == 'PlainRNN':
        classifier = PlainRNN(config)
    elif classifier_name == 'AttentionRNN':
        classifier = AttRNN(config)
    elif classifier_name == 'PlainLSTM':
        classifier = PlainLSTM(config)
    elif classifier_name == 'AttentionLSTM':
        classifier = AttLSTM(config)
    elif classifier_name == 'PlainGRU':
        classifier = PlainGRU(config)
    elif classifier_name == 'AttentionGRU':
        classifier = AttGRU(config)
    elif classifier_name == 'Transformer':
        classifier = Transformer(config, nheads=8, num_encoder_layers=6, pool=config['classifier']['pool'])
    else:
        raise NotImplementedError('classifier not supported: {}'.format(classifier_name))
    return classifier
| 8,588
| 30.811111
| 146
|
py
|
SleePyCo
|
SleePyCo-main/models/sleepyco.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SleePyCoBackbone(nn.Module):
    """VGG-style 1-D CNN backbone with channel attention (SleePyCo).

    Five conv stages separated by padded max-pooling; ``forward`` returns a
    list of feature maps whose content depends on the training mode.
    """

    def __init__(self, config):
        super(SleePyCoBackbone, self).__init__()
        self.training_mode = config['training_params']['mode']
        # architecture
        self.init_layer = self.make_layers(in_channels=1, out_channels=64, n_layers=2, maxpool_size=None, first=True)
        self.layer1 = self.make_layers(in_channels=64, out_channels=128, n_layers=2, maxpool_size=5)
        self.layer2 = self.make_layers(in_channels=128, out_channels=192, n_layers=3, maxpool_size=5)
        self.layer3 = self.make_layers(in_channels=192, out_channels=256, n_layers=3, maxpool_size=5)
        self.layer4 = self.make_layers(in_channels=256, out_channels=256, n_layers=3, maxpool_size=5)
        if self.training_mode == 'freezefinetune':
            # 1x1 lateral projections onto the feature-pyramid width.
            self.fp_dim = config['feature_pyramid']['dim']
            self.num_scales = config['feature_pyramid']['num_scales']
            self.conv_c5 = nn.Conv1d(256, self.fp_dim, 1, 1, 0)
            if self.num_scales > 1:
                self.conv_c4 = nn.Conv1d(256, self.fp_dim, 1, 1, 0)
            if self.num_scales > 2:
                self.conv_c3 = nn.Conv1d(192, self.fp_dim, 1, 1, 0)
        if config['backbone']['init_weights']:
            self._initialize_weights()

    def _initialize_weights(self):
        """He-init conv weights, zero conv biases, identity-init BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def make_layers(self, in_channels, out_channels, n_layers, maxpool_size, first=False):
        """Build one stage: [maxpool] + n_layers x (conv3 -> BN [-> gate] -> PReLU).

        The last conv of the stage is followed by a ChannelGate attention.
        """
        layers = []
        layers = layers + [MaxPool1d(maxpool_size)] if not first else layers
        for i in range(n_layers):
            conv1d = nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1)
            layers += [conv1d, nn.BatchNorm1d(out_channels)]
            if i == n_layers - 1:
                # NOTE(review): by the last iteration in_channels has already
                # been advanced to out_channels (n_layers >= 2 everywhere in
                # this file), so the gate matches the conv output width —
                # re-check if a single-layer stage is ever added.
                layers += [ChannelGate(in_channels)]
            layers += [nn.PReLU()]
            in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return feature maps per the training mode (see class docstring)."""
        out = []
        c1 = self.init_layer(x)
        c2 = self.layer1(c1)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)
        if self.training_mode == 'pretrain':
            out.append(c5)
        elif self.training_mode in ['scratch', 'fullyfinetune', 'freezefinetune']:
            # NOTE(review): conv_c5/num_scales only exist in 'freezefinetune'
            # mode — confirm how 'scratch'/'fullyfinetune' reach this branch.
            p5 = self.conv_c5(c5)
            out.append(p5)
            if self.num_scales > 1:
                p4 = self.conv_c4(c4)
                out.append(p4)
            if self.num_scales > 2:
                p3 = self.conv_c3(c3)
                out.append(p3)
        return out
class MaxPool1d(nn.Module):
    """Max pooling that zero-pads the input up to a multiple of the window.

    Unlike a bare nn.MaxPool1d, no trailing samples are dropped: when the
    length is not divisible by ``maxpool_size`` the signal is zero-padded
    (extra sample on the right for odd padding) before pooling.
    """

    def __init__(self, maxpool_size):
        super(MaxPool1d, self).__init__()
        self.maxpool_size = maxpool_size
        self.maxpool = nn.MaxPool1d(kernel_size=maxpool_size, stride=maxpool_size)

    def forward(self, x):
        n_samples = x.size(2)
        remainder = n_samples % self.maxpool_size
        if remainder != 0:
            pad_size = self.maxpool_size - remainder
            left_pad = pad_size // 2
            right_pad = pad_size - left_pad  # the right side absorbs odd padding
            x = F.pad(x, (left_pad, right_pad), mode='constant')
        return self.maxpool(x)
class BasicConv(nn.Module):
    """Conv1d optionally followed by BatchNorm and ReLU (each toggleable)."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.bn = nn.BatchNorm1d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        y = self.conv(x)
        for stage in (self.bn, self.relu):
            if stage is not None:
                y = stage(y)
        return y
class ChannelGate(nn.Module):
    """Squeeze-and-excitation style channel attention for 1-D feature maps.

    Each requested pooling summarizes (B, C, L) down to (B, C); a shared
    bottleneck MLP scores the channels, scores from multiple poolings are
    summed, and the sigmoid of the sum rescales the input channel-wise.
    """

    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg']):
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        self.mlp = nn.Sequential(
            nn.Flatten(),
            nn.Linear(gate_channels, gate_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(gate_channels // reduction_ratio, gate_channels)
        )
        self.pool_types = pool_types

    def forward(self, x):
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                pooled = F.avg_pool1d(x, x.size(2), stride=x.size(2))
            elif pool_type == 'max':
                pooled = F.max_pool1d(x, x.size(2), stride=x.size(2))
            elif pool_type == 'lp':
                # BUGFIX: was F.lp_pool2d with x.size(3), which cannot work
                # on the 3-D (B, C, L) tensors this module receives.
                pooled = F.lp_pool1d(x, 2, x.size(2), stride=x.size(2))
            elif pool_type == 'lse':
                # log-sum-exp pooling (smooth maximum)
                pooled = logsumexp_2d(x)
            else:
                raise NotImplementedError('pool type not supported: {}'.format(pool_type))
            channel_att_raw = self.mlp(pooled)
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw
        # torch.sigmoid: F.sigmoid is deprecated and numerically identical.
        scale = torch.sigmoid(channel_att_sum).unsqueeze(2).expand_as(x)
        return x * scale
def logsumexp_2d(tensor):
    """Numerically stable log-sum-exp over all trailing spatial positions.

    Flattens (B, C, *spatial) to (B, C, N) and returns shape (B, C, 1);
    the running maximum is subtracted before exponentiating for stability.
    """
    flat = tensor.view(tensor.size(0), tensor.size(1), -1)
    peak, _ = flat.max(dim=2, keepdim=True)
    return peak + (flat - peak).exp().sum(dim=2, keepdim=True).log()
| 6,271
| 37.012121
| 154
|
py
|
SleePyCo
|
SleePyCo-main/models/deepsleepnet.py
|
import torch
import torch.nn as nn
from .utils import Conv1d, MaxPool1d
class DeepSleepNetFeature(nn.Module):
    """DeepSleepNet two-branch CNN feature extractor.

    path1 uses small kernels (stride 6) and path2 large kernels (stride 50);
    path2 is interpolated to path1's length, the branches are concatenated,
    compressed to 128 channels and smoothed with a 3-tap conv.
    """

    def __init__(self, config):
        super(DeepSleepNetFeature, self).__init__()
        # BUGFIX: training_mode was read below and in forward() but never
        # assigned; every other backbone sets it from the config like this.
        self.training_mode = config['training_params']['mode']
        self.chn = 64
        # architecture
        self.dropout = nn.Dropout(p=0.5)
        self.path1 = nn.Sequential(Conv1d(1, self.chn, 50, 6, padding='SAME', bias=False),
                                   nn.BatchNorm1d(self.chn),
                                   nn.ReLU(inplace=True),
                                   MaxPool1d(8, padding='SAME'),
                                   nn.Dropout(),
                                   Conv1d(self.chn, self.chn*2, 8, 1, padding='SAME', bias=False),
                                   nn.BatchNorm1d(self.chn*2),
                                   nn.ReLU(inplace=True),
                                   Conv1d(self.chn*2, self.chn*2, 8, 1, padding='SAME', bias=False),
                                   nn.BatchNorm1d(self.chn * 2),
                                   nn.ReLU(inplace=True),
                                   Conv1d(self.chn*2, self.chn*2, 8, 1, padding='SAME', bias=False),
                                   nn.BatchNorm1d(self.chn*2),
                                   nn.ReLU(inplace=True),
                                   MaxPool1d(4, padding='SAME')
                                   )
        self.path2 = nn.Sequential(Conv1d(1, self.chn, 400, 50, padding='SAME', bias=False),
                                   nn.BatchNorm1d(self.chn),
                                   nn.ReLU(inplace=True),
                                   MaxPool1d(4, padding='SAME'),
                                   nn.Dropout(),
                                   Conv1d(self.chn, self.chn*2, 8, 1, padding='SAME', bias=False),
                                   nn.BatchNorm1d(self.chn*2),
                                   nn.ReLU(inplace=True),
                                   Conv1d(self.chn*2, self.chn*2, 8, 1, padding='SAME', bias=False),
                                   nn.BatchNorm1d(self.chn * 2),
                                   nn.ReLU(inplace=True),
                                   Conv1d(self.chn*2, self.chn*2, 8, 1, padding='SAME', bias=False),
                                   nn.BatchNorm1d(self.chn*2),
                                   nn.ReLU(inplace=True),
                                   MaxPool1d(2, padding='SAME'))
        self.compress = nn.Conv1d(self.chn*4, 128, 1, 1, 0)
        self.smooth = nn.Conv1d(128, 128, 3, 1, 1)
        if self.training_mode == 'freezefinetune':
            # 1x1 projection onto the feature-pyramid width.
            self.fp_dim = config['feature_pyramid']['dim']
            self.num_scales = config['feature_pyramid']['num_scales']
            self.conv_c5 = nn.Conv1d(128, self.fp_dim, 1, 1, 0)
            assert self.num_scales == 1  # DeepSleepNet exposes a single scale
        if config['backbone']['init_weights']:
            self._initialize_weights()

    def _initialize_weights(self):
        """He-init conv weights, zero conv biases, identity-init BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Return a one-element list with the fused two-branch feature map."""
        out = []
        x1 = self.path1(x)  # path 1: fine temporal detail
        x2 = self.path2(x)  # path 2: coarse, large-kernel view
        # Match path2's temporal length to path1 before concatenation.
        x2 = torch.nn.functional.interpolate(x2, x1.size(2))
        c5 = self.smooth(self.compress(torch.cat([x1, x2], dim=1)))
        if self.training_mode == 'pretrain':
            out.append(c5)
        elif self.training_mode in ['scratch', 'fullyfinetune', 'freezefinetune']:
            p5 = self.conv_c5(c5)
            out.append(p5)
        return out
| 3,855
| 44.904762
| 100
|
py
|
SleePyCo
|
SleePyCo-main/models/utime.py
|
import torch
import torch.nn as nn
from .utils import Conv1d
class ConvUnit(nn.Module):
    """Conv1d (with 'SAME'-capable padding) followed by BatchNorm and ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation):
        super(ConvUnit, self).__init__()
        self.conv = Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
        )
        self.bn = nn.BatchNorm1d(num_features=out_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class UTimeEncoder(nn.Module):
    """U-Time encoder: five dilated conv stages with progressive max-pooling.

    ``forward`` returns a list of feature maps — c5 only in 'pretrain' mode,
    otherwise the 1x1-projected pyramid levels p5 (and optionally p4, p3).
    """

    def __init__(self, config):
        super(UTimeEncoder, self).__init__()
        # NOTE(review): global cuDNN flag — presumably relaxed to cope with
        # the variable-length 'SAME' padding below; confirm it is intended.
        torch.backends.cudnn.deterministic = False
        self.training_mode = config['training_params']['mode']
        self.conv1_1 = ConvUnit(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding='SAME', dilation=2)
        self.conv1_2 = ConvUnit(in_channels=16, out_channels=16, kernel_size=5, stride=1, padding='SAME', dilation=2)
        self.mp1 = nn.MaxPool1d(kernel_size=8)
        self.conv2_1 = ConvUnit(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding='SAME', dilation=2)
        self.conv2_2 = ConvUnit(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding='SAME', dilation=2)
        self.mp2 = nn.MaxPool1d(kernel_size=6)
        self.conv3_1 = ConvUnit(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding='SAME', dilation=2)
        self.conv3_2 = ConvUnit(in_channels=64, out_channels=64, kernel_size=5, stride=1, padding='SAME', dilation=2)
        self.mp3 = nn.MaxPool1d(kernel_size=4)
        self.conv4_1 = ConvUnit(in_channels=64, out_channels=128, kernel_size=5, stride=1, padding='SAME', dilation=2)
        self.conv4_2 = ConvUnit(in_channels=128, out_channels=128, kernel_size=5, stride=1, padding='SAME', dilation=2)
        self.mp4 = nn.MaxPool1d(kernel_size=2)
        self.conv5_1 = ConvUnit(in_channels=128, out_channels=256, kernel_size=5, stride=1, padding='SAME', dilation=2)
        self.conv5_2 = ConvUnit(in_channels=256, out_channels=256, kernel_size=5, stride=1, padding='SAME', dilation=2)
        if self.training_mode == 'freezefinetune':
            self.fp_dim = config['feature_pyramid']['dim']
            self.num_scales = config['feature_pyramid']['num_scales']
            self.conv_c5 = nn.Conv1d(256, self.fp_dim, 1, 1, 0)
            # BUGFIX: these guards used self.n_anchor, which is never defined
            # anywhere; every other backbone keys the lateral projections on
            # num_scales, and forward() below reads num_scales too.
            if self.num_scales > 1:
                self.conv_c4 = nn.Conv1d(128, self.fp_dim, 1, 1, 0)
            if self.num_scales > 2:
                self.conv_c3 = nn.Conv1d(64, self.fp_dim, 1, 1, 0)
        if config['backbone']['init_weights']:
            self._initialize_weights()

    def _initialize_weights(self):
        """He-init conv weights, zero conv biases, identity-init BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Return feature maps per the training mode (see class docstring)."""
        out = []
        x = self.conv1_1(x)
        c1 = self.conv1_2(x)
        x = self.mp1(c1)
        x = self.conv2_1(x)
        c2 = self.conv2_2(x)
        x = self.mp2(c2)
        x = self.conv3_1(x)
        c3 = self.conv3_2(x)
        x = self.mp3(c3)
        x = self.conv4_1(x)
        c4 = self.conv4_2(x)
        x = self.mp4(c4)
        x = self.conv5_1(x)
        c5 = self.conv5_2(x)
        if self.training_mode == 'pretrain':
            out.append(c5)
        elif self.training_mode in ['scratch', 'fullyfinetune', 'freezefinetune']:
            p5 = self.conv_c5(c5)
            out.append(p5)
            if self.num_scales > 1:
                p4 = self.conv_c4(c4)
                out.append(p4)
            if self.num_scales > 2:
                p3 = self.conv_c3(c3)
                out.append(p3)
        return out
| 4,199
| 37.181818
| 119
|
py
|
SleePyCo
|
SleePyCo-main/dset/Sleep-EDF-2013/download_sleep-edf-2013.py
|
import os

# Base URL of the PhysioNet Sleep-EDF (sleep-cassette) recordings.
_BASE_URL = 'https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette'

# The 39 PSG/Hypnogram file pairs of the Sleep-EDF-2013 subset (SC40xx).
# Note: SC4132 does not exist in the dataset, hence the gap after SC4131.
_FILES = [
    'SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf',
    'SC4002E0-PSG.edf', 'SC4002EC-Hypnogram.edf',
    'SC4011E0-PSG.edf', 'SC4011EH-Hypnogram.edf',
    'SC4012E0-PSG.edf', 'SC4012EC-Hypnogram.edf',
    'SC4021E0-PSG.edf', 'SC4021EH-Hypnogram.edf',
    'SC4022E0-PSG.edf', 'SC4022EJ-Hypnogram.edf',
    'SC4031E0-PSG.edf', 'SC4031EC-Hypnogram.edf',
    'SC4032E0-PSG.edf', 'SC4032EP-Hypnogram.edf',
    'SC4041E0-PSG.edf', 'SC4041EC-Hypnogram.edf',
    'SC4042E0-PSG.edf', 'SC4042EC-Hypnogram.edf',
    'SC4051E0-PSG.edf', 'SC4051EC-Hypnogram.edf',
    'SC4052E0-PSG.edf', 'SC4052EC-Hypnogram.edf',
    'SC4061E0-PSG.edf', 'SC4061EC-Hypnogram.edf',
    'SC4062E0-PSG.edf', 'SC4062EC-Hypnogram.edf',
    'SC4071E0-PSG.edf', 'SC4071EC-Hypnogram.edf',
    'SC4072E0-PSG.edf', 'SC4072EH-Hypnogram.edf',
    'SC4081E0-PSG.edf', 'SC4081EC-Hypnogram.edf',
    'SC4082E0-PSG.edf', 'SC4082EP-Hypnogram.edf',
    'SC4091E0-PSG.edf', 'SC4091EC-Hypnogram.edf',
    'SC4092E0-PSG.edf', 'SC4092EC-Hypnogram.edf',
    'SC4101E0-PSG.edf', 'SC4101EC-Hypnogram.edf',
    'SC4102E0-PSG.edf', 'SC4102EC-Hypnogram.edf',
    'SC4111E0-PSG.edf', 'SC4111EC-Hypnogram.edf',
    'SC4112E0-PSG.edf', 'SC4112EC-Hypnogram.edf',
    'SC4121E0-PSG.edf', 'SC4121EC-Hypnogram.edf',
    'SC4122E0-PSG.edf', 'SC4122EV-Hypnogram.edf',
    'SC4131E0-PSG.edf', 'SC4131EC-Hypnogram.edf',
    'SC4141E0-PSG.edf', 'SC4141EU-Hypnogram.edf',
    'SC4142E0-PSG.edf', 'SC4142EU-Hypnogram.edf',
    'SC4151E0-PSG.edf', 'SC4151EC-Hypnogram.edf',
    'SC4152E0-PSG.edf', 'SC4152EC-Hypnogram.edf',
    'SC4161E0-PSG.edf', 'SC4161EC-Hypnogram.edf',
    'SC4162E0-PSG.edf', 'SC4162EC-Hypnogram.edf',
    'SC4171E0-PSG.edf', 'SC4171EU-Hypnogram.edf',
    'SC4172E0-PSG.edf', 'SC4172EC-Hypnogram.edf',
    'SC4181E0-PSG.edf', 'SC4181EC-Hypnogram.edf',
    'SC4182E0-PSG.edf', 'SC4182EC-Hypnogram.edf',
    'SC4191E0-PSG.edf', 'SC4191EP-Hypnogram.edf',
    'SC4192E0-PSG.edf', 'SC4192EV-Hypnogram.edf',
]

os.makedirs('./edf', exist_ok=True)
for fname in _FILES:
    # Same wget invocation the original script issued once per hard-coded line.
    os.system(f'wget {_BASE_URL}/{fname} -P ./edf')
| 9,329
| 112.780488
| 121
|
py
|
SleePyCo
|
SleePyCo-main/dset/Sleep-EDF-2013/prepare_sleep-edf-2013.py
|
import os
import glob
import ntpath
import logging
import argparse
import pyedflib
import numpy as np
# Numeric codes for the sleep stages: the AASM five-class scheme (W, N1, N2,
# N3, REM) plus movement/unknown markers used only during preprocessing.
W, N1, N2, N3, REM, MOVE, UNK = 0, 1, 2, 3, 4, 5, 6

# Stage-name -> code lookup used when trimming wake and removing bad epochs.
stage_dict = {
    "W": W,
    "N1": N1,
    "N2": N2,
    "N3": N3,
    "REM": REM,
    "MOVE": MOVE,
    "UNK": UNK,
}

# EDF annotation string -> label code for this dataset.  Stages 3 and 4 are
# merged into N3 following the AASM manual; '?' and movement epochs are
# flagged so they can be dropped later.
ann2label = {
    "Sleep stage W": W,
    "Sleep stage 1": N1,
    "Sleep stage 2": N2,
    "Sleep stage 3": N3,
    "Sleep stage 4": N3,
    "Sleep stage R": REM,
    "Sleep stage ?": UNK,
    "Movement time": MOVE,
}
def main():
    """Convert Sleep-EDF PSG/Hypnogram EDF pairs into per-recording .npz files.

    For each ``*PSG.edf`` / ``*Hypnogram.edf`` pair found in ``--data_dir``,
    the selected EEG channel is split into 30-s epochs, per-epoch labels are
    derived from the hypnogram annotations, the wake padding around the sleep
    period is trimmed to 30 minutes on each side, MOVE/UNK epochs are removed,
    and the result is saved to ``--output_dir/<channel>/<recording>.npz``.

    Raises:
        Exception: if the selected channel is missing or an annotation
            duration is not a multiple of the epoch duration.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="./edf",
                        help="File path to the Sleep-EDF dataset.")
    parser.add_argument("--output_dir", type=str, default="./npz",
                        help="Directory where to save outputs.")
    parser.add_argument("--select_ch", type=str, default="EEG Fpz-Cz",
                        help="Name of the channel in the dataset.")
    parser.add_argument("--log_file", type=str, default="info_ch_extract.log",
                        help="Log file.")
    args = parser.parse_args()
    # Output dir: one sub-directory per channel (e.g. "Fpz-Cz").
    args.output_dir = os.path.join(args.output_dir, args.select_ch.split(' ')[-1])
    os.makedirs(args.output_dir, exist_ok=True)
    args.log_file = os.path.join(args.output_dir, args.log_file)
    # Create logger writing to both the console and the log file.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    file_handler = logging.FileHandler(args.log_file)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    # Select channel
    select_ch = args.select_ch
    # Read raw and annotation EDF files; sorting keeps PSG/Hypnogram aligned
    # because paired files share the same subject/night prefix.
    psg_fnames = glob.glob(os.path.join(args.data_dir, "*PSG.edf"))
    ann_fnames = glob.glob(os.path.join(args.data_dir, "*Hypnogram.edf"))
    psg_fnames.sort()
    ann_fnames.sort()
    psg_fnames = np.asarray(psg_fnames)
    ann_fnames = np.asarray(ann_fnames)
    for i in range(len(psg_fnames)):
        logger.info("Loading ...")
        logger.info("Signal file: {}".format(psg_fnames[i]))
        logger.info("Annotation file: {}".format(ann_fnames[i]))
        psg_f = pyedflib.EdfReader(psg_fnames[i])
        ann_f = pyedflib.EdfReader(ann_fnames[i])
        # Paired files must describe the same recording session.
        assert psg_f.getStartdatetime() == ann_f.getStartdatetime()
        start_datetime = psg_f.getStartdatetime()
        logger.info("Start datetime: {}".format(str(start_datetime)))
        file_duration = psg_f.getFileDuration()
        logger.info("File duration: {} sec".format(file_duration))
        epoch_duration = psg_f.datarecord_duration
        if psg_f.datarecord_duration == 60:  # Fix problems of SC4362F0-PSG.edf, SC4362FC-Hypnogram.edf
            epoch_duration = epoch_duration / 2
            logger.info("Epoch duration: {} sec (changed from 60 sec)".format(epoch_duration))
        else:
            logger.info("Epoch duration: {} sec".format(epoch_duration))
        # Extract signal from the selected channel
        ch_names = psg_f.getSignalLabels()
        ch_samples = psg_f.getNSamples()
        select_ch_idx = -1
        for s in range(psg_f.signals_in_file):
            if ch_names[s] == select_ch:
                select_ch_idx = s
                break
        if select_ch_idx == -1:
            raise Exception("Channel not found.")
        sampling_rate = 100.0
        n_epoch_samples = int(epoch_duration * sampling_rate)
        # Reshape the continuous signal into (n_epochs, samples_per_epoch).
        signals = psg_f.readSignal(select_ch_idx).reshape(-1, n_epoch_samples)
        logger.info("Select channel: {}".format(select_ch))
        logger.info("Select channel samples: {}".format(ch_samples[select_ch_idx]))
        logger.info("Sample rate: {}".format(sampling_rate))
        # Sanity check
        n_epochs = psg_f.datarecords_in_file
        if psg_f.datarecord_duration == 60:  # Fix problems of SC4362F0-PSG.edf, SC4362FC-Hypnogram.edf
            n_epochs = n_epochs * 2
        assert len(signals) == n_epochs, f"signal: {signals.shape} != {n_epochs}"
        # Generate labels from onset and duration annotation
        labels = []
        total_duration = 0
        ann_onsets, ann_durations, ann_stages = ann_f.readAnnotations()
        for a in range(len(ann_stages)):
            onset_sec = int(ann_onsets[a])
            duration_sec = int(ann_durations[a])
            ann_str = "".join(ann_stages[a])
            # Annotations must be contiguous: each onset equals the sum of
            # all previous durations.
            assert onset_sec == total_duration
            # Get label value
            label = ann2label[ann_str]
            # Compute # of epoch for this stage
            if duration_sec % epoch_duration != 0:
                logger.info(f"Something wrong: {duration_sec} {epoch_duration}")
                raise Exception(f"Something wrong: {duration_sec} {epoch_duration}")
            duration_epoch = int(duration_sec / epoch_duration)
            # Generate sleep stage labels.  NOTE: dtype=int replaces the
            # removed np.int alias (gone since NumPy 1.24), which crashed here.
            label_epoch = np.ones(duration_epoch, dtype=int) * label
            labels.append(label_epoch)
            total_duration += duration_sec
            logger.info("Include onset:{}, duration:{}, label:{} ({})".format(
                onset_sec, duration_sec, label, ann_str
            ))
        labels = np.hstack(labels)
        # Remove annotations that are longer than the recorded signals
        labels = labels[:len(signals)]
        # Get epochs and their corresponding labels
        x = signals.astype(np.float32)
        y = labels.astype(np.int32)
        # Select only sleep periods: keep 30 min of wake (60 epochs of 30 s)
        # on each side of the first/last non-wake epoch.
        w_edge_mins = 30
        nw_idx = np.where(y != stage_dict["W"])[0]
        start_idx = nw_idx[0] - (w_edge_mins * 2)
        end_idx = nw_idx[-1] + (w_edge_mins * 2)
        if start_idx < 0: start_idx = 0
        if end_idx >= len(y): end_idx = len(y) - 1
        select_idx = np.arange(start_idx, end_idx+1)
        logger.info("Data before selection: {}, {}".format(x.shape, y.shape))
        x = x[select_idx]
        y = y[select_idx]
        logger.info("Data after selection: {}, {}".format(x.shape, y.shape))
        # Remove movement and unknown epochs, which are not scorable stages.
        move_idx = np.where(y == stage_dict["MOVE"])[0]
        unk_idx = np.where(y == stage_dict["UNK"])[0]
        if len(move_idx) > 0 or len(unk_idx) > 0:
            remove_idx = np.union1d(move_idx, unk_idx)
            logger.info("Remove irrelavant stages")
            logger.info("  Movement: ({}) {}".format(len(move_idx), move_idx))
            logger.info("  Unknown: ({}) {}".format(len(unk_idx), unk_idx))
            logger.info("  Remove: ({}) {}".format(len(remove_idx), remove_idx))
            logger.info("  Data before removal: {}, {}".format(x.shape, y.shape))
            select_idx = np.setdiff1d(np.arange(len(x)), remove_idx)
            x = x[select_idx]
            y = y[select_idx]
            logger.info("  Data after removal: {}, {}".format(x.shape, y.shape))
        # Save one .npz per recording, keyed by the PSG file name.
        filename = ntpath.basename(psg_fnames[i]).replace("-PSG.edf", ".npz")
        save_dict = {
            "x": x,
            "y": y,
            "fs": sampling_rate,
            "ch_label": select_ch,
            "start_datetime": start_datetime,
            "file_duration": file_duration,
            "epoch_duration": epoch_duration,
            "n_all_epochs": n_epochs,
            "n_epochs": len(x),
        }
        np.savez(os.path.join(args.output_dir, filename), **save_dict)
        logger.info("\n=======================================\n")
if __name__ == "__main__":
    main()
| 7,734
| 35.314554
| 102
|
py
|
SleePyCo
|
SleePyCo-main/dset/Sleep-EDF-2018/prepare_sleep-edf-2018.py
|
import os
import glob
import ntpath
import logging
import argparse
import pyedflib
import numpy as np
# Numeric codes for the sleep stages: the AASM five-class scheme (W, N1, N2,
# N3, REM) plus movement/unknown markers used only during preprocessing.
W, N1, N2, N3, REM, MOVE, UNK = 0, 1, 2, 3, 4, 5, 6

# Stage-name -> code lookup used when trimming wake and removing bad epochs.
stage_dict = {
    "W": W,
    "N1": N1,
    "N2": N2,
    "N3": N3,
    "REM": REM,
    "MOVE": MOVE,
    "UNK": UNK,
}

# EDF annotation string -> label code for this dataset.  Stages 3 and 4 are
# merged into N3 following the AASM manual; '?' and movement epochs are
# flagged so they can be dropped later.
ann2label = {
    "Sleep stage W": W,
    "Sleep stage 1": N1,
    "Sleep stage 2": N2,
    "Sleep stage 3": N3,
    "Sleep stage 4": N3,
    "Sleep stage R": REM,
    "Sleep stage ?": UNK,
    "Movement time": MOVE,
}
def main():
    """Convert Sleep-EDF PSG/Hypnogram EDF pairs into per-recording .npz files.

    For each ``*PSG.edf`` / ``*Hypnogram.edf`` pair found in ``--data_dir``,
    the selected EEG channel is split into 30-s epochs, per-epoch labels are
    derived from the hypnogram annotations, the wake padding around the sleep
    period is trimmed to 30 minutes on each side, MOVE/UNK epochs are removed,
    and the result is saved to ``--output_dir/<channel>/<recording>.npz``.

    Raises:
        Exception: if the selected channel is missing or an annotation
            duration is not a multiple of the epoch duration.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="./edf",
                        help="File path to the Sleep-EDF dataset.")
    parser.add_argument("--output_dir", type=str, default="./npz",
                        help="Directory where to save outputs.")
    parser.add_argument("--select_ch", type=str, default="EEG Fpz-Cz",
                        help="Name of the channel in the dataset.")
    parser.add_argument("--log_file", type=str, default="info_ch_extract.log",
                        help="Log file.")
    args = parser.parse_args()
    # Output dir: one sub-directory per channel (e.g. "Fpz-Cz").
    args.output_dir = os.path.join(args.output_dir, args.select_ch.split(' ')[-1])
    os.makedirs(args.output_dir, exist_ok=True)
    args.log_file = os.path.join(args.output_dir, args.log_file)
    # Create logger writing to both the console and the log file.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    file_handler = logging.FileHandler(args.log_file)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    # Select channel
    select_ch = args.select_ch
    # Read raw and annotation EDF files; sorting keeps PSG/Hypnogram aligned
    # because paired files share the same subject/night prefix.
    psg_fnames = glob.glob(os.path.join(args.data_dir, "*PSG.edf"))
    ann_fnames = glob.glob(os.path.join(args.data_dir, "*Hypnogram.edf"))
    psg_fnames.sort()
    ann_fnames.sort()
    psg_fnames = np.asarray(psg_fnames)
    ann_fnames = np.asarray(ann_fnames)
    for i in range(len(psg_fnames)):
        logger.info("Loading ...")
        logger.info("Signal file: {}".format(psg_fnames[i]))
        logger.info("Annotation file: {}".format(ann_fnames[i]))
        psg_f = pyedflib.EdfReader(psg_fnames[i])
        ann_f = pyedflib.EdfReader(ann_fnames[i])
        # Paired files must describe the same recording session.
        assert psg_f.getStartdatetime() == ann_f.getStartdatetime()
        start_datetime = psg_f.getStartdatetime()
        logger.info("Start datetime: {}".format(str(start_datetime)))
        file_duration = psg_f.getFileDuration()
        logger.info("File duration: {} sec".format(file_duration))
        epoch_duration = psg_f.datarecord_duration
        if psg_f.datarecord_duration == 60:  # Fix problems of SC4362F0-PSG.edf, SC4362FC-Hypnogram.edf
            epoch_duration = epoch_duration / 2
            logger.info("Epoch duration: {} sec (changed from 60 sec)".format(epoch_duration))
        else:
            logger.info("Epoch duration: {} sec".format(epoch_duration))
        # Extract signal from the selected channel
        ch_names = psg_f.getSignalLabels()
        ch_samples = psg_f.getNSamples()
        select_ch_idx = -1
        for s in range(psg_f.signals_in_file):
            if ch_names[s] == select_ch:
                select_ch_idx = s
                break
        if select_ch_idx == -1:
            raise Exception("Channel not found.")
        sampling_rate = 100.0
        n_epoch_samples = int(epoch_duration * sampling_rate)
        # Reshape the continuous signal into (n_epochs, samples_per_epoch).
        signals = psg_f.readSignal(select_ch_idx).reshape(-1, n_epoch_samples)
        logger.info("Select channel: {}".format(select_ch))
        logger.info("Select channel samples: {}".format(ch_samples[select_ch_idx]))
        logger.info("Sample rate: {}".format(sampling_rate))
        # Sanity check
        n_epochs = psg_f.datarecords_in_file
        if psg_f.datarecord_duration == 60:  # Fix problems of SC4362F0-PSG.edf, SC4362FC-Hypnogram.edf
            n_epochs = n_epochs * 2
        assert len(signals) == n_epochs, f"signal: {signals.shape} != {n_epochs}"
        # Generate labels from onset and duration annotation
        labels = []
        total_duration = 0
        ann_onsets, ann_durations, ann_stages = ann_f.readAnnotations()
        for a in range(len(ann_stages)):
            onset_sec = int(ann_onsets[a])
            duration_sec = int(ann_durations[a])
            ann_str = "".join(ann_stages[a])
            # Annotations must be contiguous: each onset equals the sum of
            # all previous durations.
            assert onset_sec == total_duration
            # Get label value
            label = ann2label[ann_str]
            # Compute # of epoch for this stage
            if duration_sec % epoch_duration != 0:
                logger.info(f"Something wrong: {duration_sec} {epoch_duration}")
                raise Exception(f"Something wrong: {duration_sec} {epoch_duration}")
            duration_epoch = int(duration_sec / epoch_duration)
            # Generate sleep stage labels.  NOTE: dtype=int replaces the
            # removed np.int alias (gone since NumPy 1.24), which crashed here.
            label_epoch = np.ones(duration_epoch, dtype=int) * label
            labels.append(label_epoch)
            total_duration += duration_sec
            logger.info("Include onset:{}, duration:{}, label:{} ({})".format(
                onset_sec, duration_sec, label, ann_str
            ))
        labels = np.hstack(labels)
        # Remove annotations that are longer than the recorded signals
        labels = labels[:len(signals)]
        # Get epochs and their corresponding labels
        x = signals.astype(np.float32)
        y = labels.astype(np.int32)
        # Select only sleep periods: keep 30 min of wake (60 epochs of 30 s)
        # on each side of the first/last non-wake epoch.
        w_edge_mins = 30
        nw_idx = np.where(y != stage_dict["W"])[0]
        start_idx = nw_idx[0] - (w_edge_mins * 2)
        end_idx = nw_idx[-1] + (w_edge_mins * 2)
        if start_idx < 0: start_idx = 0
        if end_idx >= len(y): end_idx = len(y) - 1
        select_idx = np.arange(start_idx, end_idx+1)
        logger.info("Data before selection: {}, {}".format(x.shape, y.shape))
        x = x[select_idx]
        y = y[select_idx]
        logger.info("Data after selection: {}, {}".format(x.shape, y.shape))
        # Remove movement and unknown epochs, which are not scorable stages.
        move_idx = np.where(y == stage_dict["MOVE"])[0]
        unk_idx = np.where(y == stage_dict["UNK"])[0]
        if len(move_idx) > 0 or len(unk_idx) > 0:
            remove_idx = np.union1d(move_idx, unk_idx)
            logger.info("Remove irrelavant stages")
            logger.info("  Movement: ({}) {}".format(len(move_idx), move_idx))
            logger.info("  Unknown: ({}) {}".format(len(unk_idx), unk_idx))
            logger.info("  Remove: ({}) {}".format(len(remove_idx), remove_idx))
            logger.info("  Data before removal: {}, {}".format(x.shape, y.shape))
            select_idx = np.setdiff1d(np.arange(len(x)), remove_idx)
            x = x[select_idx]
            y = y[select_idx]
            logger.info("  Data after removal: {}, {}".format(x.shape, y.shape))
        # Save one .npz per recording, keyed by the PSG file name.
        filename = ntpath.basename(psg_fnames[i]).replace("-PSG.edf", ".npz")
        save_dict = {
            "x": x,
            "y": y,
            "fs": sampling_rate,
            "ch_label": select_ch,
            "start_datetime": start_datetime,
            "file_duration": file_duration,
            "epoch_duration": epoch_duration,
            "n_all_epochs": n_epochs,
            "n_epochs": len(x),
        }
        np.savez(os.path.join(args.output_dir, filename), **save_dict)
        logger.info("\n=======================================\n")
if __name__ == "__main__":
    main()
| 7,734
| 35.314554
| 102
|
py
|
SleePyCo
|
SleePyCo-main/dset/Sleep-EDF-2018/download_sleep-edf-2018.py
|
import os

# PhysioNet base URL for the Sleep-EDF Expanded "Sleep Cassette" (SC) study.
BASE_URL = 'https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette'

# One entry per recording night: (record stem, scorer letter).
# The PSG file is        '<stem>0-PSG.edf'              (e.g. SC4001E0-PSG.edf)
# The annotation file is '<stem><scorer>-Hypnogram.edf' (e.g. SC4001EC-Hypnogram.edf)
# Entries are listed in the original download order so the fetch sequence is
# unchanged; gaps in the numbering (e.g. SC4361, SC4521) are absent on PhysioNet.
RECORDS = [
    ('SC4001E', 'C'), ('SC4002E', 'C'), ('SC4011E', 'H'), ('SC4012E', 'C'),
    ('SC4021E', 'H'), ('SC4022E', 'J'), ('SC4031E', 'C'), ('SC4032E', 'P'),
    ('SC4041E', 'C'), ('SC4042E', 'C'), ('SC4051E', 'C'), ('SC4052E', 'C'),
    ('SC4061E', 'C'), ('SC4062E', 'C'), ('SC4071E', 'C'), ('SC4072E', 'H'),
    ('SC4081E', 'C'), ('SC4082E', 'P'), ('SC4091E', 'C'), ('SC4092E', 'C'),
    ('SC4101E', 'C'), ('SC4102E', 'C'), ('SC4111E', 'C'), ('SC4112E', 'C'),
    ('SC4121E', 'C'), ('SC4122E', 'V'), ('SC4131E', 'C'), ('SC4141E', 'U'),
    ('SC4142E', 'U'), ('SC4151E', 'C'), ('SC4152E', 'C'), ('SC4161E', 'C'),
    ('SC4162E', 'C'), ('SC4171E', 'U'), ('SC4172E', 'C'), ('SC4181E', 'C'),
    ('SC4182E', 'C'), ('SC4191E', 'P'), ('SC4192E', 'V'), ('SC4201E', 'C'),
    ('SC4202E', 'C'), ('SC4211E', 'C'), ('SC4212E', 'C'), ('SC4221E', 'J'),
    ('SC4222E', 'C'), ('SC4231E', 'J'), ('SC4232E', 'V'), ('SC4241E', 'C'),
    ('SC4242E', 'A'), ('SC4251E', 'P'), ('SC4252E', 'U'), ('SC4261F', 'M'),
    ('SC4262F', 'C'), ('SC4271F', 'C'), ('SC4272F', 'M'), ('SC4281G', 'C'),
    ('SC4282G', 'C'), ('SC4291G', 'A'), ('SC4292G', 'C'), ('SC4301E', 'C'),
    ('SC4302E', 'V'), ('SC4311E', 'C'), ('SC4312E', 'M'), ('SC4321E', 'C'),
    ('SC4322E', 'C'), ('SC4331F', 'V'), ('SC4332F', 'C'), ('SC4341F', 'A'),
    ('SC4342F', 'A'), ('SC4351F', 'A'), ('SC4352F', 'V'), ('SC4362F', 'C'),
    ('SC4371F', 'A'), ('SC4372F', 'C'), ('SC4381F', 'C'), ('SC4382F', 'W'),
    ('SC4401E', 'C'), ('SC4402E', 'W'), ('SC4411E', 'J'), ('SC4412E', 'M'),
    ('SC4421E', 'A'), ('SC4422E', 'A'), ('SC4431E', 'M'), ('SC4432E', 'M'),
    ('SC4441E', 'C'), ('SC4442E', 'V'), ('SC4451F', 'Y'), ('SC4452F', 'W'),
    ('SC4461F', 'A'), ('SC4462F', 'J'), ('SC4471F', 'A'), ('SC4472F', 'A'),
    ('SC4481F', 'V'), ('SC4482F', 'J'), ('SC4491G', 'J'), ('SC4492G', 'J'),
    ('SC4501E', 'W'), ('SC4502E', 'M'), ('SC4511E', 'J'), ('SC4512E', 'W'),
    ('SC4522E', 'M'), ('SC4531E', 'M'), ('SC4532E', 'V'), ('SC4541F', 'A'),
    ('SC4542F', 'W'), ('SC4551F', 'C'), ('SC4552F', 'W'), ('SC4561F', 'J'),
    ('SC4562F', 'J'), ('SC4571F', 'V'), ('SC4572F', 'C'), ('SC4581G', 'M'),
    ('SC4582G', 'P'), ('SC4591G', 'Y'), ('SC4592G', 'Y'), ('SC4601E', 'C'),
    ('SC4602E', 'J'), ('SC4611E', 'G'), ('SC4612E', 'A'), ('SC4621E', 'V'),
    ('SC4622E', 'J'), ('SC4631E', 'M'), ('SC4632E', 'A'), ('SC4641E', 'P'),
    ('SC4642E', 'P'), ('SC4651E', 'P'), ('SC4652E', 'G'), ('SC4661E', 'J'),
    ('SC4662E', 'J'), ('SC4671G', 'J'), ('SC4672G', 'V'), ('SC4701E', 'C'),
    ('SC4702E', 'A'), ('SC4711E', 'C'), ('SC4712E', 'A'), ('SC4721E', 'C'),
    ('SC4722E', 'M'), ('SC4731E', 'M'), ('SC4732E', 'J'), ('SC4741E', 'A'),
    ('SC4742E', 'C'), ('SC4751E', 'C'), ('SC4752E', 'M'),
]

# Download every PSG/Hypnogram pair into ./edf (created if absent).
os.makedirs('./edf', exist_ok=True)
for stem, scorer in RECORDS:
    # NOTE(review): os.system with a shell string is kept for behavioral parity
    # with the original per-file wget invocations; the URLs are constant so
    # there is no injection risk, but subprocess.run([...], shell=False) would
    # be the modern choice.
    os.system('wget {}/{}0-PSG.edf -P ./edf'.format(BASE_URL, stem))
    os.system('wget {}/{}{}-Hypnogram.edf -P ./edf'.format(BASE_URL, stem, scorer))
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4761E0-PSG.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4761EP-Hypnogram.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4762E0-PSG.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4762EG-Hypnogram.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4771G0-PSG.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4771GC-Hypnogram.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4772G0-PSG.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4772GC-Hypnogram.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4801G0-PSG.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4801GC-Hypnogram.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4802G0-PSG.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4802GV-Hypnogram.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4811G0-PSG.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4811GG-Hypnogram.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4812G0-PSG.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4812GV-Hypnogram.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4821G0-PSG.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4821GC-Hypnogram.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4822G0-PSG.edf -P ./edf')
os.system('wget https://www.physionet.org/physiobank/database/sleep-edfx/sleep-cassette/SC4822GC-Hypnogram.edf -P ./edf')
| 36,460
| 116.996764
| 121
|
py
|
SASA
|
SASA-main/setup.py
|
import os
import subprocess
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
def get_git_commit_number():
    """Return the short (7-character) hash of the current git HEAD.

    Falls back to the placeholder '0000000' when the working directory is
    not a git checkout (no ``.git`` entry present).
    """
    if not os.path.exists('.git'):
        return '0000000'
    # Capture `git rev-parse HEAD` and keep only the short-hash prefix.
    completed = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
    return completed.stdout.decode('utf-8')[:7]
def make_cuda_ext(name, module, sources):
    """Build a ``CUDAExtension`` named ``<module>.<name>``.

    ``sources`` are paths relative to the module's directory (dots in
    ``module`` become path separators), e.g. ``src/foo.cpp`` under
    ``pcdet/ops/...``.
    """
    module_dir = os.path.join(*module.split('.'))
    extension = CUDAExtension(
        name='%s.%s' % (module, name),
        sources=[os.path.join(module_dir, src) for src in sources]
    )
    return extension
def write_version_to_file(version, target_file):
    """Write ``__version__ = "<version>"`` (with trailing newline) to *target_file*."""
    with open(target_file, 'w') as fout:
        # Equivalent to print(..., file=f): same text, explicit newline.
        fout.write('__version__ = "{}"\n'.format(version))
if __name__ == '__main__':
    # PEP 440 local version: base release plus the current git commit hash,
    # also written to pcdet/version.py so the package can report it at runtime.
    version = '0.3.0+%s' % get_git_commit_number()
    write_version_to_file(version, 'pcdet/version.py')
    setup(
        name='pcdet',
        version=version,
        description='OpenPCDet is a general codebase for 3D object detection from point cloud',
        install_requires=[
            'numpy',
            'torch>=1.1',
            'spconv',
            'numba',
            'tensorboardX',
            'easydict',
            'pyyaml'
        ],
        author='Shaoshuai Shi',
        author_email='shaoshuaics@gmail.com',
        license='Apache License 2.0',
        packages=find_packages(exclude=['tools', 'data', 'output']),
        # BuildExtension supplies the nvcc/C++ compiler plumbing for the
        # CUDA extensions declared below.
        cmdclass={'build_ext': BuildExtension},
        ext_modules=[
            # 3D IoU computation and rotated NMS (CPU + CUDA kernels).
            make_cuda_ext(
                name='iou3d_nms_cuda',
                module='pcdet.ops.iou3d_nms',
                sources=[
                    'src/iou3d_cpu.cpp',
                    'src/iou3d_nms_api.cpp',
                    'src/iou3d_nms.cpp',
                    'src/iou3d_nms_kernel.cu',
                ]
            ),
            # Pool point features inside axis-rotated 3D boxes.
            make_cuda_ext(
                name='roiaware_pool3d_cuda',
                module='pcdet.ops.roiaware_pool3d',
                sources=[
                    'src/roiaware_pool3d.cpp',
                    'src/roiaware_pool3d_kernel.cu',
                ]
            ),
            # Gather raw points falling inside RoIs.
            make_cuda_ext(
                name='roipoint_pool3d_cuda',
                module='pcdet.ops.roipoint_pool3d',
                sources=[
                    'src/roipoint_pool3d.cpp',
                    'src/roipoint_pool3d_kernel.cu',
                ]
            ),
            # PointNet++ ops for stacked (variable-size) batches.
            make_cuda_ext(
                name='pointnet2_stack_cuda',
                module='pcdet.ops.pointnet2.pointnet2_stack',
                sources=[
                    'src/pointnet2_api.cpp',
                    'src/ball_query.cpp',
                    'src/ball_query_gpu.cu',
                    'src/group_points.cpp',
                    'src/group_points_gpu.cu',
                    'src/sampling.cpp',
                    'src/sampling_gpu.cu',
                    'src/interpolate.cpp',
                    'src/interpolate_gpu.cu',
                ],
            ),
            # PointNet++ ops for regular fixed-size batches.
            make_cuda_ext(
                name='pointnet2_batch_cuda',
                module='pcdet.ops.pointnet2.pointnet2_batch',
                sources=[
                    'src/pointnet2_api.cpp',
                    'src/ball_query.cpp',
                    'src/ball_query_gpu.cu',
                    'src/group_points.cpp',
                    'src/group_points_gpu.cu',
                    'src/interpolate.cpp',
                    'src/interpolate_gpu.cu',
                    'src/sampling.cpp',
                    'src/sampling_gpu.cu',
                ],
            ),
        ],
    )
| 3,616
| 31.294643
| 95
|
py
|
SASA
|
SASA-main/tools/test.py
|
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
def parse_config():
    """Parse evaluation CLI arguments and load the YAML config into the global ``cfg``.

    Side effects: populates ``cfg`` from ``--cfg_file`` (plus any ``--set``
    overrides), derives ``cfg.TAG`` / ``cfg.EXP_GROUP_PATH`` from the config
    path, and seeds numpy for reproducible evaluation.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    # REMAINDER: everything after --set is forwarded verbatim as config overrides.
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    # Fix the numpy RNG so any sampling done during evaluation is repeatable.
    np.random.seed(1024)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Evaluate one checkpoint (``args.ckpt``) and log curve metrics to tensorboard.

    Args:
        model: network built by ``build_network`` (weights loaded here).
        epoch_id: epoch label used only for logging/result naming.
        dist_test: when True, checkpoint tensors are first mapped to CPU.
    """
    # load checkpoint
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # tensorboard log (rank 0 only writes)
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    # start evaluation
    tb_dict = eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
        result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
    if cfg.LOCAL_RANK == 0:
        # Entries whose key contains 'curve' hold precision/recall arrays;
        # each recall point becomes one tensorboard step (recall scaled x1000
        # so it can serve as an integer global step).
        for key, val in tb_dict.items():
            if 'curve' in key:
                for i in range(len(val['recalls'])):
                    tb_log.add_scalars(key,
                                       dict(zip(val['precisions'].keys(), [_[i] for _ in val['precisions'].values()])),
                                       val['recalls'][i] * 1000)
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Return the first checkpoint in ``ckpt_dir`` that has not been evaluated yet.

    Checkpoints are scanned in modification-time order. A checkpoint counts as
    evaluated when its epoch number appears in ``ckpt_record_file`` (one float
    per line); epochs below ``args.start_epoch`` and optimizer-state files
    (epoch id containing 'optim') are skipped.

    Returns:
        (epoch_id, ckpt_path): epoch id string and checkpoint path, or
        (-1, None) when every eligible checkpoint has already been evaluated.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # Close the record file deterministically instead of leaking the handle
    # (the original relied on GC to close an anonymous open()).
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]
    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if not num_list:
            continue
        epoch_id = num_list[-1]
        # Files like 'checkpoint_epoch_optim.pth' hold optimizer state only.
        if 'optim' in epoch_id:
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt
    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll ``ckpt_dir`` and evaluate every new checkpoint as it appears.

    Runs until no unevaluated checkpoint shows up for more than
    ``args.max_waiting_mins`` minutes (after at least one evaluation).
    Evaluated epoch ids are appended to a record file so restarts resume
    where they left off.
    """
    # evaluated ckpt record (touch the file so the first read succeeds)
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log (rank 0 only)
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            # Nothing new yet: wait 30 s, give up once the waiting budget is
            # exhausted (but never before the first successful evaluation).
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            # Scalar metrics go straight to tensorboard; curve entries are
            # handled after the loop.
            for key, val in tb_dict.items():
                if not 'curve' in key:
                    tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
    # draw precision-recall curves for the last evaluation
    # NOTE(review): assumes at least one evaluation completed and rank 0
    # (otherwise tb_dict / tb_log are unbound) — confirm against callers.
    for key, val in tb_dict.items():
        if 'curve' in key:
            for i in range(len(val['recalls'])):
                tb_log.add_scalars(key,
                                   dict(zip(val['precisions'].keys(),
                                            [_[i] for _ in val['precisions'].values()])),
                                   val['recalls'][i] * 1000)
def main():
    """Evaluation entry point: evaluate one checkpoint, or poll-and-evaluate all.

    Builds the output/eval directory layout, a logger, the test dataloader
    and the network, then dispatches to ``repeat_eval_ckpt`` (``--eval_all``)
    or ``eval_single_ckpt``.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        # init_dist_pytorch / init_dist_slurm picked by launcher name.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # --batch_size is the global batch size; convert to per-GPU.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # Single-checkpoint mode: derive the epoch id from digits in the
        # checkpoint filename (falls back to 'no_number').
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)


if __name__ == '__main__':
    main()
| 9,291
| 40.855856
| 120
|
py
|
SASA
|
SASA-main/tools/demo.py
|
import argparse
import glob
from pathlib import Path
import mayavi.mlab as mlab
import numpy as np
import torch
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets import DatasetTemplate
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils
from visual_utils import visualize_utils as V
class DemoDataset(DatasetTemplate):
    """Label-free dataset for the quick demo: loads raw point clouds from disk.

    ``root_path`` may be a single point-cloud file or a directory; in the
    directory case every file matching ``ext`` is used, in sorted order.
    """

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):
        """
        Args:
            root_path: Path to one point-cloud file or a directory of them
            dataset_cfg: DATA_CONFIG node of the model config
            class_names: detection class names from the config
            training: kept for interface compatibility (the demo passes False)
            logger: optional logger
            ext: point-cloud file extension, '.bin' (KITTI raw) or '.npy'
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.root_path = root_path
        self.ext = ext
        data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path]
        data_file_list.sort()
        self.sample_file_list = data_file_list

    def __len__(self):
        # One sample per point-cloud file.
        return len(self.sample_file_list)

    def __getitem__(self, index):
        """Load one point cloud and run it through the standard preprocessing pipeline."""
        if self.ext == '.bin':
            # KITTI-style binary: flat float32 stream reshaped to (N, 4) —
            # presumably (x, y, z, intensity); verify against the data source.
            points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4)
        elif self.ext == '.npy':
            points = np.load(self.sample_file_list[index])
        else:
            raise NotImplementedError
        input_dict = {
            'points': points,
            'frame_id': index,
        }
        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict
def parse_config():
    """Parse demo CLI options and load the YAML config into the global ``cfg``.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                        help='specify the config for demo')
    parser.add_argument('--data_path', type=str, default='demo_data',
                        help='specify the point cloud data file or directory')
    parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
    parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg
def main():
    """Demo entry point: run the detector on each sample and visualize with mayavi."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=Path(args.data_path), ext=args.ext, logger=logger
    )
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    # Load weights mapped to CPU first (to_cpu=True), then move the model to GPU.
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()
    with torch.no_grad():
        for idx, data_dict in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            # Wrap the single sample into a batch of size 1.
            data_dict = demo_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            pred_dicts, _ = model.forward(data_dict)
            # Column 0 of 'points' is the batch index added by collate_batch;
            # only the raw coordinates/features are drawn.
            V.draw_scenes(
                points=data_dict['points'][:, 1:], ref_boxes=pred_dicts[0]['pred_boxes'],
                ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels']
            )
            # Block until the mayavi window is closed before the next sample.
            mlab.show(stop=True)
    logger.info('Demo done.')


if __name__ == '__main__':
    main()
| 3,575
| 33.384615
| 118
|
py
|
SASA
|
SASA-main/tools/train.py
|
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.distributed as dist
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_utils import train_model
def parse_config():
    """Parse training CLI arguments and load the YAML config into the global ``cfg``.

    Side effects: populates ``cfg`` from ``--cfg_file`` (plus any ``--set``
    overrides) and derives ``cfg.TAG`` / ``cfg.EXP_GROUP_PATH`` from the
    config path.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    # 0 disables seeding; any other value seeds all RNGs with it.
    parser.add_argument('--fix_random_seed', type=int, default=666, help='random seed')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=10, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    # REMAINDER: everything after --set is forwarded verbatim as config overrides.
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Training entry point: build everything, train, then evaluate recent epochs.

    Workflow: parse config -> (optional) distributed init -> dataloader,
    network, optimizer -> resume from checkpoint if available -> train ->
    evaluate the last 10 epochs via ``repeat_eval_ckpt``.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        # init_dist_pytorch / init_dist_slurm picked by launcher name.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # --batch_size is the global batch size; convert to per-GPU.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if not args.fix_random_seed == 0:
        common_utils.set_random_seed(args.fix_random_seed)
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config next to the run outputs.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    train_set, train_loader, train_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=train_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        # BUGFIX: was ``to_cpu=dist`` — that is the torch.distributed *module*,
        # which is always truthy; use the boolean distributed-training flag.
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # BUGFIX: ``to_cpu=dist`` -> ``to_cpu=dist_train`` (see above).
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # No explicit checkpoint: resume from the newest one in the run dir.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            # BUGFIX: ``to_cpu=dist`` -> ``to_cpu=dist_train`` (see above).
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    total_iters_each_epoch = len(train_loader)
    if args.merge_all_iters_to_one_epoch:
        total_iters_each_epoch = total_iters_each_epoch // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_model(
        model,
        optimizer,
        train_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        train_sampler=train_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
    )
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - 10, 0)  # Only evaluate the last 10 epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))


if __name__ == '__main__':
    main()
| 8,839
| 42.762376
| 118
|
py
|
SASA
|
SASA-main/tools/eval_utils/eval_utils.py
|
import pickle
import time
import numpy as np
import torch
import tqdm
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
    """Fold per-batch recall counters into ``metric`` and refresh the
    progress-bar summary string in ``disp_dict`` (both updated in place)."""
    # Accumulate only the counters the batch actually reported.
    for name in metric:
        if name in ret_dict:
            metric[name] = metric[name] + ret_dict[name]
    # Summarize at the loosest recall threshold only.
    thresh = str(cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST[0])
    roi_hits = metric['recall_roi_%s' % thresh]
    rcnn_hits = metric['recall_rcnn_%s' % thresh]
    disp_dict['recall_%s' % thresh] = '(%d, %d) / %d' % (roi_hits, rcnn_hits, metric['gt_num'])
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Run one full evaluation pass and return a dict of tensorboard metrics.

    Accumulates recall counters per threshold, collects per-frame prediction
    annotations, merges results across ranks when ``dist_test`` is set, dumps
    ``result.pkl`` and finally runs the dataset's official evaluation.
    Non-zero ranks return an empty dict.
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / 'final_result' / 'data'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    # Recall counters: one roi/rcnn pair per threshold plus the GT total.
    metric = {
        'gt_num': 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(cur_thresh)] = 0
        metric['recall_rcnn_%s' % str(cur_thresh)] = 0
    # initialize customized statistics (model-specific hook, optional)
    if hasattr(model, 'init_recall_record'):
        model.init_recall_record(metric)
    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []
    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        # Wrap in DDP so each rank evaluates its shard of the data.
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[local_rank],
            broadcast_buffers=False
        )
    model.eval()
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    for i, batch_dict in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            pred_dicts, ret_dict = model(batch_dict)
        disp_dict = {}
        statistics_info(cfg, ret_dict, metric, disp_dict)
        annos = dataset.generate_prediction_dicts(
            batch_dict, pred_dicts, class_names,
            output_path=final_output_dir if save_to_file else None
        )
        det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    if dist_test:
        # Gather per-rank annotations and counters onto every rank via tmpdir files.
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)
    if cfg.LOCAL_RANK != 0:
        return {}
    ret_dict = {}
    if dist_test:
        # After merge_results_dist, metric is a list of per-rank dicts: sum them.
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        # max(gt_num, 1) guards against division by zero on empty splits.
        cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
        logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
        ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
        ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
    # print customized statistics (model-specific hook, optional)
    if hasattr(model, 'disp_recall_record'):
        model.disp_recall_record(metric, logger, sample_num=len(dataloader.dataset))
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects(%d samples): %.3f'
                % (len(det_annos), total_pred_objects / max(1, len(det_annos))))
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    result_str, result_dict = dataset.evaluation(
        det_annos, class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir
    )
    logger.info(result_str)
    ret_dict.update(result_dict)
    logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict


if __name__ == '__main__':
    pass
| 4,772
| 34.887218
| 131
|
py
|
SASA
|
SASA-main/tools/train_utils/train_utils.py
|
import glob
import os
import torch
import tqdm
from torch.nn.utils import clip_grad_norm_
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
    """Run one training epoch (or one merged slice of iterations).

    Args:
        model_func: callable(model, batch) -> (loss, tb_dict, disp_dict).
        accumulated_iter: global iteration counter carried across epochs.
        dataloader_iter: persistent iterator over ``train_loader``; it is
            re-created here whenever this call consumes a whole epoch.

    Returns:
        The updated ``accumulated_iter``.
    """
    if total_it_each_epoch == len(train_loader):
        # Not merging epochs: start a fresh pass over the data.
        dataloader_iter = iter(train_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    for cur_it in range(total_it_each_epoch):
        try:
            batch = next(dataloader_iter)
        except StopIteration:
            # Merged-epoch mode can exhaust the loader mid-call; restart it.
            dataloader_iter = iter(train_loader)
            batch = next(dataloader_iter)
            print('new iters')
        lr_scheduler.step(accumulated_iter)
        # Some optimizer wrappers expose .lr directly; plain torch optimizers
        # do not. BUGFIX: was a bare ``except:`` swallowing every error.
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:
            cur_lr = optimizer.param_groups[0]['lr']
        model.train()
        optimizer.zero_grad()
        loss, tb_dict, disp_dict = model_func(model, batch)
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        disp_dict.update({'loss': loss.item(), 'lr': cur_lr})
        # log to console and tensorboard (rank 0 only; the learning rate is
        # now logged once per iteration — it was previously written twice
        # with the same value at the same step)
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, train_sampler=None,
                lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
                merge_all_iters_to_one_epoch=False):
    """Full training driver: loops epochs, switches warmup/main scheduler,
    and periodically saves (and prunes) checkpoints on rank 0.

    Args:
        model / optimizer / train_loader / model_func: see train_one_epoch
        lr_scheduler: main scheduler used after warmup
        optim_cfg: config providing WARMUP_EPOCH and GRAD_NORM_CLIP
        start_epoch, total_epochs: epoch range [start_epoch, total_epochs)
        start_iter: global iteration counter to resume from
        rank: distributed rank; checkpointing only happens on rank 0
        tb_log: optional tensorboard SummaryWriter
        ckpt_save_dir: pathlib.Path directory for checkpoint files
        train_sampler: optional distributed sampler (re-seeded per epoch)
        lr_warmup_scheduler: optional scheduler for the first WARMUP_EPOCH epochs
        ckpt_save_interval: save every N epochs
        max_ckpt_save_num: keep at most this many checkpoint files
        merge_all_iters_to_one_epoch: spread the full iteration budget evenly
            over the epoch range (dataset must support it)
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_loader) // max(total_epochs, 1)

        dataloader_iter = iter(train_loader)
        for cur_epoch in tbar:
            if train_sampler is not None:
                train_sampler.set_epoch(cur_epoch)

            # train one epoch (warmup scheduler for the first WARMUP_EPOCH epochs)
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch(
                model, optimizer, train_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter=dataloader_iter
            )

            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)

                # Keep at most max_ckpt_save_num checkpoints: drop the oldest first.
                if len(ckpt_list) >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])

                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a same-type copy of `model_state` with every tensor moved to CPU."""
    cpu_state = type(model_state)()  # preserves OrderedDict (or dict) type
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict; every argument is optional.

    Model weights are copied to CPU when the model is wrapped in
    DistributedDataParallel (so the checkpoint never references GPU storage).

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None

    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # pcdet not installed, or too old to expose __version__
        # (narrowed from a bare `except`)
        version = 'none'

    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    # Serialize the whole checkpoint dict to '<filename>.pth'.
    # NOTE: the branch below is intentionally disabled (`if False`); if enabled
    # it would strip the optimizer state out of `state` and save it separately
    # to '<filename>_optim.pth'.
    if False and 'optimizer_state' in state:
        optimizer_state = state['optimizer_state']
        state.pop('optimizer_state', None)
        optimizer_filename = '{}_optim.pth'.format(filename)
        torch.save({'optimizer_state': optimizer_state}, optimizer_filename)

    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
| 5,667
| 37.297297
| 117
|
py
|
SASA
|
SASA-main/tools/train_utils/optimization/fastai_optim.py
|
# This file is modified from https://github.com/traveller59/second.pytorch
from collections import Iterable
import torch
from torch import nn
from torch._utils import _unflatten_dense_tensors
from torch.nn.utils import parameters_to_vector
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
def split_bn_bias(layer_groups):
    "Split the layers in `layer_groups` into batchnorm (`bn_types`) and non-batchnorm groups."
    split_groups = []
    for group in layer_groups:
        regular, batchnorm = [], []
        for child in group.children():
            # Route each direct child to the bn or non-bn bucket.
            (batchnorm if isinstance(child, bn_types) else regular).append(child)
        split_groups += [nn.Sequential(*regular), nn.Sequential(*batchnorm)]
    return split_groups
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    split_groups = split_bn_bias(layer_groups)
    model_params = [[param for param in lg.parameters() if param.requires_grad] for lg in split_groups]

    if not flat_master:
        # One detached FP32 clone per parameter, re-enabled for autograd.
        master_params = [[p.clone().float().detach() for p in group] for group in model_params]
        for group in master_params:
            for p in group:
                p.requires_grad = True
        return model_params, master_params

    # flat_master: one flattened FP32 vector per (non-empty) group.
    master_params = []
    for group in model_params:
        if len(group) == 0:
            master_params.append([])
            continue
        flat = parameters_to_vector([p.data.float() for p in group])
        flat = torch.nn.Parameter(flat, requires_grad=True)
        if flat.grad is None:
            flat.grad = flat.new(*flat.size())
        master_params.append([flat])
    return model_params, master_params
def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    if flat_master:
        for model_group, master_group in zip(model_params, master_params):
            if len(master_group) != 0:
                # Flatten all group gradients into the single master grad vector.
                grads = [p.grad.data.float() for p in model_group]
                master_group[0].grad.data.copy_(parameters_to_vector(grads))
        return

    for model_group, master_group in zip(model_params, master_params):
        for model_p, master_p in zip(model_group, master_group):
            if model_p.grad is None:
                master_p.grad = None
                continue
            if master_p.grad is None:
                # Lazily allocate the master grad buffer on first use.
                master_p.grad = master_p.data.new(*master_p.data.size())
            master_p.grad.data.copy_(model_p.grad.data)
def master2model(model_params, master_params, flat_master: bool = False) -> None:
    "Copy `master_params` to `model_params`."
    if flat_master:
        for model_group, master_group in zip(model_params, master_params):
            if len(model_group) != 0:
                # Unflatten the single master vector back into per-param chunks.
                unflat = _unflatten_dense_tensors(master_group[0].data, model_group)
                for model_p, master_chunk in zip(model_group, unflat):
                    model_p.data.copy_(master_chunk)
        return

    for model_group, master_group in zip(model_params, master_params):
        for model_p, master_p in zip(model_group, master_group):
            model_p.data.copy_(master_p.data)
def listify(p=None, q=None):
    """Make `p` listy and the same length as `q`.

    `q` may be an int (target length) or a sized iterable whose length is
    matched. A length-1 `p` is broadcast; otherwise lengths must agree.

    Raises:
        AssertionError: if len(p) != n and p was not broadcastable.
    """
    # `collections.Iterable` was removed in Python 3.10; import the ABC from
    # its canonical home locally (the module-level import above should be
    # migrated to `collections.abc` as well).
    from collections.abc import Iterable
    if p is None:
        p = []
    elif isinstance(p, str):
        p = [p]
    elif not isinstance(p, Iterable):
        p = [p]

    n = q if type(q) == int else len(p) if q is None else len(q)
    if len(p) == 1: p = p * n
    assert len(p) == n, f'List len mismatch ({len(p)} vs {n})'
    return list(p)
def trainable_params(m: nn.Module):
    """Return list of trainable params in `m`.

    Fix: the original returned a one-shot `filter` object even though the
    docstring promised a list; a second iteration silently saw nothing.
    Returning a real list is re-iterable and safe to len().
    """
    return [p for p in m.parameters() if p.requires_grad]
def is_tuple(x) -> bool:
    """True iff `x` is a tuple (including tuple subclasses such as namedtuples)."""
    return isinstance(x, tuple)
# copy from fastai.
class OptimWrapper():
    "Basic wrapper around `opt` to simplify hyper-parameters changes."

    def __init__(self, opt, wd, true_wd: bool = False, bn_wd: bool = True):
        # Param groups arrive in (non-bn, bn) pairs produced by split_bn_bias;
        # `opt_keys` records the tunable hyper-parameter names of the optimizer.
        self.opt, self.true_wd, self.bn_wd = opt, true_wd, bn_wd
        self.opt_keys = list(self.opt.param_groups[0].keys())
        self.opt_keys.remove('params')
        self.read_defaults()
        self.wd = wd

    @classmethod
    def create(cls, opt_func, lr, layer_groups, **kwargs):
        "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
        split_groups = split_bn_bias(layer_groups)
        opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
        opt = cls(opt, **kwargs)
        opt.lr, opt.opt_func = listify(lr, layer_groups), opt_func
        return opt

    def new(self, layer_groups):
        "Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."
        opt_func = getattr(self, 'opt_func', self.opt.__class__)
        # Fix: the original also constructed a throwaway inner optimizer here
        # and discarded it; `create` builds the real one itself.
        return self.create(opt_func, self.lr, layer_groups, wd=self.wd, true_wd=self.true_wd, bn_wd=self.bn_wd)

    def __repr__(self) -> str:
        return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'

    # Pytorch optimizer methods
    def step(self) -> None:
        "Set weight decay and step optimizer."
        # weight decay outside of optimizer step (AdamW)
        if self.true_wd:
            for lr, wd, pg1, pg2 in zip(self._lr, self._wd, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
                for p in pg1['params']:
                    # When some parameters are fixed: Shaoshuai Shi
                    if p.requires_grad is False:
                        continue
                    p.data.mul_(1 - wd * lr)
                if self.bn_wd:
                    for p in pg2['params']:
                        # When some parameters are fixed: Shaoshuai Shi
                        if p.requires_grad is False:
                            continue
                        p.data.mul_(1 - wd * lr)
            # Decay already applied manually above, so zero it out for the
            # inner optimizer's own step.
            self.set_val('weight_decay', listify(0, self._wd))
        self.opt.step()

    def zero_grad(self) -> None:
        "Clear optimizer gradients."
        self.opt.zero_grad()

    # Passthrough to the inner opt.
    def __getattr__(self, k: str):
        # NOTE(review): returns None (instead of raising AttributeError) for
        # names missing on the inner optimizer — callers rely on this lenience.
        return getattr(self.opt, k, None)

    def clear(self):
        "Reset the state of the inner optimizer."
        sd = self.state_dict()
        sd['state'] = {}
        self.load_state_dict(sd)

    # Hyperparameters as properties
    @property
    def lr(self) -> float:
        return self._lr[-1]

    @lr.setter
    def lr(self, val: float) -> None:
        self._lr = self.set_val('lr', listify(val, self._lr))

    @property
    def mom(self) -> float:
        return self._mom[-1]

    @mom.setter
    def mom(self, val: float) -> None:
        # SGD exposes 'momentum'; Adam-family exposes it as the first beta.
        if 'momentum' in self.opt_keys:
            self.set_val('momentum', listify(val, self._mom))
        elif 'betas' in self.opt_keys:
            self.set_val('betas', (listify(val, self._mom), self._beta))
        self._mom = listify(val, self._mom)

    @property
    def beta(self) -> float:
        return None if self._beta is None else self._beta[-1]

    @beta.setter
    def beta(self, val: float) -> None:
        "Set beta (or alpha as makes sense for given optimizer)."
        if val is None: return
        if 'betas' in self.opt_keys:
            self.set_val('betas', (self._mom, listify(val, self._beta)))
        elif 'alpha' in self.opt_keys:
            self.set_val('alpha', listify(val, self._beta))
        self._beta = listify(val, self._beta)

    @property
    def wd(self) -> float:
        return self._wd[-1]

    @wd.setter
    def wd(self, val: float) -> None:
        "Set weight decay."
        # With true_wd the decay is applied manually in step(), so the inner
        # optimizer's weight_decay must stay untouched here.
        if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
        self._wd = listify(val, self._wd)

    # Helper functions
    def read_defaults(self) -> None:
        "Read the values inside the optimizer for the hyper-parameters."
        self._beta = None
        if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
        if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
        if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
        if 'betas' in self.opt_keys: self._mom, self._beta = self.read_val('betas')
        if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')

    def set_val(self, key: str, val, bn_groups: bool = True):
        "Set `val` inside the optimizer dictionary at `key`."
        if is_tuple(val): val = [(v1, v2) for v1, v2 in zip(*val)]
        # Walk (non-bn, bn) group pairs; bn groups are skipped when bn_groups=False.
        for v, pg1, pg2 in zip(val, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
            pg1[key] = v
            if bn_groups: pg2[key] = v
        return val

    def read_val(self, key: str):
        "Read a hyperparameter `key` in the optimizer dictionary."
        val = [pg[key] for pg in self.opt.param_groups[::2]]
        if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
        return val
class FastAIMixedOptim(OptimWrapper):
    # Mixed-precision wrapper: keeps FP32 "master" copies of the model's
    # (FP16) parameters and steps those; gradients are scaled by `loss_scale`
    # to avoid FP16 underflow and unscaled before the FP32 step.
    @classmethod
    def create(cls, opt_func, lr,
               layer_groups, model, flat_master=False, loss_scale=512.0, **kwargs):
        "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
        opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
        opt.model_params, opt.master_params = get_master(layer_groups, flat_master)
        opt.flat_master = flat_master
        opt.loss_scale = loss_scale
        opt.model = model
        # Changes the optimizer so that the optimization step is done in FP32.
        # opt = self.learn.opt
        mom, wd, beta = opt.mom, opt.wd, opt.beta
        # Two param groups (non-bn / bn) per layer group, hence the doubling.
        lrs = [lr for lr in opt._lr for _ in range(2)]
        opt_params = [{'params': mp, 'lr': lr} for mp, lr in zip(opt.master_params, lrs)]
        # Rebuild the inner optimizer over the FP32 masters, then restore the
        # hyper-parameters captured above.
        opt.opt = opt_func(opt_params)
        opt.mom, opt.wd, opt.beta = mom, wd, beta
        return opt

    def step(self):
        # Copy (scaled) model grads onto the FP32 masters, unscale, step in
        # FP32, then write the updated masters back into the model weights.
        model_g2master_g(self.model_params, self.master_params, self.flat_master)
        for group in self.master_params:
            for param in group: param.grad.div_(self.loss_scale)
        super(FastAIMixedOptim, self).step()
        self.model.zero_grad()
        # Update the params from master to model.
        master2model(self.model_params, self.master_params, self.flat_master)
| 10,477
| 38.992366
| 117
|
py
|
SASA
|
SASA-main/tools/train_utils/optimization/learning_schedules_fastai.py
|
# This file is modified from https://github.com/traveller59/second.pytorch
import math
from functools import partial
import numpy as np
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
class LRSchedulerStep(object):
    """Step-wise LR/momentum scheduler driven by phase lists.

    Each phase is a pair ``(start, func)`` where ``start`` is a fraction of
    ``total_step`` in [0, 1) and ``func`` maps in-phase progress (0..1) to a
    value. ``func`` may also be given as a source string, which is eval-ed —
    only ever pass trusted configuration here.
    """

    def __init__(self, fai_optimizer, total_step, lr_phases, mom_phases):
        # if not isinstance(fai_optimizer, OptimWrapper):
        #     raise TypeError('{} is not a fastai OptimWrapper'.format(
        #         type(fai_optimizer).__name__))
        self.optimizer = fai_optimizer
        self.total_step = total_step
        self.lr_phases = self._build_phases(lr_phases, total_step)
        self.mom_phases = self._build_phases(mom_phases, total_step)

    @staticmethod
    def _build_phases(phases, total_step):
        """Convert (start_fraction, func) pairs into (start_step, end_step, func).

        Fix: the original ordering assert compared an already-scaled step
        count (int(start * total_step)) against the next raw fraction, which
        spuriously failed for any non-zero first phase; fractions are now
        compared against fractions.
        """
        built = []
        prev_frac = None
        for i, (start, lambda_func) in enumerate(phases):
            if prev_frac is not None:
                assert prev_frac < start, 'phase starts must be strictly increasing'
            prev_frac = start
            if isinstance(lambda_func, str):
                lambda_func = eval(lambda_func)  # NOTE: eval of a trusted config string
            # Each phase ends where the next one starts; the last runs to total_step.
            end = int(phases[i + 1][0] * total_step) if i < len(phases) - 1 else total_step
            built.append((int(start * total_step), end, lambda_func))
        assert built[0][0] == 0, 'first phase must start at step 0'
        return built

    def step(self, step):
        """Apply the latest phase whose start <= step, for both lr and momentum."""
        for start, end, func in self.lr_phases:
            if step >= start:
                self.optimizer.lr = func((step - start) / (end - start))
        for start, end, func in self.mom_phases:
            if step >= start:
                self.optimizer.mom = func((step - start) / (end - start))
def annealing_cos(start, end, pct):
    "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
    # cos(pi*pct)+1 falls from 2 to 0, so the result moves start -> end.
    half_cos = np.cos(np.pi * pct) + 1
    return end + (start - end) / 2 * half_cos
class OneCycle(LRSchedulerStep):
    """Fastai-style one-cycle schedule.

    LR cosine-anneals low -> lr_max over the first `pct_start` of training,
    then lr_max -> low/1e4; momentum mirrors it (max -> min -> max).
    """

    def __init__(self, fai_optimizer, total_step, lr_max, moms, div_factor,
                 pct_start):
        self.lr_max = lr_max
        self.moms = moms
        self.div_factor = div_factor
        self.pct_start = pct_start
        low_lr = self.lr_max / self.div_factor
        # (Removed unused locals a1/a2 computed by the original.)
        lr_phases = ((0, partial(annealing_cos, low_lr, self.lr_max)),
                     (self.pct_start,
                      partial(annealing_cos, self.lr_max, low_lr / 1e4)))
        mom_phases = ((0, partial(annealing_cos, *self.moms)),
                      (self.pct_start, partial(annealing_cos,
                                               *self.moms[::-1])))
        # Seed the optimizer with the warmup start values before phase stepping.
        fai_optimizer.lr, fai_optimizer.mom = low_lr, self.moms[0]
        super().__init__(fai_optimizer, total_step, lr_phases, mom_phases)
class CosineWarmupLR(lr_sched._LRScheduler):
    """Cosine warmup: lr rises from `eta_min` up to each base lr over `T_max`
    epochs (the mirror image of cosine annealing)."""

    def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
        # Attributes must exist before the base constructor, which calls get_lr().
        self.T_max = T_max
        self.eta_min = eta_min
        super(CosineWarmupLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        ramp = (1 - math.cos(math.pi * self.last_epoch / self.T_max)) / 2
        return [self.eta_min + (base_lr - self.eta_min) * ramp
                for base_lr in self.base_lrs]
class FakeOptim:
    """Bare-bones optimizer stand-in for plotting schedules: it only records
    `lr` / `mom` assignments made by a scheduler."""

    def __init__(self):
        self.lr = 0
        self.mom = 0
if __name__ == "__main__":
    # Manual sanity check: trace a OneCycle schedule on a fake optimizer and
    # plot the lr curve, then the momentum curve.
    import matplotlib.pyplot as plt

    opt = FakeOptim()  # 3e-3, wd=0.4, div_factor=10
    schd = OneCycle(opt, 100, 3e-3, (0.95, 0.85), 10.0, 0.1)

    lrs = []
    moms = []
    for i in range(100):
        schd.step(i)
        lrs.append(opt.lr)
        moms.append(opt.mom)
    plt.plot(lrs)
    # plt.plot(moms)
    plt.show()
    plt.plot(moms)
    plt.show()
| 4,169
| 35.26087
| 118
|
py
|
SASA
|
SASA-main/tools/train_utils/optimization/__init__.py
|
from functools import partial
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
from .learning_schedules_fastai import CosineWarmupLR, OneCycle
def build_optimizer(model, optim_cfg):
    """Build an optimizer from the OPTIMIZATION config section.

    'sgd' -> plain torch SGD; 'adam' / 'adam_onecycle' -> fastai OptimWrapper
    around Adam over a single flattened layer group. The wrapper lr is seeded
    at 3e-3 — NOTE(review): presumably the scheduler overwrites it; confirm
    for the plain 'adam' (non-onecycle) path.
    """
    if optim_cfg.OPTIMIZER == 'sgd':
        return optim.SGD(
            model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY,
            momentum=optim_cfg.MOMENTUM
        )

    if optim_cfg.OPTIMIZER in ('adam', 'adam_onecycle'):
        def flatten_model(m):
            # Recursively collect the leaf modules of `m`, depth-first.
            children = list(m.children())
            if not children:
                return [m]
            leaves = []
            for child in children:
                leaves.extend(flatten_model(child))
            return leaves

        def get_layer_groups(m):
            # One layer group holding all leaves, as fastai expects.
            return [nn.Sequential(*flatten_model(m))]

        optimizer_func = partial(optim.Adam, betas=(0.9, 0.99))
        return OptimWrapper.create(
            optimizer_func, 3e-3, get_layer_groups(model), wd=optim_cfg.WEIGHT_DECAY, true_wd=True, bn_wd=True
        )

    raise NotImplementedError
def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg):
    """Build (lr_scheduler, lr_warmup_scheduler) from the OPTIMIZATION config.

    'adam_onecycle' gets a OneCycle schedule (no warmup scheduler); everything
    else gets iteration-based step decay via LambdaLR, optionally preceded by
    a cosine warmup.

    Fix: the warmup T_max called len() on `total_iters_each_epoch`, which is
    an int (TypeError whenever LR_WARMUP was enabled); it now multiplies
    directly.
    """
    decay_steps = [x * total_iters_each_epoch for x in optim_cfg.DECAY_STEP_LIST]

    def lr_lbmd(cur_epoch):
        # Piecewise-constant decay over global *iterations*, floored at LR_CLIP.
        cur_decay = 1
        for decay_step in decay_steps:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * optim_cfg.LR_DECAY
        return max(cur_decay, optim_cfg.LR_CLIP / optim_cfg.LR)

    lr_warmup_scheduler = None
    total_steps = total_iters_each_epoch * total_epochs
    if optim_cfg.OPTIMIZER == 'adam_onecycle':
        lr_scheduler = OneCycle(
            optimizer, total_steps, optim_cfg.LR, list(optim_cfg.MOMS), optim_cfg.DIV_FACTOR, optim_cfg.PCT_START
        )
    else:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)

        if optim_cfg.LR_WARMUP:
            lr_warmup_scheduler = CosineWarmupLR(
                optimizer, T_max=optim_cfg.WARMUP_EPOCH * total_iters_each_epoch,
                eta_min=optim_cfg.LR / optim_cfg.DIV_FACTOR
            )

    return lr_scheduler, lr_warmup_scheduler
| 2,289
| 35.935484
| 113
|
py
|
SASA
|
SASA-main/tools/visual_utils/visualize_utils.py
|
import mayavi.mlab as mlab
import numpy as np
import torch
# RGB colors (floats in [0, 1]) used to distinguish predicted classes in
# draw_scenes; indexed by class label modulo len(box_colormap).
box_colormap = [
    [1, 1, 1],
    [0, 1, 0],
    [0, 1, 1],
    [1, 1, 0],
]
def check_numpy_to_torch(x):
    """Return (tensor, was_numpy): ndarrays become float32 tensors, tensors
    pass through unchanged."""
    if not isinstance(x, np.ndarray):
        return x, False
    return torch.from_numpy(x).float(), True
def rotate_points_along_z(points, angle):
    """Rotate batches of points around the z-axis.

    Args:
        points: (B, N, 3 + C) — xyz followed by optional extra channels
        angle: (B), angle along z-axis, angle increases x ==> y

    Returns:
        Rotated points; numpy if the input was numpy, otherwise a tensor.
    """
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)
    cos_a = torch.cos(angle)
    sin_a = torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # One row-major 3x3 rotation matrix per batch element.
    rot_matrix = torch.stack((
        cos_a, sin_a, zeros,
        -sin_a, cos_a, zeros,
        zeros, zeros, ones
    ), dim=1).view(-1, 3, 3).float()
    rotated_xyz = torch.matmul(points[:, :, 0:3], rot_matrix)
    # Extra channels (intensity etc.) are carried through untouched.
    result = torch.cat((rotated_xyz, points[:, :, 3:]), dim=-1)
    return result.numpy() if is_numpy else result
def boxes_to_corners_3d(boxes3d):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center

    Returns:
        (N, 8, 3) corner coordinates in the order of the diagram above;
        numpy if the input was numpy, otherwise a tensor.
    """
    boxes3d, is_numpy = check_numpy_to_torch(boxes3d)

    # Unit-box corner signs (matching the diagram), scaled by half-extents below.
    template = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2

    # Scale by box size, rotate by heading, then translate to the box center.
    corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
    corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners3d += boxes3d[:, None, 0:3]

    return corners3d.numpy() if is_numpy else corners3d
def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),
                  show_intensity=False, size=(800, 600), draw_origin=True):
    """Scatter a point cloud with mayavi, optionally colored by the 4th
    (intensity) column, and optionally draw RGB axes at the origin.

    Returns the mayavi figure (created on demand when `fig` is None).
    """
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)

    common = dict(mode='point', colormap='gnuplot', scale_factor=1, figure=fig)
    if show_intensity:
        # 4th column drives the colormap.
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], **common)
    else:
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], **common)

    if draw_origin:
        # White cube at the origin plus x/y/z axes in blue/green/red.
        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
        mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), tube_radius=0.1)

    return fig
def draw_sphere_pts(pts, color=(0, 1, 0), fig=None, bgcolor=(0, 0, 0), scale_factor=0.2):
    # Render points as spheres. `color` may be a single RGB tuple or a numpy
    # array of per-point uint8 RGB rows (0-255); in the latter case a LUT is
    # installed so each sphere gets its own color. Returns the mayavi figure.
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()

    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(600, 600))

    if isinstance(color, np.ndarray) and color.shape[0] == 1:
        # Single-row array: collapse to one RGB tuple scaled to [0, 1].
        color = color[0]
        color = (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)

    if isinstance(color, np.ndarray):
        # Per-point colors: build an RGBA LUT indexed by point ordinal.
        pts_color = np.zeros((pts.__len__(), 4), dtype=np.uint8)
        pts_color[:, 0:3] = color
        pts_color[:, 3] = 255
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], np.arange(0, pts_color.__len__()), mode='sphere',
                          scale_factor=scale_factor, figure=fig)
        G.glyph.color_mode = 'color_by_scalar'
        G.glyph.scale_mode = 'scale_by_vector'
        G.module_manager.scalar_lut_manager.lut.table = pts_color
    else:
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='sphere', color=color,
                      colormap='gnuplot', scale_factor=scale_factor, figure=fig)

    # Origin marker plus x/y/z axes in blue/green/red.
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
    mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), line_width=3, tube_radius=None, figure=fig)

    return fig
def draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)):
    """Draw the four edges of the axis-aligned rectangle (x1,y1)-(x2,y2) at z=0."""
    edges = [
        ([x1, x1], [y1, y2]),  # left
        ([x2, x2], [y1, y2]),  # right
        ([x1, x2], [y1, y1]),  # bottom
        ([x1, x2], [y2, y2]),  # top
    ]
    for xs, ys in edges:
        mlab.plot3d(xs, ys, [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)
    return fig
def draw_multi_grid_range(fig, grid_size=20, bv_range=(-60, -60, 60, 60)):
    """Tile the bird's-eye-view range with grid_size x grid_size cells."""
    x_min, y_min, x_max, y_max = bv_range
    for cell_x in range(x_min, x_max, grid_size):
        for cell_y in range(y_min, y_max, grid_size):
            fig = draw_grid(cell_x, cell_y, cell_x + grid_size, cell_y + grid_size, fig)
    return fig
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
    # Visualize one scene: point cloud + BEV grid, ground-truth boxes in blue,
    # predicted boxes in green (or per-class colors when labels are given).
    # All tensor inputs are moved to CPU numpy first.
    if not isinstance(points, np.ndarray):
        points = points.cpu().numpy()
    if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
        ref_boxes = ref_boxes.cpu().numpy()
    if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
        gt_boxes = gt_boxes.cpu().numpy()
    if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
        ref_scores = ref_scores.cpu().numpy()
    if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
        ref_labels = ref_labels.cpu().numpy()

    fig = visualize_pts(points)
    fig = draw_multi_grid_range(fig, bv_range=(0, -40, 80, 40))
    if gt_boxes is not None:
        corners3d = boxes_to_corners_3d(gt_boxes)
        fig = draw_corners3d(corners3d, fig=fig, color=(0, 0, 1), max_num=100)

    if ref_boxes is not None and len(ref_boxes) > 0:
        ref_corners3d = boxes_to_corners_3d(ref_boxes)
        if ref_labels is None:
            fig = draw_corners3d(ref_corners3d, fig=fig, color=(0, 1, 0), cls=ref_scores, max_num=100)
        else:
            # One draw call per class so each gets its colormap entry.
            for k in range(ref_labels.min(), ref_labels.max() + 1):
                cur_color = tuple(box_colormap[k % len(box_colormap)])
                mask = (ref_labels == k)
                fig = draw_corners3d(ref_corners3d[mask], fig=fig, color=cur_color, cls=ref_scores[mask], max_num=100)
    # Fixed camera pose roughly looking down the ego x-axis.
    mlab.view(azimuth=-179, elevation=54.0, distance=104.0, roll=90.0)
    return fig
def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
    """
    :param corners3d: (N, 8, 3)
    :param fig: mayavi figure to draw into
    :param color: RGB tuple for all edges of these boxes
    :param line_width: edge line width
    :param cls: optional per-box score (ndarray) or label to print at corner 6
    :param tag: unused
    :param max_num: draw at most this many boxes
    :return: fig
    """
    import mayavi.mlab as mlab
    num = min(max_num, len(corners3d))
    for n in range(num):
        b = corners3d[n]  # (8, 3)

        if cls is not None:
            # Numeric scores formatted to 2 decimals; anything else via %s.
            if isinstance(cls, np.ndarray):
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
            else:
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)

        # 12 box edges: bottom face (k, k+1), top face (k+4, k+5), verticals (k, k+4).
        for k in range(0, 4):
            i, j = k, (k + 1) % 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
            i, j = k + 4, (k + 1) % 4 + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)

            i, j = k, k + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)

        # Diagonal cross on the front face marks the heading direction.
        i, j = 0, 5
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
        i, j = 1, 4
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)

    return fig
| 8,540
| 38.541667
| 121
|
py
|
SASA
|
SASA-main/pcdet/config.py
|
from pathlib import Path
import yaml
from easydict import EasyDict
def log_config_to_file(cfg, pre='cfg', logger=None):
    """Recursively log every config entry as dotted `pre.key: value` lines."""
    for key, val in cfg.items():
        if isinstance(val, EasyDict):
            # Announce the nested section, then recurse with an extended prefix.
            logger.info('\n%s.%s = edict()' % (pre, key))
            log_config_to_file(val, pre=pre + '.' + key, logger=logger)
            continue
        logger.info('%s.%s: %s' % (pre, key, val))
def cfg_from_list(cfg_list, config):
    """Set config keys via list (e.g., from command line).

    `cfg_list` is a flat [key1, val1, key2, val2, ...] sequence; dotted keys
    address nested entries. Values are parsed with literal_eval and coerced
    to the type already present in the config.

    Raises:
        AssertionError: on unknown keys, odd-length lists, or type mismatch.
    """
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = config
        # Walk down to the parent of the final key, validating each level.
        for subkey in key_list[:-1]:
            assert subkey in d, 'NotFoundKey: %s' % subkey
            d = d[subkey]
        subkey = key_list[-1]
        assert subkey in d, 'NotFoundKey: %s' % subkey
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            # Not a Python literal -> keep the raw string.
            # (Narrowed from a bare `except`.)
            value = v

        if type(value) != type(d[subkey]) and isinstance(d[subkey], EasyDict):
            # 'k1:v1,k2:v2' syntax: update several sub-keys at once, coercing
            # each to the existing sub-key's type.
            key_val_list = value.split(',')
            for src in key_val_list:
                cur_key, cur_val = src.split(':')
                val_type = type(d[subkey][cur_key])
                cur_val = val_type(cur_val)
                d[subkey][cur_key] = cur_val
        elif type(value) != type(d[subkey]) and isinstance(d[subkey], list):
            # Comma-separated list, coerced element-wise to the existing type.
            val_list = value.split(',')
            for k, x in enumerate(val_list):
                val_list[k] = type(d[subkey][0])(x)
            d[subkey] = val_list
        else:
            assert type(value) == type(d[subkey]), \
                'type {} does not match original type {}'.format(type(value), type(d[subkey]))
            d[subkey] = value
def merge_new_config(config, new_config):
    """Recursively merge `new_config` into `config` (mutated in place).

    If `new_config` names a `_BASE_CONFIG_` yaml file, that base is loaded
    and applied first, then the remaining entries override it.

    Returns the mutated `config`.
    """
    if '_BASE_CONFIG_' in new_config:
        with open(new_config['_BASE_CONFIG_'], 'r') as f:
            try:
                yaml_config = yaml.load(f, Loader=yaml.FullLoader)
            except AttributeError:
                # Old PyYAML (< 5.1) has no FullLoader; fall back to the
                # default loader. (Narrowed from a bare `except`.)
                yaml_config = yaml.load(f)
        config.update(EasyDict(yaml_config))

    for key, val in new_config.items():
        if not isinstance(val, dict):
            config[key] = val
            continue
        if key not in config:
            config[key] = EasyDict()
        merge_new_config(config[key], val)

    return config
def cfg_from_yaml_file(cfg_file, config):
    """Load `cfg_file` (yaml) and merge it into `config`; returns `config`."""
    with open(cfg_file, 'r') as f:
        try:
            new_config = yaml.load(f, Loader=yaml.FullLoader)
        except AttributeError:
            # Old PyYAML (< 5.1) has no FullLoader; fall back to the default
            # loader. (Narrowed from a bare `except`.)
            new_config = yaml.load(f)

        merge_new_config(config=config, new_config=new_config)

    return config
# Global config singleton, populated by cfg_from_yaml_file at startup.
cfg = EasyDict()
cfg.ROOT_DIR = (Path(__file__).resolve().parent / '../').resolve()  # repo root
cfg.LOCAL_RANK = 0  # overwritten by the launcher in distributed runs
| 2,750
| 30.988372
| 94
|
py
|
SASA
|
SASA-main/pcdet/__init__.py
|
import subprocess
from pathlib import Path
from .version import __version__
__all__ = [
'__version__'
]
def get_git_commit_number():
    """Return the short (7-char) HEAD commit hash, or '0000000' when the
    package is not running from a git checkout."""
    if not (Path(__file__).parent / '../.git').exists():
        return '0000000'

    head = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
    return head.stdout.decode('utf-8')[:7]
# Append the current git commit to the package version (e.g. '0.1.0+py1a2b3c4')
# so saved checkpoints record the exact code revision they were trained with.
script_version = get_git_commit_number()
if script_version not in __version__:
    __version__ = __version__ + '+py%s' % script_version
| 535
| 20.44
| 82
|
py
|
SASA
|
SASA-main/pcdet/models/__init__.py
|
from collections import namedtuple
import numpy as np
import torch
from .detectors import build_detector
def build_network(model_cfg, num_class, dataset):
    """Thin factory: delegate detector construction to build_detector."""
    return build_detector(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
def load_data_to_gpu(batch_dict):
    """Move every numpy array in `batch_dict` to the GPU as a float tensor, in place.

    Metadata keys and non-array values are left untouched.
    NOTE(review): `.float()` also casts integer arrays (e.g. index arrays) to
    float32 — confirm downstream consumers expect that.
    """
    skip_keys = ('frame_id', 'metadata', 'calib', 'image_shape')
    for key, val in batch_dict.items():
        if not isinstance(val, np.ndarray) or key in skip_keys:
            continue
        batch_dict[key] = torch.from_numpy(val).float().cuda()
def model_fn_decorator():
    """Return a closure (model, batch_dict) -> ModelReturn(loss, tb_dict, disp_dict)
    that moves the batch to GPU, runs the model, reduces the loss, and bumps
    the model's global step counter."""
    ModelReturn = namedtuple('ModelReturn', ['loss', 'tb_dict', 'disp_dict'])

    def model_func(model, batch_dict):
        load_data_to_gpu(batch_dict)
        ret_dict, tb_dict, disp_dict = model(batch_dict)

        loss = ret_dict['loss'].mean()
        # DistributedDataParallel hides the method behind `.module`.
        step_owner = model if hasattr(model, 'update_global_step') else model.module
        step_owner.update_global_step()

        return ModelReturn(loss, tb_dict, disp_dict)

    return model_func
| 1,074
| 25.219512
| 77
|
py
|
SASA
|
SASA-main/pcdet/models/detectors/point_rcnn.py
|
from .detector3d_template import Detector3DTemplate
class PointRCNN(Detector3DTemplate):
    """Two-stage point-based detector: point-head proposals refined by an RoI head."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run every sub-module; return loss dicts in training, detections in eval."""
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if not self.training:
            # (pred_dicts, recall_dicts)
            return self.post_processing(batch_dict)

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum of the point-head and RoI-head losses."""
        disp_dict = {}
        loss_point, tb_dict = self.point_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        return loss_point + loss_rcnn, tb_dict, disp_dict
| 999
| 31.258065
| 83
|
py
|
SASA
|
SASA-main/pcdet/models/detectors/pointpillar.py
|
from .detector3d_template import Detector3DTemplate
class PointPillar(Detector3DTemplate):
    """Single-stage pillar-based detector; its loss comes solely from the dense head."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run every sub-module; return loss dicts in training, detections in eval."""
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if not self.training:
            # (pred_dicts, recall_dicts)
            return self.post_processing(batch_dict)

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """RPN loss from the dense head, logged under 'loss_rpn'."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {'loss_rpn': loss_rpn.item(), **tb_dict}
        return loss_rpn, tb_dict, disp_dict
| 1,018
| 28.114286
| 83
|
py
|
SASA
|
SASA-main/pcdet/models/detectors/second_net.py
|
from .detector3d_template import Detector3DTemplate
class SECONDNet(Detector3DTemplate):
    """Single-stage sparse-voxel detector; its loss comes solely from the dense head."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run every sub-module; return loss dicts in training, detections in eval."""
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if not self.training:
            # (pred_dicts, recall_dicts)
            return self.post_processing(batch_dict)

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """RPN loss from the dense head, logged under 'loss_rpn'."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {'loss_rpn': loss_rpn.item(), **tb_dict}
        return loss_rpn, tb_dict, disp_dict
| 1,016
| 28.057143
| 83
|
py
|
SASA
|
SASA-main/pcdet/models/detectors/detector3d_template.py
|
import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate(nn.Module):
    def __init__(self, model_cfg, num_class, dataset):
        """Base detector: stores config/dataset handles and the fixed build
        order of sub-modules.

        Args:
            model_cfg: MODEL section of the yaml config
            num_class: number of foreground classes
            dataset: dataset object providing feature/grid/range metadata
        """
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.dataset = dataset
        self.class_names = dataset.class_names
        # Buffer (not a parameter) so the step count is saved in checkpoints.
        self.register_buffer('global_step', torch.LongTensor(1).zero_())

        # Build order matters: later modules consume feature dims produced by
        # earlier ones (see build_networks).
        self.module_topology = [
            'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe',
            'backbone_2d', 'dense_head', 'point_head', 'roi_head'
        ]
@property
def mode(self):
return 'TRAIN' if self.training else 'TEST'
    def update_global_step(self):
        # Advance the persistent iteration counter (a registered buffer).
        self.global_step += 1
def build_networks(self):
model_info_dict = {
'module_list': [],
'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
'num_point_features': self.dataset.point_feature_encoder.num_point_features,
'grid_size': self.dataset.grid_size,
'point_cloud_range': self.dataset.point_cloud_range,
'voxel_size': self.dataset.voxel_size
}
for module_name in self.module_topology:
module, model_info_dict = getattr(self, 'build_%s' % module_name)(
model_info_dict=model_info_dict
)
self.add_module(module_name, module)
return model_info_dict['module_list']
def build_vfe(self, model_info_dict):
if self.model_cfg.get('VFE', None) is None:
return None, model_info_dict
vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
model_cfg=self.model_cfg.VFE,
num_point_features=model_info_dict['num_rawpoint_features'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size']
)
model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
model_info_dict['module_list'].append(vfe_module)
return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_3D', None) is None:
return None, model_info_dict
backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
model_cfg=self.model_cfg.BACKBONE_3D,
input_channels=model_info_dict['num_point_features'],
grid_size=model_info_dict['grid_size'],
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range']
)
model_info_dict['module_list'].append(backbone_3d_module)
model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
if self.model_cfg.get('MAP_TO_BEV', None) is None:
return None, model_info_dict
map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
model_cfg=self.model_cfg.MAP_TO_BEV,
grid_size=model_info_dict['grid_size']
)
model_info_dict['module_list'].append(map_to_bev_module)
model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
return map_to_bev_module, model_info_dict
def build_backbone_2d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_2D', None) is None:
return None, model_info_dict
backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
model_cfg=self.model_cfg.BACKBONE_2D,
input_channels=model_info_dict['num_bev_features']
)
model_info_dict['module_list'].append(backbone_2d_module)
model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
return backbone_2d_module, model_info_dict
def build_pfe(self, model_info_dict):
if self.model_cfg.get('PFE', None) is None:
return None, model_info_dict
pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
model_cfg=self.model_cfg.PFE,
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
num_bev_features=model_info_dict['num_bev_features'],
num_rawpoint_features=model_info_dict['num_rawpoint_features']
)
model_info_dict['module_list'].append(pfe_module)
model_info_dict['num_point_features'] = pfe_module.num_point_features
model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
return pfe_module, model_info_dict
def build_dense_head(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD', None) is None:
return None, model_info_dict
dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
model_cfg=self.model_cfg.DENSE_HEAD,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
class_names=self.class_names,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
)
model_info_dict['module_list'].append(dense_head_module)
return dense_head_module, model_info_dict
def build_point_head(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
model_cfg=self.model_cfg.POINT_HEAD,
input_channels=num_point_features,
num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def build_roi_head(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD', None) is None:
return None, model_info_dict
point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
model_cfg=self.model_cfg.ROI_HEAD,
input_channels=model_info_dict['num_point_features'],
num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def forward(self, **kwargs):
raise NotImplementedError
def post_processing(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
multihead_label_mapping: [(num_class1), (num_class2), ...]
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
has_class_labels: True/False
roi_labels: (B, num_rois) 1 .. num_classes
batch_pred_labels: (B, num_boxes, 1)
Returns:
"""
post_process_cfg = self.model_cfg.POST_PROCESSING
batch_size = batch_dict['batch_size']
recall_dict = {}
pred_dicts = []
for index in range(batch_size):
if batch_dict.get('batch_index', None) is not None:
assert batch_dict['batch_box_preds'].shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
assert batch_dict['batch_box_preds'].shape.__len__() == 3
batch_mask = index
box_preds = batch_dict['batch_box_preds'][batch_mask]
src_box_preds = box_preds
if not isinstance(batch_dict['batch_cls_preds'], list):
cls_preds = batch_dict['batch_cls_preds'][batch_mask]
src_cls_preds = cls_preds
assert cls_preds.shape[1] in [1, self.num_class]
if not batch_dict['cls_preds_normalized']:
cls_preds = torch.sigmoid(cls_preds)
else:
cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
src_cls_preds = cls_preds
if not batch_dict['cls_preds_normalized']:
cls_preds = [torch.sigmoid(x) for x in cls_preds]
if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
if not isinstance(cls_preds, list):
cls_preds = [cls_preds]
multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
else:
multihead_label_mapping = batch_dict['multihead_label_mapping']
cur_start_idx = 0
pred_scores, pred_labels, pred_boxes = [], [], []
for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
assert cur_cls_preds.shape[1] == len(cur_label_mapping)
cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
cls_scores=cur_cls_preds, box_preds=cur_box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
cur_pred_labels = cur_label_mapping[cur_pred_labels]
pred_scores.append(cur_pred_scores)
pred_labels.append(cur_pred_labels)
pred_boxes.append(cur_pred_boxes)
cur_start_idx += cur_cls_preds.shape[0]
final_scores = torch.cat(pred_scores, dim=0)
final_labels = torch.cat(pred_labels, dim=0)
final_boxes = torch.cat(pred_boxes, dim=0)
else:
cls_preds, label_preds = torch.max(cls_preds, dim=-1)
if batch_dict.get('has_class_labels', False):
label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
label_preds = batch_dict[label_key][index]
else:
label_preds = label_preds + 1
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
if post_process_cfg.OUTPUT_RAW_SCORE:
max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
selected_scores = max_cls_preds[selected]
final_scores = selected_scores
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
recall_dict = self.generate_recall_record(
box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
thresh_list=post_process_cfg.RECALL_THRESH_LIST
)
record_dict = {
'pred_boxes': final_boxes,
'pred_scores': final_scores,
'pred_labels': final_labels
}
pred_dicts.append(record_dict)
return pred_dicts, recall_dict
def generate_recall_record(self, box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt_num': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
k = cur_gt.__len__() - 1
while k > 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt_num'] += cur_gt.shape[0]
else:
gt_iou = box_preds.new_zeros(box_preds.shape[0])
return recall_dict
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
if 'version' in checkpoint:
logger.info('==> Checkpoint trained from version: %s' % checkpoint['version'])
update_model_state = {}
for key, val in model_state_disk.items():
if key in self.state_dict() and self.state_dict()[key].shape == model_state_disk[key].shape:
update_model_state[key] = val
# logger.info('Update weight %s: %s' % (key, str(val.shape)))
state_dict = self.state_dict()
state_dict.update(update_model_state)
self.load_state_dict(state_dict)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(self.state_dict())))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self.load_state_dict(checkpoint['model_state'])
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch
| 17,009
| 44.119363
| 111
|
py
|
SASA
|
SASA-main/pcdet/models/detectors/PartA2_net.py
|
from .detector3d_template import Detector3DTemplate
class PartA2Net(Detector3DTemplate):
    """Part-A2 detector: trains on the sum of RPN, point-wise and RCNN losses."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        # pass the shared batch dict through every pipeline stage
        for module in self.module_list:
            batch_dict = module(batch_dict)

        if not self.training:
            return self.post_processing(batch_dict)

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum the losses of all three heads; tb_dict is threaded through each."""
        rpn_loss, tb_dict = self.dense_head.get_loss()
        point_loss, tb_dict = self.point_head.get_loss(tb_dict)
        rcnn_loss, tb_dict = self.roi_head.get_loss(tb_dict)
        return rpn_loss + point_loss + rcnn_loss, tb_dict, {}
| 1,072
| 32.53125
| 83
|
py
|
SASA
|
SASA-main/pcdet/models/detectors/pv_rcnn.py
|
from .detector3d_template import Detector3DTemplate
class PVRCNN(Detector3DTemplate):
    """PV-RCNN detector: trains on the sum of RPN, point-wise and RCNN losses."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        # pass the shared batch dict through every pipeline stage
        for module in self.module_list:
            batch_dict = module(batch_dict)

        if not self.training:
            return self.post_processing(batch_dict)

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum the losses of all three heads; tb_dict is threaded through each."""
        rpn_loss, tb_dict = self.dense_head.get_loss()
        point_loss, tb_dict = self.point_head.get_loss(tb_dict)
        rcnn_loss, tb_dict = self.roi_head.get_loss(tb_dict)
        return rpn_loss + point_loss + rcnn_loss, tb_dict, {}
| 1,069
| 32.4375
| 83
|
py
|
SASA
|
SASA-main/pcdet/models/detectors/__init__.py
|
from .detector3d_template import Detector3DTemplate
from .PartA2_net import PartA2Net
from .point_rcnn import PointRCNN
from .pointpillar import PointPillar
from .pv_rcnn import PVRCNN
from .second_net import SECONDNet
from .point_3dssd import Point3DSSD
__all__ = {
'Detector3DTemplate': Detector3DTemplate,
'SECONDNet': SECONDNet,
'PartA2Net': PartA2Net,
'PVRCNN': PVRCNN,
'PointPillar': PointPillar,
'PointRCNN': PointRCNN,
'3DSSD': Point3DSSD
}
def build_detector(model_cfg, num_class, dataset):
    """Instantiate the detector class registered under ``model_cfg.NAME``."""
    detector_cls = __all__[model_cfg.NAME]
    return detector_cls(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
| 658
| 24.346154
| 65
|
py
|
SASA
|
SASA-main/pcdet/models/detectors/point_3dssd.py
|
import torch
from .detector3d_template import Detector3DTemplate
from ...ops.iou3d_nms import iou3d_nms_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
class Point3DSSD(Detector3DTemplate):
    """3DSSD-style point-based detector.

    Besides the standard forward/loss, this class overrides the recall
    bookkeeping to also track point-level statistics: how many sampled points
    fall inside GT boxes at every SA layer/segment, at the candidate stage, and
    after voting.
    """
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
    def forward(self, batch_dict):
        # run every configured module over the shared batch dict
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss()
            ret_dict = {
                'loss': loss
            }
            return ret_dict, tb_dict, disp_dict
        else:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts
    def get_training_loss(self):
        """Return (loss, tb_dict, disp_dict); 3DSSD trains on the point head loss only."""
        disp_dict = {}
        loss_point, tb_dict = self.point_head.get_loss()
        loss = loss_point
        return loss, tb_dict, disp_dict
    def init_recall_record(self, metric, **kwargs):
        """Zero-initialize all custom point-recall counters in ``metric`` (in place)."""
        # initialize gt_num for all classes
        for cur_cls in range(len(self.class_names)):
            metric['gt_num[%s]' % self.class_names[cur_cls]] = 0
        # initialize statistics of all sampling segments
        npoint_list = self.model_cfg.BACKBONE_3D.SA_CONFIG.NPOINT_LIST
        for cur_layer in range(len(npoint_list)):
            for cur_seg in range(len(npoint_list[cur_layer])):
                metric['positive_point_L%dS%d' % (cur_layer, cur_seg)] = 0
                metric['recall_point_L%dS%d' % (cur_layer, cur_seg)] = 0
                for cur_cls in range(self.num_class):
                    metric['recall_point_L%dS%d[%s]' \
                           % (cur_layer, cur_seg, self.class_names[cur_cls])] = 0
        # initialize statistics of the vote layer
        metric['positive_point_candidate'] = 0
        metric['recall_point_candidate'] = 0
        metric['positive_point_vote'] = 0
        metric['recall_point_vote'] = 0
        for cur_cls in range(len(self.class_names)):
            metric['recall_point_candidate[%s]' % self.class_names[cur_cls]] = 0
            metric['recall_point_vote[%s]' % self.class_names[cur_cls]] = 0
    def generate_recall_record(self, box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
        """Accumulate box-level IoU recall plus point-level coverage statistics
        for one sample; returns the updated ``recall_dict``.
        """
        if 'gt_boxes' not in data_dict:
            return recall_dict
        # point_coords format: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
        point_list = data_dict['point_coords_list']  # ignore raw point input
        npoint_list = self.model_cfg.BACKBONE_3D.SA_CONFIG.NPOINT_LIST
        assert len(point_list) == len(npoint_list)
        cur_points_list = []
        for cur_layer in range(npoint_list.__len__()):
            cur_points = point_list[cur_layer]
            bs_idx = cur_points[:, 0]
            bs_mask = (bs_idx == batch_index)
            cur_points = cur_points[bs_mask][:, 1:4]
            # split the layer's points into its per-segment groups
            cur_points_list.append(cur_points.split(npoint_list[cur_layer], dim=0))
        base_points = data_dict['point_candidate_coords']
        vote_points = data_dict['point_vote_coords']
        bs_idx = base_points[:, 0]
        bs_mask = (bs_idx == batch_index)
        base_points = base_points[bs_mask][:, 1:4]
        vote_points = vote_points[bs_mask][:, 1:4]
        rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
        gt_boxes = data_dict['gt_boxes'][batch_index]
        # initialize recall_dict
        if recall_dict.__len__() == 0:
            recall_dict = {'gt_num': 0}
            for cur_thresh in thresh_list:
                recall_dict['recall_roi_%s' % (str(cur_thresh))] = 0
                recall_dict['recall_rcnn_%s' % (str(cur_thresh))] = 0
            self.init_recall_record(recall_dict)  # init customized statistics
        cur_gt = gt_boxes
        # strip trailing zero-padded (empty) GT rows
        k = cur_gt.__len__() - 1
        while k > 0 and cur_gt[k].sum() == 0:
            k -= 1
        cur_gt = cur_gt[:k + 1]
        if cur_gt.shape[0] > 0:
            # backbone
            for cur_layer in range(len(npoint_list)):
                for cur_seg in range(len(npoint_list[cur_layer])):
                    # index of the GT box each point falls in (-1 for background)
                    box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                        cur_points_list[cur_layer][cur_seg].unsqueeze(dim=0),
                        cur_gt[None, :, :7].contiguous()
                    ).long().squeeze(dim=0)
                    box_fg_flag = (box_idxs_of_pts >= 0)
                    recall_dict['positive_point_L%dS%d' % (cur_layer, cur_seg)] += box_fg_flag.long().sum().item()
                    box_recalled = box_idxs_of_pts[box_fg_flag].unique()
                    recall_dict['recall_point_L%dS%d' % (cur_layer, cur_seg)] += box_recalled.size(0)
                    box_recalled_cls = cur_gt[box_recalled, -1]
                    for cur_cls in range(self.num_class):
                        recall_dict['recall_point_L%dS%d[%s]' % (cur_layer, cur_seg, self.class_names[cur_cls])] += \
                            (box_recalled_cls == (cur_cls + 1)).sum().item()
            # candidate points
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                base_points.unsqueeze(dim=0), cur_gt[None, :, :7]
            ).long().squeeze(dim=0)
            box_fg_flag = (box_idxs_of_pts >= 0)
            recall_dict['positive_point_candidate'] += box_fg_flag.long().sum().item()
            box_recalled = box_idxs_of_pts[box_fg_flag].unique()
            recall_dict['recall_point_candidate'] += box_recalled.size(0)
            box_recalled_cls = cur_gt[box_recalled, -1]
            for cur_cls in range(self.num_class):
                recall_dict['recall_point_candidate[%s]' % self.class_names[cur_cls]] += \
                    (box_recalled_cls == (cur_cls + 1)).sum().item()
            # vote points
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                vote_points.unsqueeze(dim=0), cur_gt[None, :, :7]
            ).long().squeeze(dim=0)
            box_fg_flag = (box_idxs_of_pts >= 0)
            recall_dict['positive_point_vote'] += box_fg_flag.long().sum().item()
            box_recalled = box_idxs_of_pts[box_fg_flag].unique()
            recall_dict['recall_point_vote'] += box_recalled.size(0)
            box_recalled_cls = cur_gt[box_recalled, -1]
            for cur_cls in range(self.num_class):
                recall_dict['recall_point_vote[%s]' % self.class_names[cur_cls]] += \
                    (box_recalled_cls == (cur_cls + 1)).sum().item()
            if box_preds.shape[0] > 0:
                iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
            else:
                iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
            if rois is not None:
                iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
            for cur_thresh in thresh_list:
                if iou3d_rcnn.shape[0] == 0:
                    recall_dict['recall_rcnn_%s' % str(cur_thresh)] += 0
                else:
                    rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['recall_rcnn_%s' % str(cur_thresh)] += rcnn_recalled
                if rois is not None:
                    roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['recall_roi_%s' % str(cur_thresh)] += roi_recalled
            # per-class GT counts (class ids assumed 1-based in the last column)
            cur_gt_class = cur_gt[:, -1]
            for cur_cls in range(self.num_class):
                cur_cls_gt_num = (cur_gt_class == cur_cls + 1).sum().item()
                recall_dict['gt_num'] += cur_cls_gt_num
                recall_dict['gt_num[%s]' % self.class_names[cur_cls]] += cur_cls_gt_num
        return recall_dict
    def disp_recall_record(self, metric, logger, sample_num, **kwargs):
        """Log averaged point-recall statistics accumulated in ``metric``.

        Positive-point counters are averaged per sample; recall counters are
        normalized by the (per-class) GT count.
        """
        gt_num = metric['gt_num']
        gt_num_cls = [metric['gt_num[%s]' % cur_cls] for cur_cls in self.class_names]
        # backbone
        for k in metric.keys():
            if 'positive_point_' in k:  # count the number of positive points
                cur_positive_point = metric[k] / sample_num
                logger.info(k + (': %f' % cur_positive_point))
            elif 'recall_point_' in k and not any(cur_cls in k for cur_cls in self.class_names):
                cur_recall_point = metric[k] / max(gt_num, 1)
                logger.info(k + (': %f' % cur_recall_point))
                for cur_cls in range(len(self.class_names)):
                    cur_recall_point_cls = metric[k + '[%s]' % self.class_names[cur_cls]] / max(gt_num_cls[cur_cls], 1)
                    logger.info('\t- ' + self.class_names[cur_cls] + ': %f' % cur_recall_point_cls)
        # candidate points
        positive_point_candidate = metric['positive_point_candidate'] / sample_num
        logger.info('positive_point_candidate: %f' % positive_point_candidate)
        recall_point_candidate = metric['recall_point_candidate'] / max(gt_num, 1)
        logger.info('recall_point_candidate: %f' % recall_point_candidate)
        for cur_cls in range(len(self.class_names)):
            cur_recall_point_cls = metric['recall_point_candidate' + '[%s]' % self.class_names[cur_cls]] / max(gt_num_cls[cur_cls], 1)
            logger.info('\t- ' + self.class_names[cur_cls] + ': %f' % cur_recall_point_cls)
        # vote points
        positive_point_vote = metric['positive_point_vote'] / sample_num
        logger.info('positive_point_vote: %f' % positive_point_vote)
        recall_point_vote = metric['recall_point_vote'] / max(gt_num, 1)
        logger.info('recall_point_vote: %f' % recall_point_vote)
        for cur_cls in range(len(self.class_names)):
            cur_recall_point_cls = metric['recall_point_vote' + '[%s]' % self.class_names[cur_cls]] / max(gt_num_cls[cur_cls], 1)
            logger.info('\t- ' + self.class_names[cur_cls] + ': %f' % cur_recall_point_cls)
| 10,022
| 47.892683
| 134
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_3d/spconv_unet.py
|
from functools import partial
import spconv
import torch
import torch.nn as nn
from ...utils import common_utils
from .spconv_backbone import post_act_block
class SparseBasicBlock(spconv.SparseModule):
    """Residual block of two submanifold 3x3x3 convolutions (conv-BN-ReLU x2
    plus an identity/downsample shortcut) for sparse tensors.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None):
        super(SparseBasicBlock, self).__init__()
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim()
        shortcut = x.features

        out = self.conv1(x)
        out.features = self.relu(self.bn1(out.features))

        out = self.conv2(out)
        out.features = self.bn2(out.features)

        if self.downsample is not None:
            shortcut = self.downsample(x)

        # residual add on the dense feature matrix, then final activation
        out.features += shortcut
        out.features = self.relu(out.features)
        return out
class UNetV2(nn.Module):
    """
    Sparse Convolution based UNet for point-wise feature learning.
    Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi, et. al)
    From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
    """
    def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range, **kwargs):
        """Build the encoder (conv1..conv4 + optional conv_out) and the
        decoder (conv_up_* / inv_conv* blocks that mirror the encoder).
        """
        super().__init__()
        self.model_cfg = model_cfg
        # spconv spatial shape is (z, y, x); +1 on z per the original implementation
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
        )
        if self.model_cfg.get('RETURN_ENCODED_TENSOR', True):
            last_pad = self.model_cfg.get('last_pad', 0)
            self.conv_out = spconv.SparseSequential(
                # [200, 150, 5] -> [200, 150, 2]
                spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                    bias=False, indice_key='spconv_down2'),
                norm_fn(128),
                nn.ReLU(),
            )
        else:
            self.conv_out = None
        # decoder
        # [400, 352, 11] <- [200, 176, 5]
        self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn)
        self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')
        self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')
        # [800, 704, 21] <- [400, 352, 11]
        self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn)
        self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3')
        self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv')
        # [1600, 1408, 41] <- [800, 704, 21]
        self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn)
        # NOTE(review): unlike conv_up_m4/m3, conv_up_m2 and conv_up_m1 omit
        # padding=1 — verify this asymmetry is intended
        self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2')
        self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv')
        # [1600, 1408, 41] <- [1600, 1408, 41]
        self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn)
        self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1')
        self.conv5 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1')
        )
        self.num_point_features = 16
    def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
        """One upsample-refine decoder step: transform the lateral (encoder)
        tensor, concatenate with the bottom (decoder) features, merge, add a
        channel-reduced shortcut, then inverse-conv back to the lateral scale.
        """
        x_trans = conv_t(x_lateral)
        x = x_trans
        x.features = torch.cat((x_bottom.features, x_trans.features), dim=1)
        x_m = conv_m(x)
        x = self.channel_reduction(x, x_m.features.shape[1])
        x.features = x_m.features + x.features
        x = conv_inv(x)
        return x
    @staticmethod
    def channel_reduction(x, out_channels):
        """
        Reduce channels by summing groups of in_channels/out_channels channels.
        Args:
            x: x.features (N, C1)
            out_channels: C2 (must evenly divide C1)
        Returns:
            x with x.features reduced to (N, C2)
        """
        features = x.features
        n, in_channels = features.shape
        assert (in_channels % out_channels == 0) and (in_channels >= out_channels)
        x.features = features.view(n, out_channels, -1).sum(dim=2)
        return x
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        # encoder: progressively downsample
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        if self.conv_out is not None:
            # for detection head
            # [200, 176, 5] -> [200, 176, 2]
            out = self.conv_out(x_conv4)
            batch_dict['encoded_spconv_tensor'] = out
            batch_dict['encoded_spconv_tensor_stride'] = 8
        # for segmentation head
        # [400, 352, 11] <- [200, 176, 5]
        x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4)
        # [800, 704, 21] <- [400, 352, 11]
        x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3)
        # [1600, 1408, 41] <- [800, 704, 21]
        x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2)
        # [1600, 1408, 41] <- [1600, 1408, 41]
        x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5)
        batch_dict['point_features'] = x_up1.features
        # convert voxel indices back to metric (x, y, z) centers
        point_coords = common_utils.get_voxel_centers(
            x_up1.indices[:, 1:], downsample_times=1, voxel_size=self.voxel_size,
            point_cloud_range=self.point_cloud_range
        )
        batch_dict['point_coords'] = torch.cat((x_up1.indices[:, 0:1].float(), point_coords), dim=1)
        return batch_dict
| 8,445
| 38.839623
| 117
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_3d/spconv_backbone.py
|
from functools import partial
import spconv
import torch.nn as nn
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
                   conv_type='subm', norm_fn=None):
    """Build a sparse conv layer ('subm', 'spconv' or 'inverseconv') followed
    by normalization and ReLU, wrapped in a SparseSequential.

    Raises:
        NotImplementedError: for an unrecognized ``conv_type``.
    """
    if conv_type == 'subm':
        # submanifold conv: stride/padding fixed by the shared indice_key
        conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key)
    elif conv_type == 'spconv':
        conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                   bias=False, indice_key=indice_key)
    elif conv_type == 'inverseconv':
        conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False)
    else:
        raise NotImplementedError
    return spconv.SparseSequential(conv, norm_fn(out_channels), nn.ReLU())
class SparseBasicBlock(spconv.SparseModule):
    """Residual block of two submanifold 3x3x3 convolutions for sparse tensors
    (conv-BN-ReLU, conv-BN, residual add, ReLU).
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None):
        super(SparseBasicBlock, self).__init__()
        assert norm_fn is not None
        # NOTE(review): given the assert above, bias is always True here even
        # though the convs are followed by BatchNorm (where a bias is redundant).
        # Changing it would alter the state_dict, so it is kept as-is — verify
        # against released checkpoints before touching.
        bias = norm_fn is not None
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out.features = self.bn1(out.features)
        out.features = self.relu(out.features)
        out = self.conv2(out)
        out.features = self.bn2(out.features)
        if self.downsample is not None:
            identity = self.downsample(x)
        # residual add on the dense feature matrices of the sparse tensors
        out.features += identity.features
        out.features = self.relu(out.features)
        return out
class VoxelBackBone8x(nn.Module):
    """Sparse 3D conv backbone producing an 8x-downsampled voxel feature volume.

    Four conv stages (16 -> 32 -> 64 -> 64 channels); each 'spconv' stage
    halves the spatial resolution, then conv_out downsamples z only for the
    detection head. Intermediate stage outputs are also exposed for
    point-feature extraction modules.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Reversed to [z, y, x] for spconv; the +[1, 0, 0] pads z by one cell
        # (assumes grid_size is a numpy array, so '+' is element-wise — TODO confirm).
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            # z-only downsampling: kernel/stride act on the z axis alone.
            spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128),
            nn.ReLU(),
        )
        self.num_point_features = 128
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        # Per-stage outputs kept for downstream point-feature extraction
        # (e.g. voxel set abstraction).
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        return batch_dict
class VoxelResBackBone8x(nn.Module):
    """Residual variant of VoxelBackBone8x.

    Same 8x-downsampling layout, but each stage uses two SparseBasicBlock
    residual units instead of plain submanifold convs, and the last stage is
    widened to 128 channels.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Reversed to [z, y, x] for spconv; '+[1, 0, 0]' pads z by one cell
        # (assumes grid_size is a numpy array — TODO confirm).
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            # z-only downsampling for the BEV detection head.
            spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128),
            nn.ReLU(),
        )
        self.num_point_features = 128
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        # Per-stage outputs kept for downstream point-feature extraction.
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        return batch_dict
| 9,376
| 34.790076
| 118
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_3d/__init__.py
|
from .pointnet2_backbone import PointNet2Backbone, PointNet2MSG, PointNet2FSMSG
from .spconv_backbone import VoxelBackBone8x, VoxelResBackBone8x
from .spconv_unet import UNetV2
# Registry mapping config NAME strings to 3D backbone classes
# (note: a dict, not the usual list-of-names __all__ — used for lookup
# by the model builder, not for 'import *').
__all__ = {
    'VoxelBackBone8x': VoxelBackBone8x,
    'UNetV2': UNetV2,
    'PointNet2Backbone': PointNet2Backbone,
    'PointNet2MSG': PointNet2MSG,
    'PointNet2FSMSG': PointNet2FSMSG,
    'VoxelResBackBone8x': VoxelResBackBone8x,
}
| 416
| 31.076923
| 79
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_3d/pointnet2_backbone.py
|
import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_modules_stack
from ...ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_utils_stack
class PointNet2MSG(nn.Module):
    """PointNet++ multi-scale-grouping backbone (batched variant).

    Stacks SA (set abstraction) modules configured by SA_CONFIG, then FP
    (feature propagation) modules that upsample features back to the input
    points. Requires every sample in the batch to have the same point count.
    """
    def __init__(self, model_cfg, input_channels, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        # First 3 input channels are xyz, handled separately by the SA modules.
        channel_in = input_channels - 3
        self.num_points_each_layer = []
        skip_channel_list = [input_channels - 3]
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            for idx in range(mlps.__len__()):
                # Prepend the input width to each per-scale MLP spec; output
                # width is the concatenation over all scales.
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            self.SA_modules.append(
                pointnet2_modules.PointnetSAModuleMSG(
                    npoint=self.model_cfg.SA_CONFIG.NPOINTS[k],
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=self.model_cfg.SA_CONFIG.get('USE_XYZ', True),
                )
            )
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        self.FP_modules = nn.ModuleList()
        # If there are fewer FP than SA stages, skip the earliest SA skips.
        skip_sa_block = self.model_cfg.SA_CONFIG.NPOINTS.__len__() - self.model_cfg.FP_MLPS.__len__()
        for k in range(self.model_cfg.FP_MLPS.__len__()):
            pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out
            self.FP_modules.append(
                pointnet2_modules.PointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[k + skip_sa_block]] + self.model_cfg.FP_MLPS[k]
                )
            )
        self.num_point_features = self.model_cfg.FP_MLPS[0][-1]
    def break_up_pc(self, pc):
        # pc: (N, 1 + 3 + C) rows of [batch_idx, x, y, z, features...].
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        # The batched pointnet2 ops need equal point counts per sample.
        assert xyz_batch_cnt.min() == xyz_batch_cnt.max()
        xyz = xyz.view(batch_size, -1, 3)
        features = features.view(batch_size, -1, features.shape[-1]).permute(0, 2, 1) if features is not None else None
        l_xyz, l_features = [xyz], [features]
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        # Propagate features back up, deepest FP module first.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
            )  # (B, C, N)
        point_features = l_features[0].permute(0, 2, 1).contiguous()  # (B, N, C)
        batch_dict['point_features'] = point_features.view(-1, point_features.shape[-1])
        batch_dict['point_coords'] = torch.cat((batch_idx[:, None].float(), l_xyz[0].view(-1, 3)), dim=1)
        return batch_dict
class PointNet2FSMSG(nn.Module):
    """PointNet++ backbone with fused/flexible sampling (SASA-style).

    Each SA stage can mix several sampling methods and ranges, optionally
    aggregate multi-scale outputs through an extra MLP, and emit per-point
    confidence scores used both for sampling and for a confidence loss.
    """
    def __init__(self, model_cfg, input_channels, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        # First 3 input channels are xyz.
        channel_in = input_channels - 3
        use_xyz = self.model_cfg.SA_CONFIG.get('USE_XYZ', True)
        dilated_group = self.model_cfg.SA_CONFIG.get('DILATED_RADIUS_GROUP', False)
        skip_connection = self.model_cfg.SA_CONFIG.get('SKIP_CONNECTION', False)
        weight_gamma = self.model_cfg.SA_CONFIG.get('WEIGHT_GAMMA', 1.0)
        self.aggregation_mlps = self.model_cfg.SA_CONFIG.get('AGGREGATION_MLPS', None)
        self.confidence_mlps = self.model_cfg.SA_CONFIG.get('CONFIDENCE_MLPS', None)
        self.num_points_each_layer = []
        skip_channel_list = [input_channels - 3]
        for k in range(self.model_cfg.SA_CONFIG.NPOINT_LIST.__len__()):
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            if skip_connection:
                # Input features are concatenated onto the stage output.
                channel_out += channel_in
            if self.aggregation_mlps and self.aggregation_mlps[k]:
                aggregation_mlp = self.aggregation_mlps[k].copy()
                if aggregation_mlp.__len__() == 0:
                    aggregation_mlp = None
                else:
                    # Aggregation MLP replaces the concatenated multi-scale width.
                    channel_out = aggregation_mlp[-1]
            else:
                aggregation_mlp = None
            if self.confidence_mlps and self.confidence_mlps[k]:
                confidence_mlp = self.confidence_mlps[k].copy()
                if confidence_mlp.__len__() == 0:
                    confidence_mlp = None
            else:
                confidence_mlp = None
            self.SA_modules.append(
                pointnet2_modules.PointnetSAModuleFSMSG(
                    npoint_list=self.model_cfg.SA_CONFIG.NPOINT_LIST[k],
                    sample_range_list=self.model_cfg.SA_CONFIG.SAMPLE_RANGE_LIST[k],
                    sample_method_list=self.model_cfg.SA_CONFIG.SAMPLE_METHOD_LIST[k],
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=use_xyz,
                    dilated_radius_group=dilated_group,
                    skip_connection=skip_connection,
                    weight_gamma=weight_gamma,
                    aggregation_mlp=aggregation_mlp,
                    confidence_mlp=confidence_mlp
                )
            )
            self.num_points_each_layer.append(
                sum(self.model_cfg.SA_CONFIG.NPOINT_LIST[k]))
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        self.num_point_features = channel_out
        # FP (upsampling) stages are optional for this backbone.
        fp_mlps = self.model_cfg.get('FP_MLPS', None)
        if fp_mlps is not None:
            self.FP_modules = nn.ModuleList()
            l_skipped = self.model_cfg.SA_CONFIG.NPOINT_LIST.__len__() - self.model_cfg.FP_MLPS.__len__()
            for k in range(fp_mlps.__len__()):
                pre_channel = fp_mlps[k + 1][-1] if k + 1 < len(fp_mlps) else channel_out
                self.FP_modules.append(
                    pointnet2_modules.PointnetFPModule(
                        mlp=[pre_channel + skip_channel_list[k + l_skipped]] + fp_mlps[k]
                    )
                )
            self.num_point_features = fp_mlps[0][-1]
        else:
            self.FP_modules = None
    def break_up_pc(self, pc):
        # pc: (N, 1 + 3 + C) rows of [batch_idx, x, y, z, features...].
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                point_coords: (N, 3)
                point_features: (N, C)
                point_confidence_scores: (N, 1)
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        # Batched pointnet2 ops need equal point counts per sample.
        assert xyz_batch_cnt.min() == xyz_batch_cnt.max()
        xyz = xyz.view(batch_size, -1, 3).contiguous()
        features = features.view(batch_size, -1, features.shape[-1]) if features is not None else None
        features = features.permute(0, 2, 1).contiguous() if features is not None else None
        batch_idx = batch_idx.view(batch_size, -1).float()
        l_xyz, l_features, l_scores = [xyz], [features], [None]
        for i in range(len(self.SA_modules)):
            li_xyz, li_features, li_scores = self.SA_modules[i](
                l_xyz[i], l_features[i], scores=l_scores[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
            l_scores.append(li_scores)
        # prepare for confidence loss
        l_xyz_flatten, l_scores_flatten = [], []
        for i in range(1, len(l_xyz)):
            l_xyz_flatten.append(torch.cat([
                batch_idx[:, :l_xyz[i].size(1)].reshape(-1, 1),
                l_xyz[i].reshape(-1, 3)
            ], dim=1))  # (N, 4)
        for i in range(1, len(l_scores)):
            if l_scores[i] is None:
                l_scores_flatten.append(None)
            else:
                l_scores_flatten.append(l_scores[i].reshape(-1, 1))  # (N, 1)
        batch_dict['point_coords_list'] = l_xyz_flatten
        batch_dict['point_scores_list'] = l_scores_flatten
        if self.FP_modules is not None:
            for i in range(-1, -(len(self.FP_modules) + 1), -1):
                l_features[i - 1] = self.FP_modules[i](
                    l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
                )  # (B, C, N)
        else:  # take l_xyz[i - 1] and l_features[i - 1]
            i = 0
        # With FP modules, i stayed at the last loop value (deepest upsampled
        # level); without them, i = 0 selects the final SA stage output.
        point_features = l_features[i - 1].permute(0, 2, 1).contiguous()  # (B, N, C)
        batch_dict['point_features'] = point_features.view(-1, point_features.shape[-1])
        batch_dict['point_coords'] = torch.cat((
            batch_idx[:, :l_xyz[i - 1].size(1)].reshape(-1, 1).float(),
            l_xyz[i - 1].view(-1, 3)), dim=1)
        batch_dict['point_scores'] = l_scores[-1]  # (B, N)
        return batch_dict
class PointNet2Backbone(nn.Module):
    """
    DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723
    """
    def __init__(self, model_cfg, input_channels, **kwargs):
        # Hard-disabled: instantiation always fails (see class docstring).
        assert False, 'DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723'
        super().__init__()
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        channel_in = input_channels - 3
        self.num_points_each_layer = []
        skip_channel_list = [input_channels]
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            self.num_points_each_layer.append(self.model_cfg.SA_CONFIG.NPOINTS[k])
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            self.SA_modules.append(
                pointnet2_modules_stack.StackSAModuleMSG(
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=self.model_cfg.SA_CONFIG.get('USE_XYZ', True),
                )
            )
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        self.FP_modules = nn.ModuleList()
        for k in range(self.model_cfg.FP_MLPS.__len__()):
            pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out
            self.FP_modules.append(
                pointnet2_modules_stack.StackPointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k]
                )
            )
        self.num_point_features = self.model_cfg.FP_MLPS[0][-1]
    def break_up_pc(self, pc):
        # pc: (N, 1 + 3 + C) rows of [batch_idx, x, y, z, features...].
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        l_xyz, l_features, l_batch_cnt = [xyz], [features], [xyz_batch_cnt]
        for i in range(len(self.SA_modules)):
            new_xyz_list = []
            for k in range(batch_size):
                if len(l_xyz) == 1:
                    cur_xyz = l_xyz[0][batch_idx == k]
                else:
                    last_num_points = self.num_points_each_layer[i - 1]
                    cur_xyz = l_xyz[-1][k * last_num_points: (k + 1) * last_num_points]
                cur_pt_idxs = pointnet2_utils_stack.furthest_point_sample(
                    cur_xyz[None, :, :].contiguous(), self.num_points_each_layer[i]
                ).long()[0]
                if cur_xyz.shape[0] < self.num_points_each_layer[i]:
                    # NOTE(review): cur_xyz is (N, 3), so shape[1] is always 3 —
                    # this almost certainly should be cur_xyz.shape[0].
                    empty_num = self.num_points_each_layer[i] - cur_xyz.shape[1]
                    # NOTE(review): cur_pt_idxs was already reduced to 1-D by
                    # '[0]' above, so 2-D indexing here would raise. Likely one
                    # of the suspected bugs the class docstring warns about.
                    cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
                new_xyz_list.append(cur_xyz[cur_pt_idxs])
            new_xyz = torch.cat(new_xyz_list, dim=0)
            new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(self.num_points_each_layer[i])
            li_xyz, li_features = self.SA_modules[i](
                xyz=l_xyz[i], features=l_features[i], xyz_batch_cnt=l_batch_cnt[i],
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt
            )
            l_xyz.append(li_xyz)
            l_features.append(li_features)
            l_batch_cnt.append(new_xyz_batch_cnt)
        l_features[0] = points[:, 1:]
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                unknown=l_xyz[i - 1], unknown_batch_cnt=l_batch_cnt[i - 1],
                known=l_xyz[i], known_batch_cnt=l_batch_cnt[i],
                unknown_feats=l_features[i - 1], known_feats=l_features[i]
            )
        batch_dict['point_features'] = l_features[0]
        batch_dict['point_coords'] = torch.cat((batch_idx[:, None].float(), l_xyz[0]), dim=1)
        return batch_dict
| 15,273
| 41.077135
| 119
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_3d/pfe/__init__.py
|
from .voxel_set_abstraction import VoxelSetAbstraction
# Registry mapping config NAME strings to point-feature-extraction classes
# (a dict used by the model builder, not a conventional __all__ list).
__all__ = {
    'VoxelSetAbstraction': VoxelSetAbstraction
}
| 117
| 18.666667
| 54
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py
|
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
def bilinear_interpolate_torch(im, x, y):
    """Bilinearly sample a 2-D feature map at fractional pixel locations.

    Args:
        im: (H, W, C) feature map indexed as [y, x]
        x: (N,) query x coordinates, may be fractional
        y: (N,) query y coordinates, may be fractional

    Returns:
        (N, C) interpolated features. Corner indices are clamped to the map
        border before the weights are computed, matching the original
        behavior at and beyond the edges.
    """
    # Integer corner indices surrounding each query, clamped into the map.
    x_lo = torch.clamp(torch.floor(x).long(), 0, im.shape[1] - 1)
    x_hi = torch.clamp(torch.floor(x).long() + 1, 0, im.shape[1] - 1)
    y_lo = torch.clamp(torch.floor(y).long(), 0, im.shape[0] - 1)
    y_hi = torch.clamp(torch.floor(y).long() + 1, 0, im.shape[0] - 1)
    # Corner features, each (N, C).
    f_tl = im[y_lo, x_lo]
    f_bl = im[y_hi, x_lo]
    f_tr = im[y_lo, x_hi]
    f_br = im[y_hi, x_hi]
    # Bilinear weights derived from the (clamped) corner coordinates.
    w_tl = (x_hi.type_as(x) - x) * (y_hi.type_as(y) - y)
    w_bl = (x_hi.type_as(x) - x) * (y - y_lo.type_as(y))
    w_tr = (x - x_lo.type_as(x)) * (y_hi.type_as(y) - y)
    w_br = (x - x_lo.type_as(x)) * (y - y_lo.type_as(y))
    # Weighted sum via broadcasting instead of the transpose dance.
    return (f_tl * w_tl.unsqueeze(-1) + f_bl * w_bl.unsqueeze(-1)
            + f_tr * w_tr.unsqueeze(-1) + f_br * w_br.unsqueeze(-1))
class VoxelSetAbstraction(nn.Module):
    """PV-RCNN voxel set abstraction.

    Samples a fixed number of keypoints per scene and aggregates, for each
    keypoint, features from the configured sources (raw points, BEV map,
    multi-scale sparse-conv volumes), then fuses them with a linear layer.
    """
    def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
                 num_rawpoint_features=None, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        SA_cfg = self.model_cfg.SA_LAYER
        self.SA_layers = nn.ModuleList()
        self.SA_layer_names = []
        self.downsample_times_map = {}
        # Running total of concatenated feature channels across all sources.
        c_in = 0
        for src_name in self.model_cfg.FEATURES_SOURCE:
            if src_name in ['bev', 'raw_points']:
                continue
            self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
            mlps = SA_cfg[src_name].MLPS
            for k in range(len(mlps)):
                # First element of each MLP spec is its input width; prepend it.
                mlps[k] = [mlps[k][0]] + mlps[k]
            cur_layer = pointnet2_stack_modules.StackSAModuleMSG(
                radii=SA_cfg[src_name].POOL_RADIUS,
                nsamples=SA_cfg[src_name].NSAMPLE,
                mlps=mlps,
                use_xyz=True,
                pool_method='max_pool',
            )
            self.SA_layers.append(cur_layer)
            self.SA_layer_names.append(src_name)
            c_in += sum([x[-1] for x in mlps])
        if 'bev' in self.model_cfg.FEATURES_SOURCE:
            c_bev = num_bev_features
            c_in += c_bev
        if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
            mlps = SA_cfg['raw_points'].MLPS
            for k in range(len(mlps)):
                mlps[k] = [num_rawpoint_features - 3] + mlps[k]
            self.SA_rawpoints = pointnet2_stack_modules.StackSAModuleMSG(
                radii=SA_cfg['raw_points'].POOL_RADIUS,
                nsamples=SA_cfg['raw_points'].NSAMPLE,
                mlps=mlps,
                use_xyz=True,
                pool_method='max_pool'
            )
            c_in += sum([x[-1] for x in mlps])
        # Fuse the concatenated multi-source features to a fixed width.
        self.vsa_point_feature_fusion = nn.Sequential(
            nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
            nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
            nn.ReLU(),
        )
        self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
        self.num_point_features_before_fusion = c_in
    def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
        # Map keypoint world xy to (fractional) BEV pixel coordinates.
        x_idxs = (keypoints[:, :, 0] - self.point_cloud_range[0]) / self.voxel_size[0]
        y_idxs = (keypoints[:, :, 1] - self.point_cloud_range[1]) / self.voxel_size[1]
        x_idxs = x_idxs / bev_stride
        y_idxs = y_idxs / bev_stride
        point_bev_features_list = []
        for k in range(batch_size):
            cur_x_idxs = x_idxs[k]
            cur_y_idxs = y_idxs[k]
            cur_bev_features = bev_features[k].permute(1, 2, 0)  # (H, W, C)
            point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
            point_bev_features_list.append(point_bev_features.unsqueeze(dim=0))
        point_bev_features = torch.cat(point_bev_features_list, dim=0)  # (B, N, C0)
        return point_bev_features
    def get_sampled_points(self, batch_dict):
        # Choose the keypoint candidate source, then FPS-sample a fixed count
        # per batch element, repeating indices when a scene has too few points.
        batch_size = batch_dict['batch_size']
        if self.model_cfg.POINT_SOURCE == 'raw_points':
            src_points = batch_dict['points'][:, 1:4]
            batch_indices = batch_dict['points'][:, 0].long()
        elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
            src_points = common_utils.get_voxel_centers(
                batch_dict['voxel_coords'][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            batch_indices = batch_dict['voxel_coords'][:, 0].long()
        else:
            raise NotImplementedError
        keypoints_list = []
        for bs_idx in range(batch_size):
            bs_mask = (batch_indices == bs_idx)
            sampled_points = src_points[bs_mask].unsqueeze(dim=0)  # (1, N, 3)
            if self.model_cfg.SAMPLE_METHOD == 'FPS':
                cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
                    sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
                ).long()
                if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
                    # Pad by recycling the first indices so the count is fixed.
                    empty_num = self.model_cfg.NUM_KEYPOINTS - sampled_points.shape[1]
                    cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
                keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
            elif self.model_cfg.SAMPLE_METHOD == 'FastFPS':
                raise NotImplementedError
            else:
                raise NotImplementedError
            keypoints_list.append(keypoints)
        keypoints = torch.cat(keypoints_list, dim=0)  # (B, M, 3)
        return keypoints
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                keypoints: (B, num_keypoints, 3)
                multi_scale_3d_features: {
                        'x_conv4': ...
                    }
                points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
                spatial_features: optional
                spatial_features_stride: optional
        Returns:
            point_features: (N, C)
            point_coords: (N, 4)
        """
        keypoints = self.get_sampled_points(batch_dict)
        point_features_list = []
        if 'bev' in self.model_cfg.FEATURES_SOURCE:
            point_bev_features = self.interpolate_from_bev_features(
                keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
                bev_stride=batch_dict['spatial_features_stride']
            )
            point_features_list.append(point_bev_features)
        batch_size, num_keypoints, _ = keypoints.shape
        new_xyz = keypoints.view(-1, 3)
        new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int().fill_(num_keypoints)
        if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
            raw_points = batch_dict['points']
            xyz = raw_points[:, 1:4]
            xyz_batch_cnt = xyz.new_zeros(batch_size).int()
            for bs_idx in range(batch_size):
                xyz_batch_cnt[bs_idx] = (raw_points[:, 0] == bs_idx).sum()
            point_features = raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None
            pooled_points, pooled_features = self.SA_rawpoints(
                xyz=xyz.contiguous(),
                xyz_batch_cnt=xyz_batch_cnt,
                new_xyz=new_xyz,
                new_xyz_batch_cnt=new_xyz_batch_cnt,
                features=point_features,
            )
            point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
        for k, src_name in enumerate(self.SA_layer_names):
            # Voxel indices -> metric centers at this scale's downsample factor.
            cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
            xyz = common_utils.get_voxel_centers(
                cur_coords[:, 1:4],
                downsample_times=self.downsample_times_map[src_name],
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            xyz_batch_cnt = xyz.new_zeros(batch_size).int()
            for bs_idx in range(batch_size):
                xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
            pooled_points, pooled_features = self.SA_layers[k](
                xyz=xyz.contiguous(),
                xyz_batch_cnt=xyz_batch_cnt,
                new_xyz=new_xyz,
                new_xyz_batch_cnt=new_xyz_batch_cnt,
                features=batch_dict['multi_scale_3d_features'][src_name].features.contiguous(),
            )
            point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
        point_features = torch.cat(point_features_list, dim=2)
        batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1)
        point_coords = torch.cat((batch_idx.view(-1, 1).float(), keypoints.view(-1, 3)), dim=1)
        batch_dict['point_features_before_fusion'] = point_features.view(-1, point_features.shape[-1])
        point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1]))
        batch_dict['point_features'] = point_features  # (BxN, C)
        batch_dict['point_coords'] = point_coords  # (BxN, 4)
        return batch_dict
| 9,638
| 39.1625
| 121
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_3d/vfe/vfe_template.py
|
import torch.nn as nn
class VFETemplate(nn.Module):
    """Abstract base class for voxel feature encoders (VFEs).

    Stores the model config; subclasses must implement
    get_output_feature_dim() and forward().
    """
    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
    def get_output_feature_dim(self):
        """Return the per-voxel feature dimension produced by forward()."""
        raise NotImplementedError
    def forward(self, **kwargs):
        """
        Args:
            **kwargs:
        Returns:
            batch_dict:
                ...
                vfe_features: (num_voxels, C)
        """
        raise NotImplementedError
| 470
| 19.478261
| 45
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_3d/vfe/mean_vfe.py
|
import torch
from .vfe_template import VFETemplate
class MeanVFE(VFETemplate):
def __init__(self, model_cfg, num_point_features, **kwargs):
super().__init__(model_cfg=model_cfg)
self.num_point_features = num_point_features
def get_output_feature_dim(self):
return self.num_point_features
def forward(self, batch_dict, **kwargs):
"""
Args:
batch_dict:
voxels: (num_voxels, max_points_per_voxel, C)
voxel_num_points: optional (num_voxels)
**kwargs:
Returns:
vfe_features: (num_voxels, C)
"""
voxel_features, voxel_num_points = batch_dict['voxels'], batch_dict['voxel_num_points']
points_mean = voxel_features[:, :, :].sum(dim=1, keepdim=False)
normalizer = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).type_as(voxel_features)
points_mean = points_mean / normalizer
batch_dict['voxel_features'] = points_mean.contiguous()
return batch_dict
| 1,038
| 31.46875
| 99
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_3d/vfe/pillar_vfe.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .vfe_template import VFETemplate
class PFNLayer(nn.Module):
    """Pillar Feature Network layer: linear -> (BN) -> ReLU -> max over points.

    Intermediate (non-last) layers emit half-width features and concatenate
    the per-pillar max back onto every point, restoring out_channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_norm=True,
                 last_layer=False):
        super().__init__()
        self.last_vfe = last_layer
        self.use_norm = use_norm
        if not self.last_vfe:
            # Half now; the other half is re-added by the max-concat in forward.
            out_channels = out_channels // 2
        if self.use_norm:
            self.linear = nn.Linear(in_channels, out_channels, bias=False)
            self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)
        else:
            self.linear = nn.Linear(in_channels, out_channels, bias=True)
        # Chunk size for the large-batch workaround below.
        self.part = 50000
    def forward(self, inputs):
        total = inputs.shape[0]
        if total > self.part:
            # nn.Linear performs randomly when batch size is too large;
            # run it chunk-by-chunk and stitch the results together.
            num_chunks = total // self.part + 1
            pieces = [
                self.linear(inputs[idx * self.part:(idx + 1) * self.part])
                for idx in range(num_chunks)
            ]
            x = torch.cat(pieces, dim=0)
        else:
            x = self.linear(inputs)
        torch.backends.cudnn.enabled = False
        if self.use_norm:
            # BatchNorm1d normalizes over the channel axis, so swap it forward.
            x = self.norm(x.permute(0, 2, 1)).permute(0, 2, 1)
        torch.backends.cudnn.enabled = True
        x = F.relu(x)
        pooled = x.max(dim=1, keepdim=True)[0]
        if self.last_vfe:
            return pooled
        tiled = pooled.repeat(1, inputs.shape[1], 1)
        return torch.cat([x, tiled], dim=2)
class PillarVFE(VFETemplate):
    """PointPillars voxel feature encoder.

    Augments each point with cluster-center and pillar-center offsets
    (optionally distance), masks out padded slots, and runs a stack of
    PFNLayers ending in a per-pillar max-pooled feature.
    """
    def __init__(self, model_cfg, num_point_features, voxel_size, point_cloud_range):
        super().__init__(model_cfg=model_cfg)
        self.use_norm = self.model_cfg.USE_NORM
        self.with_distance = self.model_cfg.WITH_DISTANCE
        # NOTE: 'ABSLOTE' spelling matches the upstream YAML config key.
        self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ
        # +6: cluster offset (3) + center offset (3); absolute xyz kept or not.
        num_point_features += 6 if self.use_absolute_xyz else 3
        if self.with_distance:
            num_point_features += 1
        self.num_filters = self.model_cfg.NUM_FILTERS
        assert len(self.num_filters) > 0
        num_filters = [num_point_features] + list(self.num_filters)
        pfn_layers = []
        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            pfn_layers.append(
                PFNLayer(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2))
            )
        self.pfn_layers = nn.ModuleList(pfn_layers)
        self.voxel_x = voxel_size[0]
        self.voxel_y = voxel_size[1]
        self.voxel_z = voxel_size[2]
        # Offsets from a voxel index to the metric center of that voxel.
        self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
        self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
        self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
    def get_output_feature_dim(self):
        return self.num_filters[-1]
    def get_paddings_indicator(self, actual_num, max_num, axis=0):
        # Boolean mask (num_voxels, max_num): True where a point slot is real,
        # False where it is zero padding.
        actual_num = torch.unsqueeze(actual_num, axis + 1)
        max_num_shape = [1] * len(actual_num.shape)
        max_num_shape[axis + 1] = -1
        max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
        paddings_indicator = actual_num.int() > max_num
        return paddings_indicator
    def forward(self, batch_dict, **kwargs):
        # voxel_features: (num_voxels, max_points, C); coords: [b, z, y, x].
        voxel_features, voxel_num_points, coords = batch_dict['voxels'], batch_dict['voxel_num_points'], batch_dict['voxel_coords']
        # Offset of each point from the mean of real points in its pillar.
        points_mean = voxel_features[:, :, :3].sum(dim=1, keepdim=True) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1)
        f_cluster = voxel_features[:, :, :3] - points_mean
        # Offset of each point from its pillar's geometric center.
        f_center = torch.zeros_like(voxel_features[:, :, :3])
        f_center[:, :, 0] = voxel_features[:, :, 0] - (coords[:, 3].to(voxel_features.dtype).unsqueeze(1) * self.voxel_x + self.x_offset)
        f_center[:, :, 1] = voxel_features[:, :, 1] - (coords[:, 2].to(voxel_features.dtype).unsqueeze(1) * self.voxel_y + self.y_offset)
        f_center[:, :, 2] = voxel_features[:, :, 2] - (coords[:, 1].to(voxel_features.dtype).unsqueeze(1) * self.voxel_z + self.z_offset)
        if self.use_absolute_xyz:
            features = [voxel_features, f_cluster, f_center]
        else:
            features = [voxel_features[..., 3:], f_cluster, f_center]
        if self.with_distance:
            points_dist = torch.norm(voxel_features[:, :, :3], 2, 2, keepdim=True)
            features.append(points_dist)
        features = torch.cat(features, dim=-1)
        # Zero out the augmented features of padded (non-real) point slots.
        voxel_count = features.shape[1]
        mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(voxel_features)
        features *= mask
        for pfn in self.pfn_layers:
            features = pfn(features)
        features = features.squeeze()
        batch_dict['pillar_features'] = features
        return batch_dict
| 5,089
| 40.048387
| 137
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_3d/vfe/__init__.py
|
from .mean_vfe import MeanVFE
from .pillar_vfe import PillarVFE
from .vfe_template import VFETemplate
# Registry mapping config NAME strings to VFE classes
# (a dict used by the model builder, not a conventional __all__ list).
__all__ = {
    'VFETemplate': VFETemplate,
    'MeanVFE': MeanVFE,
    'PillarVFE': PillarVFE
}
| 200
| 19.1
| 37
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/anchor_head_single.py
|
import numpy as np
import torch.nn as nn
from .anchor_head_template import AnchorHeadTemplate
class AnchorHeadSingle(AnchorHeadTemplate):
    """Single-branch anchor-based detection head.

    Predicts per-anchor classification scores, box regression offsets and
    (optionally) direction-bin logits from the BEV feature map, each through
    its own 1x1 convolution.
    """

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training=True, **kwargs):
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,
            predict_boxes_when_training=predict_boxes_when_training
        )
        # Collapse the per-class anchor counts into a single total per location.
        self.num_anchors_per_location = sum(self.num_anchors_per_location)

        # One 1x1 conv per prediction target.
        self.conv_cls = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.num_class,
            kernel_size=1
        )
        self.conv_box = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.box_coder.code_size,
            kernel_size=1
        )
        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is None:
            self.conv_dir_cls = None
        else:
            self.conv_dir_cls = nn.Conv2d(
                input_channels,
                self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
                kernel_size=1
            )
        self.init_weights()

    def init_weights(self):
        """Focal-loss style init: bias cls head towards low foreground prob."""
        pi = 0.01
        nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
        nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)

    def forward(self, data_dict):
        feats = data_dict['spatial_features_2d']

        # Channel-last [N, H, W, C] is the layout the template losses expect.
        cls_preds = self.conv_cls(feats).permute(0, 2, 3, 1).contiguous()
        box_preds = self.conv_box(feats).permute(0, 2, 3, 1).contiguous()
        self.forward_ret_dict['cls_preds'] = cls_preds
        self.forward_ret_dict['box_preds'] = box_preds

        dir_cls_preds = None
        if self.conv_dir_cls is not None:
            dir_cls_preds = self.conv_dir_cls(feats).permute(0, 2, 3, 1).contiguous()
            self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds

        if self.training:
            # Stash anchor targets for the loss computed later in get_loss().
            self.forward_ret_dict.update(self.assign_targets(gt_boxes=data_dict['gt_boxes']))

        if not self.training or self.predict_boxes_when_training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=data_dict['batch_size'],
                cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
            )
            data_dict['batch_cls_preds'] = batch_cls_preds
            data_dict['batch_box_preds'] = batch_box_preds
            data_dict['cls_preds_normalized'] = False

        return data_dict
| 2,928
| 37.539474
| 136
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/point_head_template.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, loss_utils
class PointHeadTemplate(nn.Module):
    """Base class for point-wise prediction heads.

    Subclasses build their FC branches via make_fc_layers and populate
    self.forward_ret_dict in forward(); the get_*_loss helpers below read
    their inputs from that dict.
    """

    def __init__(self, model_cfg, num_class):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.build_losses(self.model_cfg.LOSS_CONFIG)
        # Filled by the subclass forward(); consumed by the loss helpers.
        self.forward_ret_dict = None

    def build_losses(self, losses_cfg):
        # Registered as submodules so the loss objects travel with the model.
        self.add_module(
            'cls_loss_func',
            loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0)
        )
        self.add_module(
            'reg_loss_func',
            loss_utils.WeightedSmoothL1Loss(
                code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None)
            )
        )

    @staticmethod
    def make_fc_layers(fc_cfg, input_channels, output_channels):
        """Build an MLP: (Linear-BN-ReLU) per fc_cfg entry, then a final Linear."""
        fc_layers = []
        c_in = input_channels
        for k in range(0, fc_cfg.__len__()):
            fc_layers.extend([
                nn.Linear(c_in, fc_cfg[k], bias=False),
                nn.BatchNorm1d(fc_cfg[k]),
                nn.ReLU(),
            ])
            c_in = fc_cfg[k]
        fc_layers.append(nn.Linear(c_in, output_channels, bias=True))
        return nn.Sequential(*fc_layers)

    def assign_stack_targets(self, points, gt_boxes, extend_gt_boxes=None,
                             ret_box_labels=False, ret_part_labels=False,
                             set_ignore_flag=True, use_ball_constraint=False, central_radius=2.0):
        """
        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            gt_boxes: (B, M, 8)
            extend_gt_boxes: [B, M, 8]
            ret_box_labels:
            ret_part_labels:
            set_ignore_flag:
            use_ball_constraint:
            central_radius:
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_box_labels: (N1 + N2 + N3 + ..., code_size)
        """
        assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)
        assert len(gt_boxes.shape) == 3 and gt_boxes.shape[2] == 8, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3 and extend_gt_boxes.shape[2] == 8, \
            'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape)
        assert set_ignore_flag != use_ball_constraint, 'Choose one only!'
        batch_size = gt_boxes.shape[0]
        bs_idx = points[:, 0]
        point_cls_labels = points.new_zeros(points.shape[0]).long()
        point_box_labels = gt_boxes.new_zeros((points.shape[0], 8)) if ret_box_labels else None
        point_part_labels = gt_boxes.new_zeros((points.shape[0], 3)) if ret_part_labels else None
        for k in range(batch_size):
            # Slice out this sample's points from the stacked point cloud.
            bs_mask = (bs_idx == k)
            points_single = points[bs_mask][:, 1:4]
            point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous()
            ).long().squeeze(dim=0)
            box_fg_flag = (box_idxs_of_pts >= 0)
            if set_ignore_flag:
                # Points in the enlarged boxes but not the original ones get -1 (ignored).
                extend_box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                    points_single.unsqueeze(dim=0), extend_gt_boxes[k:k+1, :, 0:7].contiguous()
                ).long().squeeze(dim=0)
                fg_flag = box_fg_flag
                ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0)
                point_cls_labels_single[ignore_flag] = -1
            elif use_ball_constraint:
                # Foreground = in-box points within central_radius of the box center
                # (center lifted by half the box height, column 5).
                box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone()
                box_centers[:, 2] += gt_boxes[k][box_idxs_of_pts][:, 5] / 2
                ball_flag = ((box_centers - points_single).norm(dim=1) < central_radius)
                fg_flag = box_fg_flag & ball_flag
            else:
                raise NotImplementedError
            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
            point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 else gt_box_of_fg_points[:, -1].long()
            point_cls_labels[bs_mask] = point_cls_labels_single
            if ret_box_labels and gt_box_of_fg_points.shape[0] > 0:
                point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), 8))
                fg_point_box_labels = self.box_coder.encode_torch(
                    gt_boxes=gt_box_of_fg_points[:, :-1], points=points_single[fg_flag],
                    gt_classes=gt_box_of_fg_points[:, -1].long()
                )
                point_box_labels_single[fg_flag] = fg_point_box_labels
                point_box_labels[bs_mask] = point_box_labels_single
            if ret_part_labels:
                # Intra-object part location: point offset in the box's canonical
                # frame, normalized by box size and shifted to roughly [0, 1].
                point_part_labels_single = point_part_labels.new_zeros((bs_mask.sum(), 3))
                transformed_points = points_single[fg_flag] - gt_box_of_fg_points[:, 0:3]
                transformed_points = common_utils.rotate_points_along_z(
                    transformed_points.view(-1, 1, 3), -gt_box_of_fg_points[:, 6]
                ).view(-1, 3)
                offset = torch.tensor([0.5, 0.5, 0.5]).view(1, 3).type_as(transformed_points)
                point_part_labels_single[fg_flag] = (transformed_points / gt_box_of_fg_points[:, 3:6]) + offset
                point_part_labels[bs_mask] = point_part_labels_single
        targets_dict = {
            'point_cls_labels': point_cls_labels,
            'point_box_labels': point_box_labels,
            'point_part_labels': point_part_labels
        }
        return targets_dict

    def get_cls_layer_loss(self, tb_dict=None):
        """Focal classification loss over all points, normalized by #positives."""
        point_cls_labels = self.forward_ret_dict['point_cls_labels'].view(-1)
        point_cls_preds = self.forward_ret_dict['point_cls_preds'].view(-1, self.num_class)
        positives = (point_cls_labels > 0)
        negative_cls_weights = (point_cls_labels == 0) * 1.0
        # NOTE(review): positives are weighted 15x vs. background here —
        # presumably a deliberate SASA tuning choice; confirm against the paper/config.
        cls_weights = (negative_cls_weights + 15.0 * positives).float()
        pos_normalizer = positives.sum(dim=0).float()
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class + 1)
        # Ignored points (-1) are clamped to class 0 before one-hot; their
        # cls_weights entry is 0, so they do not contribute to the loss.
        one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0)
        one_hot_targets = one_hot_targets[..., 1:]
        cls_loss_src = self.cls_loss_func(point_cls_preds, one_hot_targets, weights=cls_weights)
        point_loss_cls = cls_loss_src.sum()
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_cls = point_loss_cls * loss_weights_dict['point_cls_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({
            'point_loss_cls': point_loss_cls.item(),
            'point_pos_num': pos_normalizer.item()
        })
        return point_loss_cls, tb_dict

    def get_part_layer_loss(self, tb_dict=None):
        """BCE loss on intra-object part predictions, foreground points only."""
        pos_mask = self.forward_ret_dict['point_cls_labels'] > 0
        pos_normalizer = max(1, (pos_mask > 0).sum().item())
        point_part_labels = self.forward_ret_dict['point_part_labels']
        point_part_preds = self.forward_ret_dict['point_part_preds']
        point_loss_part = F.binary_cross_entropy(torch.sigmoid(point_part_preds), point_part_labels, reduction='none')
        # Normalize by 3 coordinate channels times the positive count.
        point_loss_part = (point_loss_part.sum(dim=-1) * pos_mask.float()).sum() / (3 * pos_normalizer)
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_part = point_loss_part * loss_weights_dict['point_part_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'point_loss_part': point_loss_part.item()})
        return point_loss_part, tb_dict

    def get_box_layer_loss(self, tb_dict=None):
        """Smooth-L1 box regression loss, foreground points only."""
        pos_mask = self.forward_ret_dict['point_cls_labels'] > 0
        point_box_labels = self.forward_ret_dict['point_box_labels']
        point_box_preds = self.forward_ret_dict['point_box_preds']
        reg_weights = pos_mask.float()
        pos_normalizer = pos_mask.sum().float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        point_loss_box_src = self.reg_loss_func(
            point_box_preds[None, ...], point_box_labels[None, ...], weights=reg_weights[None, ...]
        )
        point_loss_box = point_loss_box_src.sum()
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_box = point_loss_box * loss_weights_dict['point_box_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'point_loss_box': point_loss_box.item()})
        return point_loss_box, tb_dict

    def generate_predicted_boxes(self, points, point_cls_preds, point_box_preds):
        """
        Args:
            points: (N, 3)
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size)
        Returns:
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size)
        """
        # Decode with each point's argmax class (+1 to skip the background slot).
        _, pred_classes = point_cls_preds.max(dim=-1)
        point_box_preds = self.box_coder.decode_torch(point_box_preds, points, pred_classes + 1)
        return point_cls_preds, point_box_preds

    def forward(self, **kwargs):
        # Subclasses must implement the actual head forward pass.
        raise NotImplementedError
| 9,475
| 45
| 119
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/anchor_head_template.py
|
import numpy as np
import torch
import torch.nn as nn
from ...utils import box_coder_utils, common_utils, loss_utils
from .target_assigner.anchor_generator import AnchorGenerator
from .target_assigner.atss_target_assigner import ATSSTargetAssigner
from .target_assigner.axis_aligned_target_assigner import AxisAlignedTargetAssigner
class AnchorHeadTemplate(nn.Module):
    """Base class for anchor-based BEV detection heads.

    Builds the anchors, box coder, target assigner and loss modules; subclasses
    provide forward() and fill self.forward_ret_dict, from which the loss
    helpers below read.
    """

    def __init__(self, model_cfg, num_class, class_names, grid_size, point_cloud_range, predict_boxes_when_training):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.class_names = class_names
        self.predict_boxes_when_training = predict_boxes_when_training
        self.use_multihead = self.model_cfg.get('USE_MULTIHEAD', False)
        anchor_target_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG
        # Box coder selected by name from box_coder_utils.
        self.box_coder = getattr(box_coder_utils, anchor_target_cfg.BOX_CODER)(
            num_dir_bins=anchor_target_cfg.get('NUM_DIR_BINS', 6),
            **anchor_target_cfg.get('BOX_CODER_CONFIG', {})
        )
        anchor_generator_cfg = self.model_cfg.ANCHOR_GENERATOR_CONFIG
        anchors, self.num_anchors_per_location = self.generate_anchors(
            anchor_generator_cfg, grid_size=grid_size, point_cloud_range=point_cloud_range,
            anchor_ndim=self.box_coder.code_size
        )
        # Anchors are moved to GPU once at construction time.
        self.anchors = [x.cuda() for x in anchors]
        self.target_assigner = self.get_target_assigner(anchor_target_cfg)
        self.forward_ret_dict = {}
        self.build_losses(self.model_cfg.LOSS_CONFIG)

    @staticmethod
    def generate_anchors(anchor_generator_cfg, grid_size, point_cloud_range, anchor_ndim=7):
        """Generate anchors per class config; zero-pad to anchor_ndim if the coder needs more dims."""
        anchor_generator = AnchorGenerator(
            anchor_range=point_cloud_range,
            anchor_generator_config=anchor_generator_cfg
        )
        feature_map_size = [grid_size[:2] // config['feature_map_stride'] for config in anchor_generator_cfg]
        anchors_list, num_anchors_per_location_list = anchor_generator.generate_anchors(feature_map_size)
        if anchor_ndim != 7:
            for idx, anchors in enumerate(anchors_list):
                pad_zeros = anchors.new_zeros([*anchors.shape[0:-1], anchor_ndim - 7])
                new_anchors = torch.cat((anchors, pad_zeros), dim=-1)
                anchors_list[idx] = new_anchors
        return anchors_list, num_anchors_per_location_list

    def get_target_assigner(self, anchor_target_cfg):
        """Instantiate the configured target assigner (ATSS or axis-aligned)."""
        if anchor_target_cfg.NAME == 'ATSS':
            target_assigner = ATSSTargetAssigner(
                topk=anchor_target_cfg.TOPK,
                box_coder=self.box_coder,
                use_multihead=self.use_multihead,
                match_height=anchor_target_cfg.MATCH_HEIGHT
            )
        elif anchor_target_cfg.NAME == 'AxisAlignedTargetAssigner':
            target_assigner = AxisAlignedTargetAssigner(
                model_cfg=self.model_cfg,
                class_names=self.class_names,
                box_coder=self.box_coder,
                match_height=anchor_target_cfg.MATCH_HEIGHT
            )
        else:
            raise NotImplementedError
        return target_assigner

    def build_losses(self, losses_cfg):
        # Registered as submodules: focal cls loss, configurable reg loss, CE dir loss.
        self.add_module(
            'cls_loss_func',
            loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0)
        )
        reg_loss_name = 'WeightedSmoothL1Loss' if losses_cfg.get('REG_LOSS_TYPE', None) is None \
            else losses_cfg.REG_LOSS_TYPE
        self.add_module(
            'reg_loss_func',
            getattr(loss_utils, reg_loss_name)(code_weights=losses_cfg.LOSS_WEIGHTS['code_weights'])
        )
        self.add_module(
            'dir_loss_func',
            loss_utils.WeightedCrossEntropyLoss()
        )

    def assign_targets(self, gt_boxes):
        """
        Args:
            gt_boxes: (B, M, 8)
        Returns:
        """
        targets_dict = self.target_assigner.assign_targets(
            self.anchors, gt_boxes
        )
        return targets_dict

    def get_cls_layer_loss(self):
        """Focal classification loss over all anchors, normalized per batch item."""
        cls_preds = self.forward_ret_dict['cls_preds']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        batch_size = int(cls_preds.shape[0])
        cared = box_cls_labels >= 0  # [N, num_anchors]
        positives = box_cls_labels > 0
        negatives = box_cls_labels == 0
        negative_cls_weights = negatives * 1.0
        cls_weights = (negative_cls_weights + 1.0 * positives).float()
        reg_weights = positives.float()
        if self.num_class == 1:
            # class agnostic
            box_cls_labels[positives] = 1
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        # Ignored anchors (-1) become class 0 here; their weight is already 0.
        cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
        cls_targets = cls_targets.unsqueeze(dim=-1)
        cls_targets = cls_targets.squeeze(dim=-1)
        one_hot_targets = torch.zeros(
            *list(cls_targets.shape), self.num_class + 1, dtype=cls_preds.dtype, device=cls_targets.device
        )
        one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
        cls_preds = cls_preds.view(batch_size, -1, self.num_class)
        # Drop the background column of the one-hot targets.
        one_hot_targets = one_hot_targets[..., 1:]
        cls_loss_src = self.cls_loss_func(cls_preds, one_hot_targets, weights=cls_weights)  # [N, M]
        cls_loss = cls_loss_src.sum() / batch_size
        cls_loss = cls_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight']
        tb_dict = {
            'rpn_loss_cls': cls_loss.item()
        }
        return cls_loss, tb_dict

    @staticmethod
    def add_sin_difference(boxes1, boxes2, dim=6):
        """Replace the angle channel of both boxes with sin/cos cross terms so the
        regression loss effectively penalizes sin(a - b)."""
        assert dim != -1
        rad_pred_encoding = torch.sin(boxes1[..., dim:dim + 1]) * torch.cos(boxes2[..., dim:dim + 1])
        rad_tg_encoding = torch.cos(boxes1[..., dim:dim + 1]) * torch.sin(boxes2[..., dim:dim + 1])
        boxes1 = torch.cat([boxes1[..., :dim], rad_pred_encoding, boxes1[..., dim + 1:]], dim=-1)
        boxes2 = torch.cat([boxes2[..., :dim], rad_tg_encoding, boxes2[..., dim + 1:]], dim=-1)
        return boxes1, boxes2

    @staticmethod
    def get_direction_target(anchors, reg_targets, one_hot=True, dir_offset=0, num_bins=2):
        """Bucket the GT heading (anchor angle + regression target) into num_bins direction classes."""
        batch_size = reg_targets.shape[0]
        anchors = anchors.view(batch_size, -1, anchors.shape[-1])
        rot_gt = reg_targets[..., 6] + anchors[..., 6]
        offset_rot = common_utils.limit_period(rot_gt - dir_offset, 0, 2 * np.pi)
        dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long()
        dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)
        if one_hot:
            dir_targets = torch.zeros(*list(dir_cls_targets.shape), num_bins, dtype=anchors.dtype,
                                      device=dir_cls_targets.device)
            dir_targets.scatter_(-1, dir_cls_targets.unsqueeze(dim=-1).long(), 1.0)
            dir_cls_targets = dir_targets
        return dir_cls_targets

    def get_box_reg_layer_loss(self):
        """Localization loss (plus optional direction-bin loss) over positive anchors."""
        box_preds = self.forward_ret_dict['box_preds']
        box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None)
        box_reg_targets = self.forward_ret_dict['box_reg_targets']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        batch_size = int(box_preds.shape[0])
        positives = box_cls_labels > 0
        reg_weights = positives.float()
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        if isinstance(self.anchors, list):
            if self.use_multihead:
                anchors = torch.cat(
                    [anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1]) for anchor in
                     self.anchors], dim=0)
            else:
                anchors = torch.cat(self.anchors, dim=-3)
        else:
            anchors = self.anchors
        anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
        box_preds = box_preds.view(batch_size, -1,
                                   box_preds.shape[-1] // self.num_anchors_per_location if not self.use_multihead else
                                   box_preds.shape[-1])
        # sin(a - b) = sinacosb-cosasinb
        box_preds_sin, reg_targets_sin = self.add_sin_difference(box_preds, box_reg_targets)
        loc_loss_src = self.reg_loss_func(box_preds_sin, reg_targets_sin, weights=reg_weights)  # [N, M]
        loc_loss = loc_loss_src.sum() / batch_size
        loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']
        box_loss = loc_loss
        tb_dict = {
            'rpn_loss_loc': loc_loss.item()
        }
        if box_dir_cls_preds is not None:
            dir_targets = self.get_direction_target(
                anchors, box_reg_targets,
                dir_offset=self.model_cfg.DIR_OFFSET,
                num_bins=self.model_cfg.NUM_DIR_BINS
            )
            dir_logits = box_dir_cls_preds.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS)
            weights = positives.type_as(dir_logits)
            weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
            dir_loss = self.dir_loss_func(dir_logits, dir_targets, weights=weights)
            dir_loss = dir_loss.sum() / batch_size
            dir_loss = dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['dir_weight']
            box_loss += dir_loss
            tb_dict['rpn_loss_dir'] = dir_loss.item()
        return box_loss, tb_dict

    def get_loss(self):
        """Total RPN loss = classification + box regression (+ direction)."""
        cls_loss, tb_dict = self.get_cls_layer_loss()
        box_loss, tb_dict_box = self.get_box_reg_layer_loss()
        tb_dict.update(tb_dict_box)
        rpn_loss = cls_loss + box_loss
        tb_dict['rpn_loss'] = rpn_loss.item()
        return rpn_loss, tb_dict

    def generate_predicted_boxes(self, batch_size, cls_preds, box_preds, dir_cls_preds=None):
        """
        Args:
            batch_size:
            cls_preds: (N, H, W, C1)
            box_preds: (N, H, W, C2)
            dir_cls_preds: (N, H, W, C3)
        Returns:
            batch_cls_preds: (B, num_boxes, num_classes)
            batch_box_preds: (B, num_boxes, 7+C)
        """
        if isinstance(self.anchors, list):
            if self.use_multihead:
                anchors = torch.cat([anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1])
                                     for anchor in self.anchors], dim=0)
            else:
                anchors = torch.cat(self.anchors, dim=-3)
        else:
            anchors = self.anchors
        num_anchors = anchors.view(-1, anchors.shape[-1]).shape[0]
        batch_anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
        batch_cls_preds = cls_preds.view(batch_size, num_anchors, -1).float() \
            if not isinstance(cls_preds, list) else cls_preds
        batch_box_preds = box_preds.view(batch_size, num_anchors, -1) if not isinstance(box_preds, list) \
            else torch.cat(box_preds, dim=1).view(batch_size, num_anchors, -1)
        batch_box_preds = self.box_coder.decode_torch(batch_box_preds, batch_anchors)
        if dir_cls_preds is not None:
            # Snap the decoded heading to the predicted direction bin.
            dir_offset = self.model_cfg.DIR_OFFSET
            dir_limit_offset = self.model_cfg.DIR_LIMIT_OFFSET
            dir_cls_preds = dir_cls_preds.view(batch_size, num_anchors, -1) if not isinstance(dir_cls_preds, list) \
                else torch.cat(dir_cls_preds, dim=1).view(batch_size, num_anchors, -1)
            dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
            period = (2 * np.pi / self.model_cfg.NUM_DIR_BINS)
            dir_rot = common_utils.limit_period(
                batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period
            )
            batch_box_preds[..., 6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype)
        if isinstance(self.box_coder, box_coder_utils.PreviousResidualDecoder):
            # Legacy coder used a different heading convention; convert it back.
            batch_box_preds[..., 6] = common_utils.limit_period(
                -(batch_box_preds[..., 6] + np.pi / 2), offset=0.5, period=np.pi * 2
            )
        return batch_cls_preds, batch_box_preds

    def forward(self, **kwargs):
        # Subclasses must implement the actual head forward pass.
        raise NotImplementedError
| 12,364
| 43.800725
| 118
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/anchor_head_multi.py
|
import numpy as np
import torch
import torch.nn as nn
from ..backbones_2d import BaseBEVBackbone
from .anchor_head_template import AnchorHeadTemplate
class SingleHead(BaseBEVBackbone):
    """One head of a multi-head anchor RPN.

    Optionally runs its own small BEV backbone (via the BaseBEVBackbone parent,
    configured by rpn_head_cfg), then predicts cls / box / direction outputs.
    With separate_reg_config, each regression target gets its own conv branch.
    """

    def __init__(self, model_cfg, input_channels, num_class, num_anchors_per_location, code_size, rpn_head_cfg=None,
                 head_label_indices=None, separate_reg_config=None):
        super().__init__(rpn_head_cfg, input_channels)
        self.num_anchors_per_location = num_anchors_per_location
        self.num_class = num_class
        self.code_size = code_size
        self.model_cfg = model_cfg
        self.separate_reg_config = separate_reg_config
        # Buffer (not parameter): global class indices this head is responsible for.
        self.register_buffer('head_label_indices', head_label_indices)
        if self.separate_reg_config is not None:
            code_size_cnt = 0
            self.conv_box = nn.ModuleDict()
            self.conv_box_names = []
            num_middle_conv = self.separate_reg_config.NUM_MIDDLE_CONV
            num_middle_filter = self.separate_reg_config.NUM_MIDDLE_FILTER
            # Classification branch: middle 3x3 convs + final prediction conv.
            conv_cls_list = []
            c_in = input_channels
            for k in range(num_middle_conv):
                conv_cls_list.extend([
                    nn.Conv2d(
                        c_in, num_middle_filter,
                        kernel_size=3, stride=1, padding=1, bias=False
                    ),
                    nn.BatchNorm2d(num_middle_filter),
                    nn.ReLU()
                ])
                c_in = num_middle_filter
            conv_cls_list.append(nn.Conv2d(
                c_in, self.num_anchors_per_location * self.num_class,
                kernel_size=3, stride=1, padding=1
            ))
            self.conv_cls = nn.Sequential(*conv_cls_list)
            # One regression branch per 'name:channels' entry in REG_LIST.
            for reg_config in self.separate_reg_config.REG_LIST:
                reg_name, reg_channel = reg_config.split(':')
                reg_channel = int(reg_channel)
                cur_conv_list = []
                c_in = input_channels
                for k in range(num_middle_conv):
                    cur_conv_list.extend([
                        nn.Conv2d(
                            c_in, num_middle_filter,
                            kernel_size=3, stride=1, padding=1, bias=False
                        ),
                        nn.BatchNorm2d(num_middle_filter),
                        nn.ReLU()
                    ])
                    c_in = num_middle_filter
                cur_conv_list.append(nn.Conv2d(
                    c_in, self.num_anchors_per_location * int(reg_channel),
                    kernel_size=3, stride=1, padding=1, bias=True
                ))
                code_size_cnt += reg_channel
                self.conv_box[f'conv_{reg_name}'] = nn.Sequential(*cur_conv_list)
                self.conv_box_names.append(f'conv_{reg_name}')
            for m in self.conv_box.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
            # The per-branch channels must add up to the box coder's code size.
            assert code_size_cnt == code_size, f'Code size does not match: {code_size_cnt}:{code_size}'
        else:
            # Plain single-conv heads.
            self.conv_cls = nn.Conv2d(
                input_channels, self.num_anchors_per_location * self.num_class,
                kernel_size=1
            )
            self.conv_box = nn.Conv2d(
                input_channels, self.num_anchors_per_location * self.code_size,
                kernel_size=1
            )
        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
            self.conv_dir_cls = nn.Conv2d(
                input_channels,
                self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
                kernel_size=1
            )
        else:
            self.conv_dir_cls = None
        self.use_multihead = self.model_cfg.get('USE_MULTIHEAD', False)
        self.init_weights()

    def init_weights(self):
        """Focal-loss style init on the final classification conv's bias."""
        pi = 0.01
        if isinstance(self.conv_cls, nn.Conv2d):
            nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
        else:
            nn.init.constant_(self.conv_cls[-1].bias, -np.log((1 - pi) / pi))

    def forward(self, spatial_features_2d):
        """Run the (optional) head backbone and the prediction convs.

        Returns a dict with 'cls_preds', 'box_preds' and 'dir_cls_preds'
        (the latter None when no direction classifier is configured).
        """
        ret_dict = {}
        # Parent BaseBEVBackbone refines the shared BEV features for this head.
        spatial_features_2d = super().forward({'spatial_features': spatial_features_2d})['spatial_features_2d']
        cls_preds = self.conv_cls(spatial_features_2d)
        if self.separate_reg_config is None:
            box_preds = self.conv_box(spatial_features_2d)
        else:
            box_preds_list = []
            for reg_name in self.conv_box_names:
                box_preds_list.append(self.conv_box[reg_name](spatial_features_2d))
            box_preds = torch.cat(box_preds_list, dim=1)
        if not self.use_multihead:
            box_preds = box_preds.permute(0, 2, 3, 1).contiguous()
            cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        else:
            # Flatten per-anchor predictions to (B, num_boxes, C).
            H, W = box_preds.shape[2:]
            batch_size = box_preds.shape[0]
            box_preds = box_preds.view(-1, self.num_anchors_per_location,
                                       self.code_size, H, W).permute(0, 1, 3, 4, 2).contiguous()
            cls_preds = cls_preds.view(-1, self.num_anchors_per_location,
                                       self.num_class, H, W).permute(0, 1, 3, 4, 2).contiguous()
            box_preds = box_preds.view(batch_size, -1, self.code_size)
            cls_preds = cls_preds.view(batch_size, -1, self.num_class)
        if self.conv_dir_cls is not None:
            dir_cls_preds = self.conv_dir_cls(spatial_features_2d)
            if self.use_multihead:
                # H, W, batch_size are only defined in the multihead branch above.
                dir_cls_preds = dir_cls_preds.view(
                    -1, self.num_anchors_per_location, self.model_cfg.NUM_DIR_BINS, H, W).permute(0, 1, 3, 4,
                                                                                                 2).contiguous()
                dir_cls_preds = dir_cls_preds.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS)
            else:
                dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
        else:
            dir_cls_preds = None
        ret_dict['cls_preds'] = cls_preds
        ret_dict['box_preds'] = box_preds
        ret_dict['dir_cls_preds'] = dir_cls_preds
        return ret_dict
class AnchorHeadMulti(AnchorHeadTemplate):
    """Multi-head anchor RPN: one SingleHead per group of classes.

    An optional shared conv refines the BEV features before they are fed to
    every head. Losses are computed per head by slicing the flat target
    tensors with a running start index.
    """

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training=True, **kwargs):
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size,
            point_cloud_range=point_cloud_range, predict_boxes_when_training=predict_boxes_when_training
        )
        self.model_cfg = model_cfg
        self.separate_multihead = self.model_cfg.get('SEPARATE_MULTIHEAD', False)
        if self.model_cfg.get('SHARED_CONV_NUM_FILTER', None) is not None:
            shared_conv_num_filter = self.model_cfg.SHARED_CONV_NUM_FILTER
            self.shared_conv = nn.Sequential(
                nn.Conv2d(input_channels, shared_conv_num_filter, 3, stride=1, padding=1, bias=False),
                nn.BatchNorm2d(shared_conv_num_filter, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        else:
            self.shared_conv = None
            shared_conv_num_filter = input_channels
        self.rpn_heads = None
        self.make_multihead(shared_conv_num_filter)

    def make_multihead(self, input_channels):
        """Instantiate one SingleHead per RPN_HEAD_CFGS entry."""
        rpn_head_cfgs = self.model_cfg.RPN_HEAD_CFGS
        rpn_heads = []
        class_names = []
        for rpn_head_cfg in rpn_head_cfgs:
            class_names.extend(rpn_head_cfg['HEAD_CLS_NAME'])
        for rpn_head_cfg in rpn_head_cfgs:
            num_anchors_per_location = sum([self.num_anchors_per_location[class_names.index(head_cls)]
                                            for head_cls in rpn_head_cfg['HEAD_CLS_NAME']])
            # 1-based global class indices covered by this head.
            head_label_indices = torch.from_numpy(np.array([
                self.class_names.index(cur_name) + 1 for cur_name in rpn_head_cfg['HEAD_CLS_NAME']
            ]))
            rpn_head = SingleHead(
                self.model_cfg, input_channels,
                len(rpn_head_cfg['HEAD_CLS_NAME']) if self.separate_multihead else self.num_class,
                num_anchors_per_location, self.box_coder.code_size, rpn_head_cfg,
                head_label_indices=head_label_indices,
                separate_reg_config=self.model_cfg.get('SEPARATE_REG_CONFIG', None)
            )
            rpn_heads.append(rpn_head)
        self.rpn_heads = nn.ModuleList(rpn_heads)

    def forward(self, data_dict):
        """Run every head; concat outputs unless SEPARATE_MULTIHEAD keeps them as lists."""
        spatial_features_2d = data_dict['spatial_features_2d']
        if self.shared_conv is not None:
            spatial_features_2d = self.shared_conv(spatial_features_2d)
        ret_dicts = []
        for rpn_head in self.rpn_heads:
            ret_dicts.append(rpn_head(spatial_features_2d))
        cls_preds = [ret_dict['cls_preds'] for ret_dict in ret_dicts]
        box_preds = [ret_dict['box_preds'] for ret_dict in ret_dicts]
        ret = {
            'cls_preds': cls_preds if self.separate_multihead else torch.cat(cls_preds, dim=1),
            'box_preds': box_preds if self.separate_multihead else torch.cat(box_preds, dim=1),
        }
        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', False):
            dir_cls_preds = [ret_dict['dir_cls_preds'] for ret_dict in ret_dicts]
            ret['dir_cls_preds'] = dir_cls_preds if self.separate_multihead else torch.cat(dir_cls_preds, dim=1)
        self.forward_ret_dict.update(ret)
        if self.training:
            targets_dict = self.assign_targets(
                gt_boxes=data_dict['gt_boxes']
            )
            self.forward_ret_dict.update(targets_dict)
        if not self.training or self.predict_boxes_when_training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=data_dict['batch_size'],
                cls_preds=ret['cls_preds'], box_preds=ret['box_preds'], dir_cls_preds=ret.get('dir_cls_preds', None)
            )
            if isinstance(batch_cls_preds, list):
                # Tell post-processing which global labels each head's scores map to.
                multihead_label_mapping = []
                for idx in range(len(batch_cls_preds)):
                    multihead_label_mapping.append(self.rpn_heads[idx].head_label_indices)
                data_dict['multihead_label_mapping'] = multihead_label_mapping
            data_dict['batch_cls_preds'] = batch_cls_preds
            data_dict['batch_box_preds'] = batch_box_preds
            data_dict['cls_preds_normalized'] = False
        return data_dict

    def get_cls_layer_loss(self):
        """Per-head focal classification loss over anchor slices, summed across heads."""
        loss_weights = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        if 'pos_cls_weight' in loss_weights:
            pos_cls_weight = loss_weights['pos_cls_weight']
            neg_cls_weight = loss_weights['neg_cls_weight']
        else:
            pos_cls_weight = neg_cls_weight = 1.0
        cls_preds = self.forward_ret_dict['cls_preds']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        if not isinstance(cls_preds, list):
            cls_preds = [cls_preds]
        batch_size = int(cls_preds[0].shape[0])
        cared = box_cls_labels >= 0  # [N, num_anchors]
        positives = box_cls_labels > 0
        negatives = box_cls_labels == 0
        negative_cls_weights = negatives * 1.0 * neg_cls_weight
        cls_weights = (negative_cls_weights + pos_cls_weight * positives).float()
        reg_weights = positives.float()
        if self.num_class == 1:
            # class agnostic
            box_cls_labels[positives] = 1
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
        one_hot_targets = torch.zeros(
            *list(cls_targets.shape), self.num_class + 1, dtype=cls_preds[0].dtype, device=cls_targets.device
        )
        one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
        one_hot_targets = one_hot_targets[..., 1:]
        # start_idx walks the anchor dimension, c_idx the class dimension.
        start_idx = c_idx = 0
        cls_losses = 0
        for idx, cls_pred in enumerate(cls_preds):
            cur_num_class = self.rpn_heads[idx].num_class
            cls_pred = cls_pred.view(batch_size, -1, cur_num_class)
            if self.separate_multihead:
                one_hot_target = one_hot_targets[:, start_idx:start_idx + cls_pred.shape[1],
                                 c_idx:c_idx + cur_num_class]
                c_idx += cur_num_class
            else:
                one_hot_target = one_hot_targets[:, start_idx:start_idx + cls_pred.shape[1]]
            cls_weight = cls_weights[:, start_idx:start_idx + cls_pred.shape[1]]
            cls_loss_src = self.cls_loss_func(cls_pred, one_hot_target, weights=cls_weight)  # [N, M]
            cls_loss = cls_loss_src.sum() / batch_size
            cls_loss = cls_loss * loss_weights['cls_weight']
            cls_losses += cls_loss
            start_idx += cls_pred.shape[1]
        assert start_idx == one_hot_targets.shape[1]
        tb_dict = {
            'rpn_loss_cls': cls_losses.item()
        }
        return cls_losses, tb_dict

    def get_box_reg_layer_loss(self):
        """Per-head localization (and optional direction) loss over anchor slices."""
        box_preds = self.forward_ret_dict['box_preds']
        box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None)
        box_reg_targets = self.forward_ret_dict['box_reg_targets']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        positives = box_cls_labels > 0
        reg_weights = positives.float()
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        if not isinstance(box_preds, list):
            box_preds = [box_preds]
        batch_size = int(box_preds[0].shape[0])
        if isinstance(self.anchors, list):
            if self.use_multihead:
                anchors = torch.cat(
                    [anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1])
                     for anchor in self.anchors], dim=0
                )
            else:
                anchors = torch.cat(self.anchors, dim=-3)
        else:
            anchors = self.anchors
        anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
        start_idx = 0
        box_losses = 0
        tb_dict = {}
        for idx, box_pred in enumerate(box_preds):
            box_pred = box_pred.view(
                batch_size, -1,
                box_pred.shape[-1] // self.num_anchors_per_location if not self.use_multihead else box_pred.shape[-1]
            )
            box_reg_target = box_reg_targets[:, start_idx:start_idx + box_pred.shape[1]]
            reg_weight = reg_weights[:, start_idx:start_idx + box_pred.shape[1]]
            # sin(a - b) = sinacosb-cosasinb
            if box_dir_cls_preds is not None:
                box_pred_sin, reg_target_sin = self.add_sin_difference(box_pred, box_reg_target)
                loc_loss_src = self.reg_loss_func(box_pred_sin, reg_target_sin, weights=reg_weight)  # [N, M]
            else:
                loc_loss_src = self.reg_loss_func(box_pred, box_reg_target, weights=reg_weight)  # [N, M]
            loc_loss = loc_loss_src.sum() / batch_size
            loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']
            box_losses += loc_loss
            tb_dict['rpn_loss_loc'] = tb_dict.get('rpn_loss_loc', 0) + loc_loss.item()
            if box_dir_cls_preds is not None:
                if not isinstance(box_dir_cls_preds, list):
                    box_dir_cls_preds = [box_dir_cls_preds]
                dir_targets = self.get_direction_target(
                    anchors, box_reg_targets,
                    dir_offset=self.model_cfg.DIR_OFFSET,
                    num_bins=self.model_cfg.NUM_DIR_BINS
                )
                box_dir_cls_pred = box_dir_cls_preds[idx]
                dir_logit = box_dir_cls_pred.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS)
                weights = positives.type_as(dir_logit)
                weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
                weight = weights[:, start_idx:start_idx + box_pred.shape[1]]
                dir_target = dir_targets[:, start_idx:start_idx + box_pred.shape[1]]
                dir_loss = self.dir_loss_func(dir_logit, dir_target, weights=weight)
                dir_loss = dir_loss.sum() / batch_size
                dir_loss = dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['dir_weight']
                box_losses += dir_loss
                tb_dict['rpn_loss_dir'] = tb_dict.get('rpn_loss_dir', 0) + dir_loss.item()
            start_idx += box_pred.shape[1]
        return box_losses, tb_dict
| 17,041
| 44.566845
| 117
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/point_head_box.py
|
import torch
from ...utils import box_coder_utils, box_utils
from ...utils.loss_utils import PointSASALoss
from .point_head_template import PointHeadTemplate
class PointHeadBox(PointHeadTemplate):
    """
    A simple point-based segmentation head, which are used for PointRCNN.
    Reference Paper: https://arxiv.org/abs/1812.04244
    PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud
    """
    def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.predict_boxes_when_training = predict_boxes_when_training
        # Per-point classification branch: (N, C) features -> (N, num_class) logits.
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )
        # The box coder defines the regression target encoding and its code size.
        target_cfg = self.model_cfg.TARGET_CONFIG
        self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
            **target_cfg.BOX_CODER_CONFIG
        )
        # Per-point box regression branch: (N, C) features -> (N, code_size).
        self.box_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.REG_FC,
            input_channels=input_channels,
            output_channels=self.box_coder.code_size
        )
        # Optional SASA auxiliary loss; disabled when LOSS_SASA_CONFIG is absent.
        sasa_loss_cfg = model_cfg.LOSS_CONFIG.get('LOSS_SASA_CONFIG', None)
        if sasa_loss_cfg is not None:
            self.loss_point_sasa = PointSASALoss(**sasa_loss_cfg)
        else:
            self.loss_point_sasa = None

    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict['point_coords']
        gt_boxes = input_dict['gt_boxes']
        assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)
        batch_size = gt_boxes.shape[0]
        # Enlarge boxes by GT_EXTRA_WIDTH; points falling only inside the
        # enlarged boxes form a border band that is ignored during training.
        extend_gt_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
        ).view(batch_size, -1, gt_boxes.shape[-1])
        targets_dict = self.assign_stack_targets(
            points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_part_labels=False, ret_box_labels=True
        )
        return targets_dict

    def get_sasa_layer_loss(self):
        """Sum the per-layer SASA auxiliary losses; per-layer entries of None are skipped.

        Returns:
            point_loss_sasa: summed loss tensor (or the scalar 0 when every layer was None)
            tb_dict: per-layer and total scalar values for tensorboard
        """
        point_loss_sasa_list = self.loss_point_sasa.loss_forward(
            self.forward_ret_dict['point_sasa_preds'],
            self.forward_ret_dict['point_sasa_labels']
        )
        point_loss_sasa = 0
        tb_dict = dict()
        for i in range(len(point_loss_sasa_list)):
            cur_point_loss_sasa = point_loss_sasa_list[i]
            if cur_point_loss_sasa is None:
                continue
            point_loss_sasa = point_loss_sasa + cur_point_loss_sasa
            tb_dict['point_loss_sasa_layer_%d' % i] = point_loss_sasa_list[i].item()
        # If every per-layer loss was None, the accumulator is still the int 0;
        # calling .item() on it would raise AttributeError, so guard for that.
        if isinstance(point_loss_sasa, torch.Tensor):
            tb_dict['point_loss_sasa'] = point_loss_sasa.item()
        else:
            tb_dict['point_loss_sasa'] = float(point_loss_sasa)
        return point_loss_sasa, tb_dict

    def get_loss(self, tb_dict=None):
        """Total head loss = point segmentation + box regression (+ optional SASA aux)."""
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict_1 = self.get_cls_layer_loss()
        point_loss_box, tb_dict_2 = self.get_box_layer_loss()
        point_loss = point_loss_cls + point_loss_box
        tb_dict.update(tb_dict_1)
        tb_dict.update(tb_dict_2)
        if self.loss_point_sasa is not None:
            point_loss_sasa, tb_dict_3 = self.get_sasa_layer_loss()
            point_loss = point_loss + point_loss_sasa
            tb_dict.update(tb_dict_3)
        return point_loss, tb_dict

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_features_before_fusion: (N1 + N2 + N3 + ..., C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            point_features = batch_dict['point_features_before_fusion']
        else:
            point_features = batch_dict['point_features']
        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)
        point_box_preds = self.box_layers(point_features)  # (total_points, box_code_size)
        # Foreground confidence = sigmoid of the max class logit.
        point_cls_preds_max, _ = point_cls_preds.max(dim=-1)
        batch_dict['point_cls_scores'] = torch.sigmoid(point_cls_preds_max)
        ret_dict = {'point_cls_preds': point_cls_preds,
                    'point_box_preds': point_box_preds}
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
            ret_dict['point_box_labels'] = targets_dict['point_box_labels']
            if self.loss_point_sasa is not None:
                # NOTE(review): loss_point_sasa.__call__ presumably builds per-layer
                # segmentation labels from the SA layers' coords/scores — confirm
                # against PointSASALoss in utils/loss_utils.
                point_sasa_labels = self.loss_point_sasa(
                    batch_dict['point_coords_list'],
                    batch_dict['point_scores_list'],
                    batch_dict['gt_boxes']
                )
                ret_dict.update({
                    'point_sasa_preds': batch_dict['point_scores_list'],
                    'point_sasa_labels': point_sasa_labels
                })
        if not self.training or self.predict_boxes_when_training:
            # Decode the per-point encodings into absolute box predictions.
            point_cls_preds, point_box_preds = self.generate_predicted_boxes(
                points=batch_dict['point_coords'][:, 1:4],
                point_cls_preds=point_cls_preds, point_box_preds=point_box_preds
            )
            batch_dict['batch_cls_preds'] = point_cls_preds
            batch_dict['batch_box_preds'] = point_box_preds
            batch_dict['batch_index'] = batch_dict['point_coords'][:, 0]
            batch_dict['cls_preds_normalized'] = False
        self.forward_ret_dict = ret_dict
        return batch_dict
| 6,616
| 41.146497
| 106
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/point_head_simple.py
|
import torch
from ...utils import box_utils
from .point_head_template import PointHeadTemplate
class PointHeadSimple(PointHeadTemplate):
    """Point-wise foreground segmentation head (PV-RCNN keypoint weighting).

    Reference Paper: https://arxiv.org/abs/1912.13192
    PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection
    """
    def __init__(self, num_class, input_channels, model_cfg, **kwargs):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        # Single classification branch: per-point features -> class logits.
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )

    def assign_targets(self, input_dict):
        """Assign per-point foreground/background labels from the GT boxes.

        Args:
            input_dict:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            targets_dict with point_cls_labels: (N1 + N2 + N3 + ...),
            long type, 0:background, -1:ignored (border band of enlarged boxes)
        """
        coords = input_dict['point_coords']
        boxes = input_dict['gt_boxes']
        assert len(boxes.shape) == 3, 'gt_boxes.shape=%s' % str(boxes.shape)
        assert len(coords.shape) == 2, 'points.shape=%s' % str(coords.shape)
        num_batches = boxes.shape[0]
        # Points inside the enlarged boxes but outside the originals get ignored.
        enlarged = box_utils.enlarge_box3d(
            boxes.view(-1, boxes.shape[-1]),
            extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
        ).view(num_batches, -1, boxes.shape[-1])
        return self.assign_stack_targets(
            points=coords, gt_boxes=boxes, extend_gt_boxes=enlarged,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_part_labels=False
        )

    def get_loss(self, tb_dict=None):
        """Total loss of this head: only the point classification term."""
        if tb_dict is None:
            tb_dict = {}
        cls_loss, cls_tb = self.get_cls_layer_loss()
        tb_dict.update(cls_tb)
        return cls_loss, tb_dict

    def forward(self, batch_dict):
        """Score every point; cache predictions (and labels when training).

        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_features_before_fusion: (N1 + N2 + N3 + ..., C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict with point_cls_scores added: (N1 + N2 + N3 + ...)
        """
        use_pre_fusion = self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False)
        feats = batch_dict['point_features_before_fusion'] if use_pre_fusion \
            else batch_dict['point_features']
        logits = self.cls_layers(feats)  # (total_points, num_class)
        ret = {'point_cls_preds': logits}
        # Per-point confidence = max class probability.
        batch_dict['point_cls_scores'], _ = torch.sigmoid(logits).max(dim=-1)
        if self.training:
            ret['point_cls_labels'] = self.assign_targets(batch_dict)['point_cls_labels']
        self.forward_ret_dict = ret
        return batch_dict
| 3,568
| 37.793478
| 106
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/__init__.py
|
from .anchor_head_multi import AnchorHeadMulti
from .anchor_head_single import AnchorHeadSingle
from .anchor_head_template import AnchorHeadTemplate
from .point_head_box import PointHeadBox
from .point_head_vote import PointHeadVote
from .point_head_simple import PointHeadSimple
from .point_intra_part_head import PointIntraPartOffsetHead
# Registry mapping config NAME strings to dense-head classes; consumers look
# the class up by key rather than importing it directly.
__all__ = {
    'AnchorHeadTemplate': AnchorHeadTemplate,
    'AnchorHeadSingle': AnchorHeadSingle,
    'PointIntraPartOffsetHead': PointIntraPartOffsetHead,
    'PointHeadSimple': PointHeadSimple,
    'PointHeadBox': PointHeadBox,
    'PointHeadVote': PointHeadVote,
    'AnchorHeadMulti': AnchorHeadMulti,
}
| 651
| 35.222222
| 59
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/point_head_vote.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ...ops.iou3d_nms import iou3d_nms_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
from ...utils import box_coder_utils, box_utils, common_utils, loss_utils
from .point_head_template import PointHeadTemplate
class PointHeadVote(PointHeadTemplate):
"""
A simple vote-based detection head, which is used for 3DSSD.
Reference Paper: https://arxiv.org/abs/2002.10187
3DSSD: Point-based 3D Single Stage Object Detector
"""
    def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
        """Build the vote layer, SA refinement module and shared/cls/reg branches.

        Args:
            num_class: number of foreground classes.
            input_channels: channel dim of the incoming point features.
            model_cfg: head config (VOTE_CONFIG, SA_CONFIG, SHARED_FC, CLS_FC,
                REG_FC, TARGET_CONFIG, LOSS_CONFIG, USE_BN).
            predict_boxes_when_training: also decode boxes during training.
        """
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        use_bn = self.model_cfg.USE_BN
        self.predict_boxes_when_training = predict_boxes_when_training
        # Vote layer: 3-channel output per point (used as vote coordinates,
        # supervised in get_vote_layer_loss against object centroids).
        self.vote_cfg = self.model_cfg.VOTE_CONFIG
        self.vote_layers = self.make_fc_layers(
            input_channels=input_channels,
            output_channels=3,
            fc_list=self.vote_cfg.VOTE_FC
        )
        # Multi-scale grouping SA module; each MLP spec gets the input channel
        # prepended, and the output channels of all scales are concatenated.
        self.sa_cfg = self.model_cfg.SA_CONFIG
        channel_in, channel_out = input_channels, 0
        mlps = self.sa_cfg.MLPS.copy()
        for idx in range(mlps.__len__()):
            mlps[idx] = [channel_in] + mlps[idx]
            channel_out += mlps[idx][-1]
        self.SA_module = pointnet2_modules.PointnetSAModuleFSMSG(
            radii=self.sa_cfg.RADIUS,
            nsamples=self.sa_cfg.NSAMPLE,
            mlps=mlps,
            use_xyz=True,
            bn=use_bn
        )
        channel_in = channel_out
        # Shared Conv1d-BN-ReLU stack applied before the cls/reg branches.
        shared_fc_list = []
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend([
                nn.Conv1d(channel_in, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
                nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                nn.ReLU()
            ])
            channel_in = self.model_cfg.SHARED_FC[k]
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)
        channel_in = self.model_cfg.SHARED_FC[-1]
        # Softmax CrossEntropy needs an extra background logit; every other
        # configured loss uses num_class outputs.
        self.cls_layers = self.make_fc_layers(
            input_channels=channel_in,
            output_channels=num_class if not self.model_cfg.LOSS_CONFIG.LOSS_CLS == 'CrossEntropy' else num_class + 1,
            fc_list=self.model_cfg.CLS_FC
        )
        target_cfg = self.model_cfg.TARGET_CONFIG
        # Box coder determines the regression encoding and its code size.
        self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
            **target_cfg.BOX_CODER_CONFIG
        )
        self.reg_layers = self.make_fc_layers(
            input_channels=channel_in,
            output_channels=self.box_coder.code_size,
            fc_list=self.model_cfg.REG_FC
        )
        self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
    def build_losses(self, losses_cfg):
        """Register the classification, regression and optional SASA loss modules.

        Modules are registered via add_module so they move with the head across
        devices. Unknown LOSS_CLS / LOSS_REG names raise NotImplementedError.
        """
        # classification loss
        if losses_cfg.LOSS_CLS.startswith('WeightedBinaryCrossEntropy'):
            self.add_module(
                'cls_loss_func',
                loss_utils.WeightedBinaryCrossEntropyLoss()
            )
        elif losses_cfg.LOSS_CLS == 'WeightedCrossEntropy':
            self.add_module(
                'cls_loss_func',
                loss_utils.WeightedCrossEntropyLoss()
            )
        elif losses_cfg.LOSS_CLS == 'FocalLoss':
            self.add_module(
                'cls_loss_func',
                loss_utils.SigmoidFocalClassificationLoss(
                    **losses_cfg.get('LOSS_CLS_CONFIG', {})
                )
            )
        else:
            raise NotImplementedError
        # regression loss
        if losses_cfg.LOSS_REG == 'WeightedSmoothL1Loss':
            self.add_module(
                'reg_loss_func',
                loss_utils.WeightedSmoothL1Loss(
                    code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None),
                    **losses_cfg.get('LOSS_REG_CONFIG', {})
                )
            )
        elif losses_cfg.LOSS_REG == 'WeightedL1Loss':
            self.add_module(
                'reg_loss_func',
                loss_utils.WeightedL1Loss(
                    code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None)
                )
            )
        else:
            raise NotImplementedError
        # sasa loss (optional auxiliary loss; absent config disables it and
        # get_sasa_layer_loss returns (None, None))
        loss_sasa_cfg = losses_cfg.get('LOSS_SASA_CONFIG', None)
        if loss_sasa_cfg is not None:
            self.enable_sasa = True
            self.add_module(
                'loss_point_sasa',
                loss_utils.PointSASALoss(**loss_sasa_cfg)
            )
        else:
            self.enable_sasa = False
def make_fc_layers(self, input_channels, output_channels, fc_list):
fc_layers = []
pre_channel = input_channels
for k in range(0, fc_list.__len__()):
fc_layers.extend([
nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False),
nn.BatchNorm1d(fc_list[k]),
nn.ReLU()
])
pre_channel = fc_list[k]
fc_layers.append(nn.Conv1d(pre_channel, output_channels, kernel_size=1, bias=True))
fc_layers = nn.Sequential(*fc_layers)
return fc_layers
    def assign_stack_targets_simple(self, points, gt_boxes, extend_gt_boxes=None, set_ignore_flag=True):
        """
        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            gt_boxes: (B, M, 8)
            extend_gt_boxes: (B, M, 8), required if set ignore flag
            set_ignore_flag:
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignore
            point_reg_labels: (N1 + N2 + N3 + ..., 3), corresponding object centroid
        """
        assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)
        assert len(gt_boxes.shape) == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3, \
            'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape)
        assert not set_ignore_flag or extend_gt_boxes is not None
        batch_size = gt_boxes.shape[0]
        bs_idx = points[:, 0]
        point_cls_labels = points.new_zeros(points.shape[0]).long()
        point_reg_labels = gt_boxes.new_zeros((points.shape[0], 3))
        # labels are assigned per batch sample since each has its own box set
        for k in range(batch_size):
            bs_mask = (bs_idx == k)
            points_single = points[bs_mask][:, 1:4]
            point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())
            # index of the box containing each point; -1 when outside all boxes
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous()
            ).long().squeeze(dim=0)
            box_fg_flag = (box_idxs_of_pts >= 0)
            if extend_gt_boxes is not None:
                # points inside the enlarged boxes but outside the originals
                # lie in the border band: mark them ignored (-1)
                extend_box_idx_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                    points_single.unsqueeze(dim=0), extend_gt_boxes[k:k + 1, :, 0:7].contiguous()
                ).long().squeeze(dim=0)
                fg_flag = box_fg_flag
                ignore_flag = fg_flag ^ (extend_box_idx_of_pts >= 0)
                point_cls_labels_single[ignore_flag] = -1
            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[box_fg_flag]]
            point_cls_labels_single[box_fg_flag] = 1  # binary foreground label only
            point_cls_labels[bs_mask] = point_cls_labels_single
            # regression target: centroid (x, y, z) of the containing box
            point_reg_labels_single = point_reg_labels.new_zeros((bs_mask.sum(), 3))
            point_reg_labels_single[box_fg_flag] = gt_box_of_fg_points[:, 0:3]
            point_reg_labels[bs_mask] = point_reg_labels_single
        targets_dict = {
            'point_cls_labels': point_cls_labels,
            'point_reg_labels': point_reg_labels,
        }
        return targets_dict
def assign_targets_simple(self, points, gt_boxes, extra_width=None, set_ignore_flag=True):
"""
Args:
points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
gt_boxes: (B, M, 8)
extra_width: (dx, dy, dz) extra width applied to gt boxes
assign_method: binary or distance
set_ignore_flag:
Returns:
point_vote_labels: (N1 + N2 + N3 + ..., 3)
"""
assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
assert points.shape.__len__() in [2], 'points.shape=%s' % str(points.shape)
batch_size = gt_boxes.shape[0]
extend_gt_boxes = box_utils.enlarge_box3d(
gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=extra_width
).view(batch_size, -1, gt_boxes.shape[-1]) \
if extra_width is not None else gt_boxes
if set_ignore_flag:
targets_dict = self.assign_stack_targets_simple(points=points, gt_boxes=gt_boxes,
extend_gt_boxes=extend_gt_boxes,
set_ignore_flag=set_ignore_flag)
else:
targets_dict = self.assign_stack_targets_simple(points=points, gt_boxes=extend_gt_boxes,
set_ignore_flag=set_ignore_flag)
return targets_dict
    def assign_stack_targets_mask(self, points, gt_boxes, extend_gt_boxes=None,
                                  set_ignore_flag=True, use_ball_constraint=False, central_radius=2.0):
        """
        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            gt_boxes: (B, M, 8)
            extend_gt_boxes: [B, M, 8], required when set_ignore_flag is True
            set_ignore_flag: ignore points in the band between box and enlarged box
            use_ball_constraint: only points within central_radius of the box
                center count as foreground (mutually exclusive with set_ignore_flag)
            central_radius:
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_reg_labels: (N1 + N2 + N3 + ..., code_size)
            point_box_labels: (N1 + N2 + N3 + ..., 7)
        """
        assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)
        assert len(gt_boxes.shape) == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3, \
            'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape)
        assert set_ignore_flag != use_ball_constraint, 'Choose one only!'
        batch_size = gt_boxes.shape[0]
        bs_idx = points[:, 0]
        point_cls_labels = gt_boxes.new_zeros(points.shape[0]).long()
        point_reg_labels = gt_boxes.new_zeros((points.shape[0], self.box_coder.code_size))
        point_box_labels = gt_boxes.new_zeros((points.shape[0], gt_boxes.size(2) - 1))
        for k in range(batch_size):
            bs_mask = (bs_idx == k)
            points_single = points[bs_mask][:, 1:4]
            point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())
            # index of the box containing each point; -1 when outside all boxes
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous()
            ).long().squeeze(dim=0)
            box_fg_flag = (box_idxs_of_pts >= 0)
            if set_ignore_flag:
                # border band between the original and the enlarged boxes -> -1
                extend_box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                    points_single.unsqueeze(dim=0), extend_gt_boxes[k:k+1, :, 0:7].contiguous()
                ).long().squeeze(dim=0)
                fg_flag = box_fg_flag
                ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0)
                point_cls_labels_single[ignore_flag] = -1
            elif use_ball_constraint:
                # keep only points close to the box center as foreground;
                # in-box points outside the ball are ignored rather than background
                box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone()
                ball_flag = ((box_centers - points_single).norm(dim=1) < central_radius)
                fg_flag = box_fg_flag & ball_flag
                ignore_flag = fg_flag ^ box_fg_flag
                point_cls_labels_single[ignore_flag] = -1
            else:
                raise NotImplementedError
            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
            # single-class setups use binary labels; otherwise the GT class id
            point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 else gt_box_of_fg_points[:, -1].long()
            point_cls_labels[bs_mask] = point_cls_labels_single
            if gt_box_of_fg_points.shape[0] > 0:
                # encoded regression targets for the foreground points
                point_reg_labels_single = point_reg_labels.new_zeros((bs_mask.sum(), self.box_coder.code_size))
                fg_point_box_labels = self.box_coder.encode_torch(
                    gt_boxes=gt_box_of_fg_points[:, :-1], points=points_single[fg_flag],
                    gt_classes=gt_box_of_fg_points[:, -1].long()
                )
                point_reg_labels_single[fg_flag] = fg_point_box_labels
                point_reg_labels[bs_mask] = point_reg_labels_single
                # raw (un-encoded) box parameters, used e.g. for centerness/IoU losses
                point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), gt_boxes.size(2) - 1))
                point_box_labels_single[fg_flag] = gt_box_of_fg_points[:, :-1]
                point_box_labels[bs_mask] = point_box_labels_single
        targets_dict = {
            'point_cls_labels': point_cls_labels,
            'point_reg_labels': point_reg_labels,
            'point_box_labels': point_box_labels
        }
        return targets_dict
    def assign_stack_targets_iou(self, points, pred_boxes, gt_boxes,
                                 pos_iou_threshold=0.5, neg_iou_threshold=0.35):
        """
        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            pred_boxes: (N, 7/8), assumed row-aligned with *points*
            gt_boxes: (B, M, 8)
            pos_iou_threshold: IoU above which a prediction is foreground
            neg_iou_threshold: IoU above which (but below pos) it is ignored
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_reg_labels: (N1 + N2 + N3 + ..., code_size)
            point_box_labels: (N1 + N2 + N3 + ..., 7)
        """
        assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)
        assert len(pred_boxes.shape) == 2 and pred_boxes.shape[1] >= 7, 'pred_boxes.shape=%s' % str(pred_boxes.shape)
        assert len(gt_boxes.shape) == 3 and gt_boxes.shape[2] == 8, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        batch_size = gt_boxes.shape[0]
        bs_idx = points[:, 0]
        point_cls_labels = gt_boxes.new_zeros(pred_boxes.shape[0]).long()
        point_reg_labels = gt_boxes.new_zeros((pred_boxes.shape[0], self.box_coder.code_size))
        point_box_labels = gt_boxes.new_zeros((pred_boxes.shape[0], 7))
        for k in range(batch_size):
            bs_mask = (bs_idx == k)
            points_single = points[bs_mask][:, 1:4]
            pred_boxes_single = pred_boxes[bs_mask]
            point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())
            # 3D IoU of each predicted box against every GT box of this sample
            pred_boxes_iou = iou3d_nms_utils.boxes_iou3d_gpu(
                pred_boxes_single,
                gt_boxes[k][:, :7]
            )
            # best-matching GT per prediction
            pred_boxes_iou, box_idxs_of_pts = torch.max(pred_boxes_iou, dim=-1)
            fg_flag = pred_boxes_iou > pos_iou_threshold
            # in the (neg, pos] gap: neither positive nor negative -> ignored
            ignore_flag = (pred_boxes_iou > neg_iou_threshold) ^ fg_flag
            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
            point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 else gt_box_of_fg_points[:, -1].long()
            point_cls_labels_single[ignore_flag] = -1
            point_cls_labels[bs_mask] = point_cls_labels_single
            if gt_box_of_fg_points.shape[0] > 0:
                # encoded regression targets for the foreground predictions
                point_reg_labels_single = point_reg_labels.new_zeros((bs_mask.sum(), self.box_coder.code_size))
                fg_point_box_labels = self.box_coder.encode_torch(
                    gt_boxes=gt_box_of_fg_points[:, :-1], points=points_single[fg_flag],
                    gt_classes=gt_box_of_fg_points[:, -1].long()
                )
                point_reg_labels_single[fg_flag] = fg_point_box_labels
                point_reg_labels[bs_mask] = point_reg_labels_single
                # raw box parameters for auxiliary IoU / corner losses
                point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), 7))
                point_box_labels_single[fg_flag] = gt_box_of_fg_points[:, :-1]
                point_box_labels[bs_mask] = point_box_labels_single
        targets_dict = {
            'point_cls_labels': point_cls_labels,
            'point_reg_labels': point_reg_labels,
            'point_box_labels': point_box_labels
        }
        return targets_dict
def assign_targets(self, input_dict):
"""
Args:
input_dict:
batch_size:
point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
gt_boxes (optional): (B, M, 8)
Returns:
point_part_labels: (N1 + N2 + N3 + ..., 3)
"""
assign_method = self.model_cfg.TARGET_CONFIG.ASSIGN_METHOD # mask or iou
if assign_method == 'mask':
points = input_dict['point_vote_coords']
gt_boxes = input_dict['gt_boxes']
assert points.shape.__len__() == 2, 'points.shape=%s' % str(points.shape)
assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
central_radius = self.model_cfg.TARGET_CONFIG.get('GT_CENTRAL_RADIUS', 2.0)
targets_dict = self.assign_stack_targets_mask(
points=points, gt_boxes=gt_boxes,
set_ignore_flag=False, use_ball_constraint=True, central_radius=central_radius
)
elif assign_method == 'iou':
points = input_dict['point_vote_coords']
pred_boxes = input_dict['point_box_preds']
gt_boxes = input_dict['gt_boxes']
assert points.shape.__len__() == 2, 'points.shape=%s' % str(points.shape)
assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
assert pred_boxes.shape.__len__() == 2, 'pred_boxes.shape=%s' % str(pred_boxes.shape)
pos_iou_threshold = self.model_cfg.TARGET_CONFIG.POS_IOU_THRESHOLD
neg_iou_threshold = self.model_cfg.TARGET_CONFIG.NEG_IOU_THRESHOLD
targets_dict = self.assign_stack_targets_iou(
points=points, pred_boxes=pred_boxes, gt_boxes=gt_boxes,
pos_iou_threshold=pos_iou_threshold, neg_iou_threshold=neg_iou_threshold
)
else:
raise NotImplementedError
return targets_dict
def get_vote_layer_loss(self, tb_dict=None):
pos_mask = self.forward_ret_dict['vote_cls_labels'] > 0
vote_reg_labels = self.forward_ret_dict['vote_reg_labels']
vote_reg_preds = self.forward_ret_dict['point_vote_coords']
reg_weights = pos_mask.float()
pos_normalizer = pos_mask.sum().float()
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
vote_loss_reg_src = self.reg_loss_func(
vote_reg_preds[None, ...],
vote_reg_labels[None, ...],
weights=reg_weights[None, ...])
vote_loss_reg = vote_loss_reg_src.sum()
loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
vote_loss_reg = vote_loss_reg * loss_weights_dict['vote_reg_weight']
if tb_dict is None:
tb_dict = {}
tb_dict.update({'vote_loss_reg': vote_loss_reg.item()})
return vote_loss_reg, tb_dict
    @torch.no_grad()
    def generate_centerness_label(self, point_base, point_box_labels, pos_mask, epsilon=1e-6):
        """
        Args:
            point_base: (N1 + N2 + N3 + ..., 3)
            point_box_labels: (N1 + N2 + N3 + ..., 7)
            pos_mask: (N1 + N2 + N3 + ...)
            epsilon: floor applied before the cube root to avoid zero/negative products
        Returns:
            centerness_label: (N1 + N2 + N3 + ...), zero for non-positive points
        """
        centerness = point_box_labels.new_zeros(pos_mask.shape)
        point_box_labels = point_box_labels[pos_mask, :]
        # offsets from the assigned box centers, for positive points only
        canonical_xyz = point_base[pos_mask, :] - point_box_labels[:, :3]
        rys = point_box_labels[:, -1]
        # rotate by -heading so the distances below are measured along the box axes
        canonical_xyz = common_utils.rotate_points_along_z(
            canonical_xyz.unsqueeze(dim=1), -rys
        ).squeeze(dim=1)
        distance_front = point_box_labels[:, 3] / 2 - canonical_xyz[:, 0]
        distance_back = point_box_labels[:, 3] / 2 + canonical_xyz[:, 0]
        distance_left = point_box_labels[:, 4] / 2 - canonical_xyz[:, 1]
        distance_right = point_box_labels[:, 4] / 2 + canonical_xyz[:, 1]
        distance_top = point_box_labels[:, 5] / 2 - canonical_xyz[:, 2]
        distance_bottom = point_box_labels[:, 5] / 2 + canonical_xyz[:, 2]
        # per-axis min/max distance ratio: 1 at the box center, -> 0 at a face
        centerness_l = torch.min(distance_front, distance_back) / torch.max(distance_front, distance_back)
        centerness_w = torch.min(distance_left, distance_right) / torch.max(distance_left, distance_right)
        centerness_h = torch.min(distance_top, distance_bottom) / torch.max(distance_top, distance_bottom)
        # geometric mean of the three ratios, clamped to stay well-defined
        centerness_pos = torch.clamp(centerness_l * centerness_w * centerness_h, min=epsilon) ** (1 / 3.0)
        centerness[pos_mask] = centerness_pos
        return centerness
def get_axis_aligned_iou_loss_lidar(self, pred_boxes: torch.Tensor, gt_boxes: torch.Tensor):
"""
Args:
pred_boxes: (N, 7) float Tensor.
gt_boxes: (N, 7) float Tensor.
Returns:
iou_loss: (N) float Tensor.
"""
assert pred_boxes.shape[0] == gt_boxes.shape[0]
pos_p, len_p, *cps = torch.split(pred_boxes, 3, dim=-1)
pos_g, len_g, *cgs = torch.split(gt_boxes, 3, dim=-1)
len_p = torch.clamp(len_p, min=1e-5)
len_g = torch.clamp(len_g, min=1e-5)
vol_p = len_p.prod(dim=-1)
vol_g = len_g.prod(dim=-1)
min_p, max_p = pos_p - len_p / 2, pos_p + len_p / 2
min_g, max_g = pos_g - len_g / 2, pos_g + len_g / 2
min_max = torch.min(max_p, max_g)
max_min = torch.max(min_p, min_g)
diff = torch.clamp(min_max - max_min, min=0)
intersection = diff.prod(dim=-1)
union = vol_p + vol_g - intersection
iou_axis_aligned = intersection / torch.clamp(union, min=1e-5)
iou_loss = 1 - iou_axis_aligned
return iou_loss
    def get_corner_loss_lidar(self, pred_boxes: torch.Tensor, gt_boxes: torch.Tensor):
        """
        Args:
            pred_boxes: (N, 7) float Tensor.
            gt_boxes: (N, 7) float Tensor.
        Returns:
            corner_loss: (N) float Tensor, mean smooth-L1 distance over the 8 corners.
        """
        assert pred_boxes.shape[0] == gt_boxes.shape[0]
        pred_box_corners = box_utils.boxes_to_corners_3d(pred_boxes)
        gt_box_corners = box_utils.boxes_to_corners_3d(gt_boxes)
        # heading is ambiguous up to pi: also compare against the flipped GT box
        gt_boxes_flip = gt_boxes.clone()
        gt_boxes_flip[:, 6] += np.pi
        gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_boxes_flip)
        # (N, 8, 3)
        corner_loss = loss_utils.WeightedSmoothL1Loss.smooth_l1_loss(pred_box_corners - gt_box_corners, 1.0)
        corner_loss_flip = loss_utils.WeightedSmoothL1Loss.smooth_l1_loss(pred_box_corners - gt_box_corners_flip, 1.0)
        # take the smaller of the two orientations, then average over the corners
        corner_loss = torch.min(corner_loss.sum(dim=2), corner_loss_flip.sum(dim=2))
        return corner_loss.mean(dim=1)
    def get_cls_layer_loss(self, tb_dict=None):
        """Per-point classification loss, optionally modulated by centerness.

        Returns:
            point_loss_cls: (N) per-point weighted classification loss
            cls_weights: (N) weights (1 for positives and negatives, 0 for ignored)
            tb_dict: with 'point_pos_num' logged
        """
        point_cls_labels = self.forward_ret_dict['point_cls_labels'].view(-1)
        point_cls_preds = self.forward_ret_dict['point_cls_preds'].view(-1, self.num_class)
        positives = point_cls_labels > 0
        negatives = point_cls_labels == 0
        # ignored points (label -1) get weight 0 and do not contribute
        cls_weights = positives * 1.0 + negatives * 1.0
        # one-hot over num_class + 1 slots; slot 0 is background; ignored labels
        # are mapped to slot 0 via the (label >= 0) mask before scatter
        one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class + 1)
        one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0)
        self.forward_ret_dict['point_cls_labels_onehot'] = one_hot_targets
        loss_cfgs = self.model_cfg.LOSS_CONFIG
        if 'WithCenterness' in loss_cfgs.LOSS_CLS:
            # soften positive targets by how centered each vote is in its GT box
            point_base = self.forward_ret_dict['point_vote_coords']
            point_box_labels = self.forward_ret_dict['point_box_labels']
            centerness_label = self.generate_centerness_label(point_base, point_box_labels, positives)
            loss_cls_cfg = loss_cfgs.get('LOSS_CLS_CONFIG', None)
            centerness_min = loss_cls_cfg['centerness_min'] if loss_cls_cfg is not None else 0.0
            centerness_max = loss_cls_cfg['centerness_max'] if loss_cls_cfg is not None else 1.0
            centerness_label = centerness_min + (centerness_max - centerness_min) * centerness_label
            one_hot_targets *= centerness_label.unsqueeze(dim=-1)
        # drop the background slot; the loss sees num_class foreground targets
        point_loss_cls = self.cls_loss_func(point_cls_preds, one_hot_targets[..., 1:], weights=cls_weights)
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_cls = point_loss_cls * loss_weights_dict['point_cls_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({
            'point_pos_num': positives.sum().item()
        })
        return point_loss_cls, cls_weights, tb_dict  # point_loss_cls: (N)
    def get_box_layer_loss(self, tb_dict=None):
        """Per-point box regression loss: offset/size terms, heading terms,
        plus optional axis-aligned-IoU and corner-loss regularization.

        Regression code layout (first 6 dims are offset+size; the remainder is
        heading — either bin-cls + bin-residual for PointBinResidualCoder, or a
        direct residual otherwise; velocity dims follow when the coder predicts
        velocity).

        Returns:
            point_loss_box: (N) per-point loss
            reg_weights: (N) foreground mask as float weights
            tb_dict: passed through (created if None)
        """
        pos_mask = self.forward_ret_dict['point_cls_labels'] > 0
        point_reg_preds = self.forward_ret_dict['point_reg_preds']
        point_reg_labels = self.forward_ret_dict['point_reg_labels']
        reg_weights = pos_mask.float()
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        if tb_dict is None:
            tb_dict = {}
        # offset + size terms (first 6 code dims)
        point_loss_offset_reg = self.reg_loss_func(
            point_reg_preds[None, :, :6],
            point_reg_labels[None, :, :6],
            weights=reg_weights[None, ...]
        )
        point_loss_offset_reg = point_loss_offset_reg.sum(dim=-1).squeeze()
        if hasattr(self.box_coder, 'pred_velo') and self.box_coder.pred_velo:
            # velocity dims sit after the 2 * angle_bin_num heading dims
            point_loss_velo_reg = self.reg_loss_func(
                point_reg_preds[None, :, 6 + 2 * self.box_coder.angle_bin_num:8 + 2 * self.box_coder.angle_bin_num],
                point_reg_labels[None, :, 6 + 2 * self.box_coder.angle_bin_num:8 + 2 * self.box_coder.angle_bin_num],
                weights=reg_weights[None, ...]
            )
            point_loss_velo_reg = point_loss_velo_reg.sum(dim=-1).squeeze()
            point_loss_offset_reg = point_loss_offset_reg + point_loss_velo_reg
        point_loss_offset_reg *= loss_weights_dict['point_offset_reg_weight']
        if isinstance(self.box_coder, box_coder_utils.PointBinResidualCoder):
            # heading = classified angle bin + per-bin residual
            point_angle_cls_labels = \
                point_reg_labels[:, 6:6 + self.box_coder.angle_bin_num]
            point_loss_angle_cls = F.cross_entropy(  # angle bin cls
                point_reg_preds[:, 6:6 + self.box_coder.angle_bin_num],
                point_angle_cls_labels.argmax(dim=-1), reduction='none') * reg_weights
            # select the residual of the GT bin via the one-hot bin labels
            point_angle_reg_preds = point_reg_preds[:, 6 + self.box_coder.angle_bin_num:6 + 2 * self.box_coder.angle_bin_num]
            point_angle_reg_labels = point_reg_labels[:, 6 + self.box_coder.angle_bin_num:6 + 2 * self.box_coder.angle_bin_num]
            point_angle_reg_preds = (point_angle_reg_preds * point_angle_cls_labels).sum(dim=-1, keepdim=True)
            point_angle_reg_labels = (point_angle_reg_labels * point_angle_cls_labels).sum(dim=-1, keepdim=True)
            point_loss_angle_reg = self.reg_loss_func(
                point_angle_reg_preds[None, ...],
                point_angle_reg_labels[None, ...],
                weights=reg_weights[None, ...]
            )
            point_loss_angle_reg = point_loss_angle_reg.squeeze()
            point_loss_angle_cls *= loss_weights_dict['point_angle_cls_weight']
            point_loss_angle_reg *= loss_weights_dict['point_angle_reg_weight']
            point_loss_box = point_loss_offset_reg + point_loss_angle_cls + point_loss_angle_reg  # (N)
        else:
            # direct heading residual regression
            point_angle_reg_preds = point_reg_preds[:, 6:]
            point_angle_reg_labels = point_reg_labels[:, 6:]
            point_loss_angle_reg = self.reg_loss_func(
                point_angle_reg_preds[None, ...],
                point_angle_reg_labels[None, ...],
                weights=reg_weights[None, ...]
            )
            point_loss_angle_reg *= loss_weights_dict['point_angle_reg_weight']
            point_loss_box = point_loss_offset_reg + point_loss_angle_reg
        if reg_weights.sum() > 0:
            # auxiliary regularizers computed on decoded boxes, foreground only
            point_box_preds = self.forward_ret_dict['point_box_preds']
            point_box_labels = self.forward_ret_dict['point_box_labels']
            point_loss_box_aux = 0
            if self.model_cfg.LOSS_CONFIG.get('AXIS_ALIGNED_IOU_LOSS_REGULARIZATION', False):
                point_loss_iou = self.get_axis_aligned_iou_loss_lidar(
                    point_box_preds[pos_mask, :],
                    point_box_labels[pos_mask, :]
                )
                point_loss_iou *= self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['point_iou_weight']
                point_loss_box_aux = point_loss_box_aux + point_loss_iou
            if self.model_cfg.LOSS_CONFIG.get('CORNER_LOSS_REGULARIZATION', False):
                point_loss_corner = self.get_corner_loss_lidar(
                    point_box_preds[pos_mask, 0:7],
                    point_box_labels[pos_mask, 0:7]
                )
                point_loss_corner *= self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['point_corner_weight']
                point_loss_box_aux = point_loss_box_aux + point_loss_corner
            point_loss_box[pos_mask] = point_loss_box[pos_mask] + point_loss_box_aux
        return point_loss_box, reg_weights, tb_dict  # point_loss_box: (N)
def get_sasa_layer_loss(self, tb_dict=None):
    """Aggregate the per-layer SASA auxiliary losses.

    Returns:
        (loss, tb_dict): summed SASA loss tensor and tensorboard scalars,
        or (None, None) when SASA is disabled.
        NOTE: the incoming tb_dict argument is ignored; a fresh dict is built.
    """
    if not self.enable_sasa:
        return None, None
    layer_losses = self.loss_point_sasa.loss_forward(
        self.forward_ret_dict['point_sasa_preds'],
        self.forward_ret_dict['point_sasa_labels']
    )
    total = 0
    tb_dict = dict()
    for idx, layer_loss in enumerate(layer_losses):
        # A layer may opt out of supervision and return None.
        if layer_loss is None:
            continue
        total = total + layer_loss
        tb_dict['point_loss_sasa_layer_%d' % idx] = layer_loss.item()
    tb_dict['point_loss_sasa'] = total.item()
    return total, tb_dict
def get_loss(self, tb_dict=None):
    """Combine vote, classification, box and (optional) SASA losses.

    Returns:
        point_loss: scalar total loss tensor
        tb_dict: tensorboard scalars merged from every sub-loss
    """
    tb_dict = tb_dict if tb_dict is not None else {}
    loss_vote, vote_tb = self.get_vote_layer_loss()
    loss_cls, cls_weights, cls_tb = self.get_cls_layer_loss()
    loss_box, box_weights, box_tb = self.get_box_layer_loss()
    # Normalize the per-point cls/box losses by their (clamped) weight sums.
    loss_cls = loss_cls.sum() / torch.clamp(cls_weights.sum(), min=1.0)
    loss_box = loss_box.sum() / torch.clamp(box_weights.sum(), min=1.0)
    tb_dict.update({
        'point_loss_vote': loss_vote.item(),
        'point_loss_cls': loss_cls.item(),
        'point_loss_box': loss_box.item()
    })
    total_loss = loss_vote + loss_cls + loss_box
    for sub_tb in (vote_tb, cls_tb, box_tb):
        tb_dict.update(sub_tb)
    loss_sasa, sasa_tb = self.get_sasa_layer_loss()
    if loss_sasa is not None:
        tb_dict.update(sasa_tb)
        total_loss += loss_sasa
    return total_loss, tb_dict
def forward(self, batch_dict):
    """
    Vote-based point head forward pass: sample candidate points, predict
    clamped vote offsets, aggregate features at the vote locations, then
    predict per-point classification and box regression.

    Args:
        batch_dict:
            batch_size:
            point_features: (N1 + N2 + N3 + ..., C)
            point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            point_scores (optional): (B, N)
            gt_boxes (optional): (B, M, 8)
    Returns:
        batch_dict:
            point_cls_scores: (N1 + N2 + N3 + ..., 1)
            point_part_offset: (N1 + N2 + N3 + ..., 3)
    """
    batch_size = batch_dict['batch_size']
    point_coords = batch_dict['point_coords']
    point_features = batch_dict['point_features']
    # Split off the batch-index column and reshape flat per-point tensors
    # into batched layout; the .view calls assume every sample in the
    # batch carries the same number of points.
    batch_idx, point_coords = point_coords[:, 0], point_coords[:, 1:4]
    batch_idx = batch_idx.view(batch_size, -1, 1)
    point_coords = point_coords.view(batch_size, -1, 3).contiguous()
    point_features = point_features.reshape(
        batch_size,
        point_coords.size(1),
        -1
    ).permute(0, 2, 1).contiguous()  # (B, C, N)
    # candidate points sampling: take the configured index range as vote seeds
    sample_range = self.model_cfg.SAMPLE_RANGE
    sample_batch_idx = batch_idx[:, sample_range[0]:sample_range[1], :].contiguous()
    candidate_coords = point_coords[:, sample_range[0]:sample_range[1], :].contiguous()
    candidate_features = point_features[:, :, sample_range[0]:sample_range[1]].contiguous()
    # generate vote points: predicted offsets are clamped element-wise into
    # +/- MAX_TRANSLATION_RANGE before being added to the candidate coords
    vote_offsets = self.vote_layers(candidate_features)  # (B, 3, N)
    vote_translation_range = np.array(self.vote_cfg.MAX_TRANSLATION_RANGE, dtype=np.float32)
    vote_translation_range = torch.from_numpy(vote_translation_range).cuda().unsqueeze(dim=0).unsqueeze(dim=-1)
    vote_offsets = torch.max(vote_offsets, -vote_translation_range)
    vote_offsets = torch.min(vote_offsets, vote_translation_range)
    vote_coords = candidate_coords + vote_offsets.permute(0, 2, 1).contiguous()
    ret_dict = {'batch_size': batch_size,
                'point_candidate_coords': candidate_coords.view(-1, 3).contiguous(),
                'point_vote_coords': vote_coords.view(-1, 3).contiguous()}
    sample_batch_idx_flatten = sample_batch_idx.view(-1, 1).contiguous()  # (N, 1)
    batch_dict['batch_index'] = sample_batch_idx_flatten.squeeze(-1)
    # Re-attach the batch index so downstream consumers get (bs_idx, x, y, z).
    batch_dict['point_candidate_coords'] = torch.cat(  # (N, 4)
        (sample_batch_idx_flatten, ret_dict['point_candidate_coords']), dim=-1)
    batch_dict['point_vote_coords'] = torch.cat(  # (N, 4)
        (sample_batch_idx_flatten, ret_dict['point_vote_coords']), dim=-1)
    if self.training:  # assign targets for vote loss
        extra_width = self.model_cfg.TARGET_CONFIG.get('VOTE_EXTRA_WIDTH', None)
        targets_dict = self.assign_targets_simple(batch_dict['point_candidate_coords'],
                                                  batch_dict['gt_boxes'],
                                                  extra_width=extra_width,
                                                  set_ignore_flag=False)
        ret_dict['vote_cls_labels'] = targets_dict['point_cls_labels']  # (N)
        ret_dict['vote_reg_labels'] = targets_dict['point_reg_labels']  # (N, 3)
    # Aggregate neighborhood features at the vote locations via the
    # set-abstraction module.
    _, point_features, _ = self.SA_module(
        point_coords,
        point_features,
        new_xyz=vote_coords
    )
    point_features = self.shared_fc_layer(point_features)
    point_cls_preds = self.cls_layers(point_features)
    point_reg_preds = self.reg_layers(point_features)
    # Flatten predictions back to (total_points, C) layout.
    point_cls_preds = point_cls_preds.permute(0, 2, 1).contiguous()
    point_cls_preds = point_cls_preds.view(-1, point_cls_preds.shape[-1]).contiguous()
    point_reg_preds = point_reg_preds.permute(0, 2, 1).contiguous()
    point_reg_preds = point_reg_preds.view(-1, point_reg_preds.shape[-1]).contiguous()
    point_cls_scores = torch.sigmoid(point_cls_preds)
    batch_dict['point_cls_scores'] = point_cls_scores
    # Decode box regression relative to the vote coordinates.
    point_box_preds = self.box_coder.decode_torch(point_reg_preds,
                                                  ret_dict['point_vote_coords'])
    batch_dict['point_box_preds'] = point_box_preds
    ret_dict.update({'point_cls_preds': point_cls_preds,
                     'point_reg_preds': point_reg_preds,
                     'point_box_preds': point_box_preds,
                     'point_cls_scores': point_cls_scores})
    if self.training:
        targets_dict = self.assign_targets(batch_dict)
        ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
        ret_dict['point_reg_labels'] = targets_dict['point_reg_labels']
        ret_dict['point_box_labels'] = targets_dict['point_box_labels']
        if self.enable_sasa:
            # Collect per-layer supervision targets for the SASA auxiliary loss.
            point_sasa_labels = self.loss_point_sasa(
                batch_dict['point_coords_list'],
                batch_dict['point_scores_list'],
                batch_dict['gt_boxes']
            )
            ret_dict.update({
                'point_sasa_preds': batch_dict['point_scores_list'],
                'point_sasa_labels': point_sasa_labels
            })
    if not self.training or self.predict_boxes_when_training:
        point_cls_preds, point_box_preds = self.generate_predicted_boxes(
            points=batch_dict['point_vote_coords'][:, 1:4],
            point_cls_preds=point_cls_preds, point_box_preds=point_reg_preds
        )
        batch_dict['batch_cls_preds'] = point_cls_preds
        batch_dict['batch_box_preds'] = point_box_preds
        batch_dict['cls_preds_normalized'] = False
    self.forward_ret_dict = ret_dict
    return batch_dict
| 37,917
| 45.754624
| 127
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/point_intra_part_head.py
|
import torch
from ...utils import box_coder_utils, box_utils
from .point_head_template import PointHeadTemplate
class PointIntraPartOffsetHead(PointHeadTemplate):
    """
    Point-based head for predicting the intra-object part locations.
    Reference Paper: https://arxiv.org/abs/1907.03670
    From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
    """
    def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
        """Build the cls / part-regression branches and, when a BOX_CODER is
        configured, an additional box-regression branch.
        """
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.predict_boxes_when_training = predict_boxes_when_training
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )
        # 3 output channels: normalized intra-object (x, y, z) part offsets.
        self.part_reg_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.PART_FC,
            input_channels=input_channels,
            output_channels=3
        )
        target_cfg = self.model_cfg.TARGET_CONFIG
        # The box branch is optional; without a BOX_CODER this head only
        # predicts classification and part offsets.
        if target_cfg.get('BOX_CODER', None) is not None:
            self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
                **target_cfg.BOX_CODER_CONFIG
            )
            self.box_layers = self.make_fc_layers(
                fc_cfg=self.model_cfg.REG_FC,
                input_channels=input_channels,
                output_channels=self.box_coder.code_size
            )
        else:
            self.box_layers = None
    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict['point_coords']
        gt_boxes = input_dict['gt_boxes']
        assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)
        batch_size = gt_boxes.shape[0]
        # Enlarge gt boxes by GT_EXTRA_WIDTH; points inside the enlarged but
        # outside the original boxes are marked as ignored (-1).
        extend_gt_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
        ).view(batch_size, -1, gt_boxes.shape[-1])
        targets_dict = self.assign_stack_targets(
            points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_part_labels=True, ret_box_labels=(self.box_layers is not None)
        )
        return targets_dict
    def get_loss(self, tb_dict=None):
        """Sum cls + part losses, plus the box loss when the branch exists."""
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict = self.get_cls_layer_loss(tb_dict)
        point_loss_part, tb_dict = self.get_part_layer_loss(tb_dict)
        point_loss = point_loss_cls + point_loss_part
        if self.box_layers is not None:
            point_loss_box, tb_dict = self.get_box_layer_loss(tb_dict)
            point_loss += point_loss_box
        return point_loss, tb_dict
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        point_features = batch_dict['point_features']
        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)
        point_part_preds = self.part_reg_layers(point_features)
        ret_dict = {
            'point_cls_preds': point_cls_preds,
            'point_part_preds': point_part_preds,
        }
        if self.box_layers is not None:
            point_box_preds = self.box_layers(point_features)
            ret_dict['point_box_preds'] = point_box_preds
        # Sigmoid maps part-offset logits into [0, 1] (normalized coords).
        point_cls_scores = torch.sigmoid(point_cls_preds)
        point_part_offset = torch.sigmoid(point_part_preds)
        batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1)
        batch_dict['point_part_offset'] = point_part_offset
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
            ret_dict['point_part_labels'] = targets_dict.get('point_part_labels')
            ret_dict['point_box_labels'] = targets_dict.get('point_box_labels')
        if self.box_layers is not None and (not self.training or self.predict_boxes_when_training):
            point_cls_preds, point_box_preds = self.generate_predicted_boxes(
                points=batch_dict['point_coords'][:, 1:4],
                point_cls_preds=point_cls_preds, point_box_preds=ret_dict['point_box_preds']
            )
            batch_dict['batch_cls_preds'] = point_cls_preds
            batch_dict['batch_box_preds'] = point_box_preds
            batch_dict['batch_index'] = batch_dict['point_coords'][:, 0]
            batch_dict['cls_preds_normalized'] = False
        self.forward_ret_dict = ret_dict
        return batch_dict
| 5,568
| 42.507813
| 107
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/target_assigner/anchor_generator.py
|
import torch
class AnchorGenerator(object):
    """Generates dense 3D anchor grids for each anchor class.

    One anchor set is produced per entry of anchor_generator_config; each
    entry provides 'anchor_sizes', 'anchor_rotations', 'anchor_bottom_heights'
    and optionally 'align_center'.
    """
    def __init__(self, anchor_range, anchor_generator_config):
        super().__init__()
        self.anchor_generator_cfg = anchor_generator_config
        # [x_min, y_min, z_min, x_max, y_max, z_max] of the anchor area.
        self.anchor_range = anchor_range
        self.anchor_sizes = [config['anchor_sizes'] for config in anchor_generator_config]
        self.anchor_rotations = [config['anchor_rotations'] for config in anchor_generator_config]
        self.anchor_heights = [config['anchor_bottom_heights'] for config in anchor_generator_config]
        self.align_center = [config.get('align_center', False) for config in anchor_generator_config]
        assert len(self.anchor_sizes) == len(self.anchor_rotations) == len(self.anchor_heights)
        self.num_of_anchor_sets = len(self.anchor_sizes)
    def generate_anchors(self, grid_sizes):
        """Build one anchor tensor per anchor set.

        Args:
            grid_sizes: per-set BEV grid sizes [[nx, ny], ...].
        Returns:
            all_anchors: list of (nz, ny, nx, num_size, num_rot, 7) CUDA tensors
            num_anchors_per_location: list of ints (sizes * rots * heights)
        """
        assert len(grid_sizes) == self.num_of_anchor_sets
        all_anchors = []
        num_anchors_per_location = []
        for grid_size, anchor_size, anchor_rotation, anchor_height, align_center in zip(
                grid_sizes, self.anchor_sizes, self.anchor_rotations, self.anchor_heights, self.align_center):
            num_anchors_per_location.append(len(anchor_rotation) * len(anchor_size) * len(anchor_height))
            if align_center:
                # Anchors sit at cell centers.
                x_stride = (self.anchor_range[3] - self.anchor_range[0]) / grid_size[0]
                y_stride = (self.anchor_range[4] - self.anchor_range[1]) / grid_size[1]
                x_offset, y_offset = x_stride / 2, y_stride / 2
            else:
                # Anchors sit at cell corners, spanning the full range inclusively.
                x_stride = (self.anchor_range[3] - self.anchor_range[0]) / (grid_size[0] - 1)
                y_stride = (self.anchor_range[4] - self.anchor_range[1]) / (grid_size[1] - 1)
                x_offset, y_offset = 0, 0
            # 1e-5 guards against the endpoint being dropped by float error.
            x_shifts = torch.arange(
                self.anchor_range[0] + x_offset, self.anchor_range[3] + 1e-5, step=x_stride, dtype=torch.float32,
            ).cuda()
            y_shifts = torch.arange(
                self.anchor_range[1] + y_offset, self.anchor_range[4] + 1e-5, step=y_stride, dtype=torch.float32,
            ).cuda()
            z_shifts = x_shifts.new_tensor(anchor_height)
            num_anchor_size, num_anchor_rotation = anchor_size.__len__(), anchor_rotation.__len__()
            anchor_rotation = x_shifts.new_tensor(anchor_rotation)
            anchor_size = x_shifts.new_tensor(anchor_size)
            x_shifts, y_shifts, z_shifts = torch.meshgrid([
                x_shifts, y_shifts, z_shifts
            ])  # [x_grid, y_grid, z_grid]
            anchors = torch.stack((x_shifts, y_shifts, z_shifts), dim=-1)  # [x, y, z, 3]
            # Broadcast every (x, y, z) location across all sizes, then all rotations.
            anchors = anchors[:, :, :, None, :].repeat(1, 1, 1, anchor_size.shape[0], 1)
            anchor_size = anchor_size.view(1, 1, 1, -1, 3).repeat([*anchors.shape[0:3], 1, 1])
            anchors = torch.cat((anchors, anchor_size), dim=-1)
            anchors = anchors[:, :, :, :, None, :].repeat(1, 1, 1, 1, num_anchor_rotation, 1)
            anchor_rotation = anchor_rotation.view(1, 1, 1, 1, -1, 1).repeat([*anchors.shape[0:3], num_anchor_size, 1, 1])
            anchors = torch.cat((anchors, anchor_rotation), dim=-1)  # [x, y, z, num_size, num_rot, 7]
            anchors = anchors.permute(2, 1, 0, 3, 4, 5).contiguous()
            # z was the bottom height; shift by dz/2 so z is the box center.
            anchors[..., 2] += anchors[..., 5] / 2  # shift to box centers
            all_anchors.append(anchors)
        return all_anchors, num_anchors_per_location
if __name__ == '__main__':
    from easydict import EasyDict
    # Smoke test for AnchorGenerator.
    # BUGFIX: AnchorGenerator.__init__ reads config['anchor_bottom_heights'];
    # the previous key 'anchor_heights' raised a KeyError. Also removed the
    # leftover pdb.set_trace() debug breakpoint.
    config = [
        EasyDict({
            'anchor_sizes': [[2.1, 4.7, 1.7], [0.86, 0.91, 1.73], [0.84, 1.78, 1.78]],
            'anchor_rotations': [0, 1.57],
            'anchor_bottom_heights': [0, 0.5]
        })
    ]
    A = AnchorGenerator(
        anchor_range=[-75.2, -75.2, -2, 75.2, 75.2, 4],
        anchor_generator_config=config
    )
    all_anchors, num_anchors_per_location = A.generate_anchors([[188, 188]])
    print([x.shape for x in all_anchors], num_anchors_per_location)
| 3,990
| 48.8875
| 122
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/target_assigner/axis_aligned_target_assigner.py
|
import numpy as np
import torch
from ....ops.iou3d_nms import iou3d_nms_utils
from ....utils import box_utils
class AxisAlignedTargetAssigner(object):
    """Assigns classification labels and box-regression targets to dense
    axis-aligned anchors via IoU matching against ground-truth boxes.
    """
    def __init__(self, model_cfg, class_names, box_coder, match_height=False):
        """
        Args:
            model_cfg: needs ANCHOR_GENERATOR_CONFIG and TARGET_ASSIGNER_CONFIG.
            class_names: ordered class names; gt class ids index into this.
            box_coder: encoder used to produce regression targets.
            match_height: if True use full 3D IoU, else nearest-BEV IoU.
        """
        super().__init__()
        anchor_generator_cfg = model_cfg.ANCHOR_GENERATOR_CONFIG
        anchor_target_cfg = model_cfg.TARGET_ASSIGNER_CONFIG
        self.box_coder = box_coder
        self.match_height = match_height
        self.class_names = np.array(class_names)
        self.anchor_class_names = [config['class_name'] for config in anchor_generator_cfg]
        # POS_FRACTION < 0 disables fg/bg subsampling.
        self.pos_fraction = anchor_target_cfg.POS_FRACTION if anchor_target_cfg.POS_FRACTION >= 0 else None
        self.sample_size = anchor_target_cfg.SAMPLE_SIZE
        self.norm_by_num_examples = anchor_target_cfg.NORM_BY_NUM_EXAMPLES
        # Per-class matched/unmatched IoU thresholds.
        self.matched_thresholds = {}
        self.unmatched_thresholds = {}
        for config in anchor_generator_cfg:
            self.matched_thresholds[config['class_name']] = config['matched_threshold']
            self.unmatched_thresholds[config['class_name']] = config['unmatched_threshold']
        self.use_multihead = model_cfg.get('USE_MULTIHEAD', False)
        self.seperate_multihead = model_cfg.get('SEPARATE_MULTIHEAD', False)
        if self.seperate_multihead:
            # Remap global class ids to per-head local ids (1-based).
            rpn_head_cfgs = model_cfg.RPN_HEAD_CFGS
            self.gt_remapping = {}
            for rpn_head_cfg in rpn_head_cfgs:
                for idx, name in enumerate(rpn_head_cfg['HEAD_CLS_NAME']):
                    self.gt_remapping[name] = idx + 1
    def assign_targets(self, all_anchors, gt_boxes_with_classes):
        """
        Args:
            all_anchors: [(N, 7), ...]
            gt_boxes: (B, M, 8)
        Returns:
            dict with batched 'box_cls_labels', 'box_reg_targets', 'reg_weights'.
        """
        bbox_targets = []
        cls_labels = []
        reg_weights = []
        batch_size = gt_boxes_with_classes.shape[0]
        # Last column is the class id; the rest is the box geometry.
        gt_classes = gt_boxes_with_classes[:, :, -1]
        gt_boxes = gt_boxes_with_classes[:, :, :-1]
        for k in range(batch_size):
            cur_gt = gt_boxes[k]
            # Trim zero-padded trailing gt rows for this sample.
            cnt = cur_gt.__len__() - 1
            while cnt > 0 and cur_gt[cnt].sum() == 0:
                cnt -= 1
            cur_gt = cur_gt[:cnt + 1]
            cur_gt_classes = gt_classes[k][:cnt + 1].int()
            target_list = []
            # Match each anchor set only against gt boxes of its own class.
            for anchor_class_name, anchors in zip(self.anchor_class_names, all_anchors):
                if cur_gt_classes.shape[0] > 1:
                    mask = torch.from_numpy(self.class_names[cur_gt_classes.cpu() - 1] == anchor_class_name)
                else:
                    mask = torch.tensor([self.class_names[c - 1] == anchor_class_name
                                         for c in cur_gt_classes], dtype=torch.bool)
                if self.use_multihead:
                    anchors = anchors.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchors.shape[-1])
                    if self.seperate_multihead:
                        selected_classes = cur_gt_classes[mask].clone()
                        if len(selected_classes) > 0:
                            new_cls_id = self.gt_remapping[anchor_class_name]
                            selected_classes[:] = new_cls_id
                    else:
                        selected_classes = cur_gt_classes[mask]
                else:
                    feature_map_size = anchors.shape[:3]
                    anchors = anchors.view(-1, anchors.shape[-1])
                    selected_classes = cur_gt_classes[mask]
                single_target = self.assign_targets_single(
                    anchors,
                    cur_gt[mask],
                    gt_classes=selected_classes,
                    matched_threshold=self.matched_thresholds[anchor_class_name],
                    unmatched_threshold=self.unmatched_thresholds[anchor_class_name]
                )
                target_list.append(single_target)
            # Merge the per-class targets into one flat tensor per sample.
            if self.use_multihead:
                target_dict = {
                    'box_cls_labels': [t['box_cls_labels'].view(-1) for t in target_list],
                    'box_reg_targets': [t['box_reg_targets'].view(-1, self.box_coder.code_size) for t in target_list],
                    'reg_weights': [t['reg_weights'].view(-1) for t in target_list]
                }
                target_dict['box_reg_targets'] = torch.cat(target_dict['box_reg_targets'], dim=0)
                target_dict['box_cls_labels'] = torch.cat(target_dict['box_cls_labels'], dim=0).view(-1)
                target_dict['reg_weights'] = torch.cat(target_dict['reg_weights'], dim=0).view(-1)
            else:
                target_dict = {
                    'box_cls_labels': [t['box_cls_labels'].view(*feature_map_size, -1) for t in target_list],
                    'box_reg_targets': [t['box_reg_targets'].view(*feature_map_size, -1, self.box_coder.code_size)
                                        for t in target_list],
                    'reg_weights': [t['reg_weights'].view(*feature_map_size, -1) for t in target_list]
                }
                target_dict['box_reg_targets'] = torch.cat(
                    target_dict['box_reg_targets'], dim=-2
                ).view(-1, self.box_coder.code_size)
                target_dict['box_cls_labels'] = torch.cat(target_dict['box_cls_labels'], dim=-1).view(-1)
                target_dict['reg_weights'] = torch.cat(target_dict['reg_weights'], dim=-1).view(-1)
            bbox_targets.append(target_dict['box_reg_targets'])
            cls_labels.append(target_dict['box_cls_labels'])
            reg_weights.append(target_dict['reg_weights'])
        bbox_targets = torch.stack(bbox_targets, dim=0)
        cls_labels = torch.stack(cls_labels, dim=0)
        reg_weights = torch.stack(reg_weights, dim=0)
        all_targets_dict = {
            'box_cls_labels': cls_labels,
            'box_reg_targets': bbox_targets,
            'reg_weights': reg_weights
        }
        return all_targets_dict
    def assign_targets_single(self, anchors,
                              gt_boxes,
                              gt_classes,
                              matched_threshold=0.6,
                              unmatched_threshold=0.45
                              ):
        """Match one anchor set against one class's gt boxes.

        Label semantics: -1 ignored, 0 background, >0 class id.
        Returns a dict with 'box_cls_labels', 'box_reg_targets', 'reg_weights'.
        """
        num_anchors = anchors.shape[0]
        num_gt = gt_boxes.shape[0]
        labels = torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1
        gt_ids = torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1
        if len(gt_boxes) > 0 and anchors.shape[0] > 0:
            anchor_by_gt_overlap = iou3d_nms_utils.boxes_iou3d_gpu(anchors[:, 0:7], gt_boxes[:, 0:7]) \
                if self.match_height else box_utils.boxes3d_nearest_bev_iou(anchors[:, 0:7], gt_boxes[:, 0:7])
            # argmax runs on CPU/numpy here; result is moved back to GPU.
            anchor_to_gt_argmax = torch.from_numpy(anchor_by_gt_overlap.cpu().numpy().argmax(axis=1)).cuda()
            anchor_to_gt_max = anchor_by_gt_overlap[
                torch.arange(num_anchors, device=anchors.device), anchor_to_gt_argmax
            ]
            gt_to_anchor_argmax = torch.from_numpy(anchor_by_gt_overlap.cpu().numpy().argmax(axis=0)).cuda()
            gt_to_anchor_max = anchor_by_gt_overlap[gt_to_anchor_argmax, torch.arange(num_gt, device=anchors.device)]
            # gt boxes overlapping no anchor at all must not force-match below.
            empty_gt_mask = gt_to_anchor_max == 0
            gt_to_anchor_max[empty_gt_mask] = -1
            # Force-match: every gt keeps its best-overlapping anchor(s) as fg.
            anchors_with_max_overlap = (anchor_by_gt_overlap == gt_to_anchor_max).nonzero()[:, 0]
            gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]
            labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
            gt_ids[anchors_with_max_overlap] = gt_inds_force.int()
            # Regular matches above the per-class IoU threshold.
            pos_inds = anchor_to_gt_max >= matched_threshold
            gt_inds_over_thresh = anchor_to_gt_argmax[pos_inds]
            labels[pos_inds] = gt_classes[gt_inds_over_thresh]
            gt_ids[pos_inds] = gt_inds_over_thresh.int()
            bg_inds = (anchor_to_gt_max < unmatched_threshold).nonzero()[:, 0]
        else:
            bg_inds = torch.arange(num_anchors, device=anchors.device)
        fg_inds = (labels > 0).nonzero()[:, 0]
        if self.pos_fraction is not None:
            # Subsample fg/bg anchors to at most sample_size total.
            num_fg = int(self.pos_fraction * self.sample_size)
            if len(fg_inds) > num_fg:
                num_disabled = len(fg_inds) - num_fg
                disable_inds = torch.randperm(len(fg_inds))[:num_disabled]
                labels[disable_inds] = -1
                fg_inds = (labels > 0).nonzero()[:, 0]
            num_bg = self.sample_size - (labels > 0).sum()
            if len(bg_inds) > num_bg:
                enable_inds = bg_inds[torch.randint(0, len(bg_inds), size=(num_bg,))]
                labels[enable_inds] = 0
            # bg_inds = torch.nonzero(labels == 0)[:, 0]
        else:
            if len(gt_boxes) == 0 or anchors.shape[0] == 0:
                labels[:] = 0
            else:
                labels[bg_inds] = 0
                # Re-apply the forced matches in case bg overwrote them.
                labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
        bbox_targets = anchors.new_zeros((num_anchors, self.box_coder.code_size))
        if len(gt_boxes) > 0 and anchors.shape[0] > 0:
            fg_gt_boxes = gt_boxes[anchor_to_gt_argmax[fg_inds], :]
            fg_anchors = anchors[fg_inds, :]
            bbox_targets[fg_inds, :] = self.box_coder.encode_torch(fg_gt_boxes, fg_anchors)
        reg_weights = anchors.new_zeros((num_anchors,))
        if self.norm_by_num_examples:
            num_examples = (labels >= 0).sum()
            num_examples = num_examples if num_examples > 1.0 else 1.0
            reg_weights[labels > 0] = 1.0 / num_examples
        else:
            reg_weights[labels > 0] = 1.0
        ret_dict = {
            'box_cls_labels': labels,
            'box_reg_targets': bbox_targets,
            'reg_weights': reg_weights,
        }
        return ret_dict
| 9,874
| 45.14486
| 118
|
py
|
SASA
|
SASA-main/pcdet/models/dense_heads/target_assigner/atss_target_assigner.py
|
import torch
from ....ops.iou3d_nms import iou3d_nms_utils
from ....utils import common_utils
class ATSSTargetAssigner(object):
    """
    Reference: https://arxiv.org/abs/1912.02424
    Adaptive Training Sample Selection: picks positive anchors per gt using a
    per-gt IoU threshold (mean + std of the top-k closest anchors' IoUs).
    """
    def __init__(self, topk, box_coder, match_height=False):
        # topk: number of closest anchors considered per gt box.
        self.topk = topk
        self.box_coder = box_coder
        self.match_height = match_height
    def assign_targets(self, anchors_list, gt_boxes_with_classes, use_multihead=False):
        """
        Args:
            anchors: [(N, 7), ...]
            gt_boxes: (B, M, 8)
        Returns:
            dict with 'box_cls_labels', 'box_reg_targets', 'reg_weights';
            multiple anchor sets are concatenated along dim=1.
        """
        if not isinstance(anchors_list, list):
            anchors_list = [anchors_list]
            single_set_of_anchor = True
        else:
            single_set_of_anchor = len(anchors_list) == 1
        cls_labels_list, reg_targets_list, reg_weights_list = [], [], []
        for anchors in anchors_list:
            batch_size = gt_boxes_with_classes.shape[0]
            # Last column is the class id; the rest is the box geometry.
            gt_classes = gt_boxes_with_classes[:, :, -1]
            gt_boxes = gt_boxes_with_classes[:, :, :-1]
            if use_multihead:
                anchors = anchors.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchors.shape[-1])
            else:
                anchors = anchors.view(-1, anchors.shape[-1])
            cls_labels, reg_targets, reg_weights = [], [], []
            for k in range(batch_size):
                cur_gt = gt_boxes[k]
                # Trim zero-padded trailing gt rows for this sample.
                cnt = cur_gt.__len__() - 1
                while cnt > 0 and cur_gt[cnt].sum() == 0:
                    cnt -= 1
                cur_gt = cur_gt[:cnt + 1]
                cur_gt_classes = gt_classes[k][:cnt + 1]
                cur_cls_labels, cur_reg_targets, cur_reg_weights = self.assign_targets_single(
                    anchors, cur_gt, cur_gt_classes
                )
                cls_labels.append(cur_cls_labels)
                reg_targets.append(cur_reg_targets)
                reg_weights.append(cur_reg_weights)
            cls_labels = torch.stack(cls_labels, dim=0)
            reg_targets = torch.stack(reg_targets, dim=0)
            reg_weights = torch.stack(reg_weights, dim=0)
            cls_labels_list.append(cls_labels)
            reg_targets_list.append(reg_targets)
            reg_weights_list.append(reg_weights)
        if single_set_of_anchor:
            ret_dict = {
                'box_cls_labels': cls_labels_list[0],
                'box_reg_targets': reg_targets_list[0],
                'reg_weights': reg_weights_list[0]
            }
        else:
            ret_dict = {
                'box_cls_labels': torch.cat(cls_labels_list, dim=1),
                'box_reg_targets': torch.cat(reg_targets_list, dim=1),
                'reg_weights': torch.cat(reg_weights_list, dim=1)
            }
        return ret_dict
    def assign_targets_single(self, anchors, gt_boxes, gt_classes):
        """
        Args:
            anchors: (N, 7) [x, y, z, dx, dy, dz, heading]
            gt_boxes: (M, 7) [x, y, z, dx, dy, dz, heading]
            gt_classes: (M)
        Returns:
            cls_labels (N), reg_targets (N, code_size), reg_weights (N)
        """
        num_anchor = anchors.shape[0]
        num_gt = gt_boxes.shape[0]
        # select topk anchors for each gt_boxes
        if self.match_height:
            ious = iou3d_nms_utils.boxes_iou3d_gpu(anchors[:, 0:7], gt_boxes[:, 0:7])  # (N, M)
        else:
            ious = iou3d_nms_utils.boxes_iou_bev(anchors[:, 0:7], gt_boxes[:, 0:7])
        # Candidates = topk anchors by center distance; positives are those
        # whose IoU exceeds that gt's adaptive threshold (mean + std).
        distance = (anchors[:, None, 0:3] - gt_boxes[None, :, 0:3]).norm(dim=-1)  # (N, M)
        _, topk_idxs = distance.topk(self.topk, dim=0, largest=False)  # (K, M)
        candidate_ious = ious[topk_idxs, torch.arange(num_gt)]  # (K, M)
        iou_mean_per_gt = candidate_ious.mean(dim=0)
        iou_std_per_gt = candidate_ious.std(dim=0)
        iou_thresh_per_gt = iou_mean_per_gt + iou_std_per_gt + 1e-6
        is_pos = candidate_ious >= iou_thresh_per_gt[None, :]  # (K, M)
        # check whether anchor_center in gt_boxes, only check BEV x-y axes
        candidate_anchors = anchors[topk_idxs.view(-1)]  # (KxM, 7)
        gt_boxes_of_each_anchor = gt_boxes[:, :].repeat(self.topk, 1)  # (KxM, 7)
        xyz_local = candidate_anchors[:, 0:3] - gt_boxes_of_each_anchor[:, 0:3]
        # Rotate into each gt's local frame so the in-box test is axis-aligned.
        xyz_local = common_utils.rotate_points_along_z(
            xyz_local[:, None, :], -gt_boxes_of_each_anchor[:, 6]
        ).squeeze(dim=1)
        xy_local = xyz_local[:, 0:2]
        lw = gt_boxes_of_each_anchor[:, 3:5][:, [1, 0]]  # bugfixed: w ==> y, l ==> x in local coords
        is_in_gt = ((xy_local <= lw / 2) & (xy_local >= -lw / 2)).all(dim=-1).view(-1, num_gt)  # (K, M)
        is_pos = is_pos & is_in_gt  # (K, M)
        # Offset indices so each gt's candidates index a disjoint slice of the
        # flattened (M*N) buffer below.
        for ng in range(num_gt):
            topk_idxs[:, ng] += ng * num_anchor
        # select the highest IoU if an anchor box is assigned with multiple gt_boxes
        INF = -0x7FFFFFFF
        ious_inf = torch.full_like(ious, INF).t().contiguous().view(-1)  # (MxN)
        index = topk_idxs.view(-1)[is_pos.view(-1)]
        ious_inf[index] = ious.t().contiguous().view(-1)[index]
        ious_inf = ious_inf.view(num_gt, -1).t()  # (N, M)
        anchors_to_gt_values, anchors_to_gt_indexs = ious_inf.max(dim=1)
        # match the gt_boxes to the anchors which have maximum iou with them
        max_iou_of_each_gt, argmax_iou_of_each_gt = ious.max(dim=0)
        anchors_to_gt_indexs[argmax_iou_of_each_gt] = torch.arange(0, num_gt, device=ious.device)
        anchors_to_gt_values[argmax_iou_of_each_gt] = max_iou_of_each_gt
        cls_labels = gt_classes[anchors_to_gt_indexs]
        # Anchors that matched nothing (still INF) become background.
        cls_labels[anchors_to_gt_values == INF] = 0
        matched_gts = gt_boxes[anchors_to_gt_indexs]
        pos_mask = cls_labels > 0
        reg_targets = matched_gts.new_zeros((num_anchor, self.box_coder.code_size))
        reg_weights = matched_gts.new_zeros(num_anchor)
        if pos_mask.sum() > 0:
            reg_targets[pos_mask > 0] = self.box_coder.encode_torch(matched_gts[pos_mask > 0], anchors[pos_mask > 0])
            reg_weights[pos_mask] = 1.0
        return cls_labels, reg_targets, reg_weights
| 6,050
| 41.612676
| 117
|
py
|
SASA
|
SASA-main/pcdet/models/roi_heads/roi_head_template.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...utils import box_coder_utils, common_utils, loss_utils
from ..model_utils.model_nms_utils import class_agnostic_nms
from .target_assigner.proposal_target_layer import ProposalTargetLayer
class RoIHeadTemplate(nn.Module):
def __init__(self, num_class, model_cfg):
    """Base template for RoI refinement heads.

    Args:
        num_class: number of foreground classes.
        model_cfg: head config; must provide TARGET_CONFIG (BOX_CODER name,
            optional BOX_CODER_CONFIG, roi sampler settings) and LOSS_CONFIG.
    """
    super().__init__()
    self.model_cfg = model_cfg
    self.num_class = num_class
    # Instantiate the configured box coder by name from box_coder_utils.
    self.box_coder = getattr(box_coder_utils, self.model_cfg.TARGET_CONFIG.BOX_CODER)(
        **self.model_cfg.TARGET_CONFIG.get('BOX_CODER_CONFIG', {})
    )
    self.proposal_target_layer = ProposalTargetLayer(roi_sampler_cfg=self.model_cfg.TARGET_CONFIG)
    self.build_losses(self.model_cfg.LOSS_CONFIG)
    # Populated by forward(); consumed by the loss functions.
    self.forward_ret_dict = None
def build_losses(self, losses_cfg):
    """Register the box-regression loss as a submodule.

    add_module (rather than plain attribute assignment) registers the loss
    in the nn.Module hierarchy so its code_weights buffer follows .to()/.cuda().
    """
    self.add_module(
        'reg_loss_func',
        loss_utils.WeightedSmoothL1Loss(code_weights=losses_cfg.LOSS_WEIGHTS['code_weights'])
    )
def make_fc_layers(self, input_channels, output_channels, fc_list):
    """Build a pointwise MLP as [Conv1d(1x1)-BN-ReLU] * len(fc_list)
    followed by a final biased Conv1d(1x1) projection.

    A Dropout(self.model_cfg.DP_RATIO) layer is inserted after the first
    hidden stage when DP_RATIO >= 0.
    """
    layers = []
    in_channels = input_channels
    for idx, hidden in enumerate(fc_list):
        layers += [
            nn.Conv1d(in_channels, hidden, kernel_size=1, bias=False),
            nn.BatchNorm1d(hidden),
            nn.ReLU()
        ]
        in_channels = hidden
        if idx == 0 and self.model_cfg.DP_RATIO >= 0:
            layers.append(nn.Dropout(self.model_cfg.DP_RATIO))
    layers.append(nn.Conv1d(in_channels, output_channels, kernel_size=1, bias=True))
    return nn.Sequential(*layers)
@torch.no_grad()
def proposal_layer(self, batch_dict, nms_config):
    """
    Select top RoIs from the dense predictions via class-agnostic NMS.

    Args:
        batch_dict:
            batch_size:
            batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
            batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
            cls_preds_normalized: indicate whether batch_cls_preds is normalized
            batch_index: optional (N1+N2+...)
        nms_config:
    Returns:
        batch_dict:
            rois: (B, num_rois, 7+C)
            roi_scores: (B, num_rois)
            roi_labels: (B, num_rois)
    """
    batch_size = batch_dict['batch_size']
    batch_box_preds = batch_dict['batch_box_preds']
    batch_cls_preds = batch_dict['batch_cls_preds']
    # Pre-allocate fixed-size output buffers; slots beyond the number of
    # NMS survivors stay zero.
    rois = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE, batch_box_preds.shape[-1]))
    roi_scores = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE))
    roi_labels = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE), dtype=torch.long)
    for index in range(batch_size):
        # Flat (point-based) preds are selected via batch_index; batched
        # (B, N, C) preds are indexed directly.
        if batch_dict.get('batch_index', None) is not None:
            assert batch_cls_preds.shape.__len__() == 2
            batch_mask = (batch_dict['batch_index'] == index)
        else:
            assert batch_dict['batch_cls_preds'].shape.__len__() == 3
            batch_mask = index
        box_preds = batch_box_preds[batch_mask]
        cls_preds = batch_cls_preds[batch_mask]
        # Score/label of each box = its best class.
        cur_roi_scores, cur_roi_labels = torch.max(cls_preds, dim=1)
        if nms_config.MULTI_CLASSES_NMS:
            raise NotImplementedError
        else:
            selected, selected_scores = class_agnostic_nms(
                box_scores=cur_roi_scores, box_preds=box_preds, nms_config=nms_config
            )
        rois[index, :len(selected), :] = box_preds[selected]
        roi_scores[index, :len(selected)] = cur_roi_scores[selected]
        roi_labels[index, :len(selected)] = cur_roi_labels[selected]
    batch_dict['rois'] = rois
    batch_dict['roi_scores'] = roi_scores
    # argmax labels are 0-based; RoI labels are 1-based (0 = background).
    batch_dict['roi_labels'] = roi_labels + 1
    batch_dict['has_class_labels'] = True if batch_cls_preds.shape[-1] > 1 else False
    # batch_index referred to the dense predictions and is now stale.
    batch_dict.pop('batch_index', None)
    return batch_dict
def assign_targets(self, batch_dict):
    """Sample RoIs, match them to gt boxes, and transform the matched gt
    boxes into each RoI's canonical (RoI-centered, RoI-aligned) frame.

    Returns:
        targets_dict with 'rois', 'gt_of_rois' (canonical frame),
        'gt_of_rois_src' (original frame) and the sampler's label outputs.
    """
    batch_size = batch_dict['batch_size']
    with torch.no_grad():
        targets_dict = self.proposal_target_layer.forward(batch_dict)
    rois = targets_dict['rois']  # (B, N, 7 + C)
    gt_of_rois = targets_dict['gt_of_rois']  # (B, N, 7 + C + 1)
    # Keep an untransformed copy for losses computed in the original frame.
    targets_dict['gt_of_rois_src'] = gt_of_rois.clone().detach()
    # canonical transformation
    roi_center = rois[:, :, 0:3]
    roi_ry = rois[:, :, 6] % (2 * np.pi)
    gt_of_rois[:, :, 0:3] = gt_of_rois[:, :, 0:3] - roi_center
    gt_of_rois[:, :, 6] = gt_of_rois[:, :, 6] - roi_ry
    # transfer LiDAR coords to local coords
    gt_of_rois = common_utils.rotate_points_along_z(
        points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), angle=-roi_ry.view(-1)
    ).view(batch_size, -1, gt_of_rois.shape[-1])
    # flip orientation if rois have opposite orientation
    heading_label = gt_of_rois[:, :, 6] % (2 * np.pi)  # 0 ~ 2pi
    opposite_flag = (heading_label > np.pi * 0.5) & (heading_label < np.pi * 1.5)
    heading_label[opposite_flag] = (heading_label[opposite_flag] + np.pi) % (2 * np.pi)  # (0 ~ pi/2, 3pi/2 ~ 2pi)
    flag = heading_label > np.pi
    heading_label[flag] = heading_label[flag] - np.pi * 2  # (-pi/2, pi/2)
    heading_label = torch.clamp(heading_label, min=-np.pi / 2, max=np.pi / 2)
    gt_of_rois[:, :, 6] = heading_label
    targets_dict['gt_of_rois'] = gt_of_rois
    return targets_dict
    def get_box_reg_layer_loss(self, forward_ret_dict):
        """Smooth-L1 box-regression loss over foreground RoIs, optionally
        regularised by a corner loss on the fully decoded boxes.

        Args:
            forward_ret_dict: dict with 'reg_valid_mask', 'gt_of_rois' (canonical
                frame), 'gt_of_rois_src' (LiDAR frame), 'rcnn_reg', 'rois'.
        Returns:
            (rcnn_loss_reg tensor, tb_dict of scalar summaries)
        Raises:
            NotImplementedError: for REG_LOSS settings other than 'smooth-l1'.
        """
        loss_cfgs = self.model_cfg.LOSS_CONFIG
        code_size = self.box_coder.code_size
        reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1)
        gt_boxes3d_ct = forward_ret_dict['gt_of_rois'][..., 0:code_size]
        gt_of_rois_src = forward_ret_dict['gt_of_rois_src'][..., 0:code_size].view(-1, code_size)
        rcnn_reg = forward_ret_dict['rcnn_reg']  # (rcnn_batch_size, C)
        roi_boxes3d = forward_ret_dict['rois']
        rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0]
        fg_mask = (reg_valid_mask > 0)
        fg_sum = fg_mask.long().sum().item()
        tb_dict = {}
        if loss_cfgs.REG_LOSS == 'smooth-l1':
            # Encode targets against zero-centred, zero-heading anchors: the GT
            # is already in the canonical RoI frame, only the RoI sizes remain.
            rois_anchor = roi_boxes3d.clone().detach().view(-1, code_size)
            rois_anchor[:, 0:3] = 0
            rois_anchor[:, 6] = 0
            reg_targets = self.box_coder.encode_torch(
                gt_boxes3d_ct.view(rcnn_batch_size, code_size), rois_anchor
            )
            rcnn_loss_reg = self.reg_loss_func(
                rcnn_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0),
                reg_targets.unsqueeze(dim=0),
            )  # [B, M, 7]
            # Average only over foreground RoIs; max(fg_sum, 1) avoids div-by-zero.
            rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1)
            rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']
            tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item()
            if loss_cfgs.CORNER_LOSS_REGULARIZATION and fg_sum > 0:
                # TODO: NEED to BE CHECK
                # Decode the foreground predictions back into the LiDAR frame so
                # the corner loss can compare them against the raw GT boxes.
                fg_rcnn_reg = rcnn_reg.view(rcnn_batch_size, -1)[fg_mask]
                fg_roi_boxes3d = roi_boxes3d.view(-1, code_size)[fg_mask]
                fg_roi_boxes3d = fg_roi_boxes3d.view(1, -1, code_size)
                batch_anchors = fg_roi_boxes3d.clone().detach()
                roi_ry = fg_roi_boxes3d[:, :, 6].view(-1)
                roi_xyz = fg_roi_boxes3d[:, :, 0:3].view(-1, 3)
                batch_anchors[:, :, 0:3] = 0
                rcnn_boxes3d = self.box_coder.decode_torch(
                    fg_rcnn_reg.view(batch_anchors.shape[0], -1, code_size), batch_anchors
                ).view(-1, code_size)
                # Rotate by the RoI heading, then translate back to the RoI centre.
                rcnn_boxes3d = common_utils.rotate_points_along_z(
                    rcnn_boxes3d.unsqueeze(dim=1), roi_ry
                ).squeeze(dim=1)
                rcnn_boxes3d[:, 0:3] += roi_xyz
                loss_corner = loss_utils.get_corner_loss_lidar(
                    rcnn_boxes3d[:, 0:7],
                    gt_of_rois_src[fg_mask][:, 0:7]
                )
                loss_corner = loss_corner.mean()
                loss_corner = loss_corner * loss_cfgs.LOSS_WEIGHTS['rcnn_corner_weight']
                rcnn_loss_reg += loss_corner
                tb_dict['rcnn_loss_corner'] = loss_corner.item()
        else:
            raise NotImplementedError
        return rcnn_loss_reg, tb_dict
def get_box_cls_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
rcnn_cls = forward_ret_dict['rcnn_cls']
rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
if loss_cfgs.CLS_LOSS == 'BinaryCrossEntropy':
rcnn_cls_flat = rcnn_cls.view(-1)
batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction='none')
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
elif loss_cfgs.CLS_LOSS == 'CrossEntropy':
batch_loss_cls = F.cross_entropy(rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1)
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
else:
raise NotImplementedError
rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight']
tb_dict = {'rcnn_loss_cls': rcnn_loss_cls.item()}
return rcnn_loss_cls, tb_dict
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
rcnn_loss = 0
rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_cls
tb_dict.update(cls_tb_dict)
rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_reg
tb_dict.update(reg_tb_dict)
tb_dict['rcnn_loss'] = rcnn_loss.item()
return rcnn_loss, tb_dict
def generate_predicted_boxes(self, batch_size, rois, cls_preds, box_preds):
"""
Args:
batch_size:
rois: (B, N, 7)
cls_preds: (BN, num_class)
box_preds: (BN, code_size)
Returns:
"""
code_size = self.box_coder.code_size
# batch_cls_preds: (B, N, num_class or 1)
batch_cls_preds = cls_preds.view(batch_size, -1, cls_preds.shape[-1])
batch_box_preds = box_preds.view(batch_size, -1, code_size)
roi_ry = rois[:, :, 6].view(-1)
roi_xyz = rois[:, :, 0:3].view(-1, 3)
local_rois = rois.clone().detach()
local_rois[:, :, 0:3] = 0
batch_box_preds = self.box_coder.decode_torch(batch_box_preds, local_rois).view(-1, code_size)
batch_box_preds = common_utils.rotate_points_along_z(
batch_box_preds.unsqueeze(dim=1), roi_ry
).squeeze(dim=1)
batch_box_preds[:, 0:3] += roi_xyz
batch_box_preds = batch_box_preds.view(batch_size, -1, code_size)
return batch_cls_preds, batch_box_preds
| 11,451
| 43.216216
| 128
|
py
|
SASA
|
SASA-main/pcdet/models/roi_heads/partA2_head.py
|
import numpy as np
import spconv
import torch
import torch.nn as nn
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from .roi_head_template import RoIHeadTemplate
class PartA2FCHead(RoIHeadTemplate):
    """Part-A2 RoI head: RoI-aware-pools part (intra-object position) features
    and RPN point features into per-RoI voxel grids, fuses them with sparse 3D
    convolutions, and predicts per-RoI confidence plus box refinements through
    shared FC layers.
    """
    def __init__(self, input_channels, model_cfg, num_class=1):
        """
        Args:
            input_channels: channel count of the per-point RPN features.
            model_cfg: head config (ROI_AWARE_POOL, SHARED_FC, CLS_FC, REG_FC, ...).
            num_class: number of foreground classes handled by this head.
        """
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        block = self.post_act_block
        # Each sparse-conv stream outputs half of NUM_FEATURES so that their
        # concatenation matches NUM_FEATURES again.
        c0 = self.model_cfg.ROI_AWARE_POOL.NUM_FEATURES // 2
        # Stream 1: pooled part features (3 offsets + 1 seg score = 4 channels).
        self.conv_part = spconv.SparseSequential(
            block(4, 64, 3, padding=1, indice_key='rcnn_subm1'),
            block(64, c0, 3, padding=1, indice_key='rcnn_subm1_1'),
        )
        # Stream 2: pooled RPN point features.
        self.conv_rpn = spconv.SparseSequential(
            block(input_channels, 64, 3, padding=1, indice_key='rcnn_subm2'),
            block(64, c0, 3, padding=1, indice_key='rcnn_subm1_2'),
        )
        shared_fc_list = []
        pool_size = self.model_cfg.ROI_AWARE_POOL.POOL_SIZE
        # Flattened size of the dense pooled volume fed to the shared FC stack.
        pre_channel = self.model_cfg.ROI_AWARE_POOL.NUM_FEATURES * pool_size * pool_size * pool_size
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend([
                nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
                nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.SHARED_FC[k]
            # Dropout between hidden layers only, never after the last one.
            if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)
        # Classification and regression branches on top of the shared features.
        self.cls_layers = self.make_fc_layers(
            input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
        )
        self.reg_layers = self.make_fc_layers(
            input_channels=pre_channel,
            output_channels=self.box_coder.code_size * self.num_class,
            fc_list=self.model_cfg.REG_FC
        )
        self.roiaware_pool3d_layer = roiaware_pool3d_utils.RoIAwarePool3d(
            out_size=self.model_cfg.ROI_AWARE_POOL.POOL_SIZE,
            max_pts_each_voxel=self.model_cfg.ROI_AWARE_POOL.MAX_POINTS_PER_VOXEL
        )
        self.init_weights(weight_init='xavier')
    def init_weights(self, weight_init='xavier'):
        """Initialise Conv1d/Conv2d weights with the chosen scheme; the final
        regression layer additionally gets a small normal init.

        Args:
            weight_init: 'kaiming', 'xavier' or 'normal'.
        Raises:
            NotImplementedError: for unknown weight_init names.
        """
        if weight_init == 'kaiming':
            init_func = nn.init.kaiming_normal_
        elif weight_init == 'xavier':
            init_func = nn.init.xavier_normal_
        elif weight_init == 'normal':
            init_func = nn.init.normal_
        else:
            raise NotImplementedError
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == 'normal':
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        # Small init on the last reg layer keeps initial refinements near zero.
        nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
    def post_act_block(self, in_channels, out_channels, kernel_size, indice_key, stride=1, padding=0, conv_type='subm'):
        """Build a sparse-conv -> BatchNorm1d -> ReLU block.

        Args:
            conv_type: 'subm' (submanifold conv, preserves the sparsity pattern),
                'spconv' (regular sparse conv) or 'inverseconv' (sparse inverse conv).
        Raises:
            NotImplementedError: for unknown conv_type values.
        """
        if conv_type == 'subm':
            m = spconv.SparseSequential(
                spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key),
                nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        elif conv_type == 'spconv':
            m = spconv.SparseSequential(
                spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                    bias=False, indice_key=indice_key),
                nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        elif conv_type == 'inverseconv':
            m = spconv.SparseSequential(
                spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size,
                                           indice_key=indice_key, bias=False),
                nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        else:
            raise NotImplementedError
        return m
    def roiaware_pool(self, batch_dict):
        """
        Pool per-point part features (avg) and RPN features (max) into a fixed
        voxel grid inside each RoI.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        Returns:
            pooled_part_features: (B * N, out_x, out_y, out_z, 4)
            pooled_rpn_features: (B * N, out_x, out_y, out_z, C)
        """
        batch_size = batch_dict['batch_size']
        batch_idx = batch_dict['point_coords'][:, 0]
        point_coords = batch_dict['point_coords'][:, 1:4]
        point_features = batch_dict['point_features']
        # Part features = [part offsets (or raw coords when parts are disabled),
        # detached segmentation score].
        part_features = torch.cat((
            batch_dict['point_part_offset'] if not self.model_cfg.get('DISABLE_PART', False) else point_coords,
            batch_dict['point_cls_scores'].view(-1, 1).detach()
        ), dim=1)
        # Zero the part offsets of points below the segmentation-score threshold.
        part_features[part_features[:, -1] < self.model_cfg.SEG_MASK_SCORE_THRESH, 0:3] = 0
        rois = batch_dict['rois']
        pooled_part_features_list, pooled_rpn_features_list = [], []
        # Pool one sample at a time: the pooling op works per batch element.
        for bs_idx in range(batch_size):
            bs_mask = (batch_idx == bs_idx)
            cur_point_coords = point_coords[bs_mask]
            cur_part_features = part_features[bs_mask]
            cur_rpn_features = point_features[bs_mask]
            cur_roi = rois[bs_idx][:, 0:7].contiguous()  # (N, 7)
            pooled_part_features = self.roiaware_pool3d_layer.forward(
                cur_roi, cur_point_coords, cur_part_features, pool_method='avg'
            )  # (N, out_x, out_y, out_z, 4)
            pooled_rpn_features = self.roiaware_pool3d_layer.forward(
                cur_roi, cur_point_coords, cur_rpn_features, pool_method='max'
            )  # (N, out_x, out_y, out_z, C)
            pooled_part_features_list.append(pooled_part_features)
            pooled_rpn_features_list.append(pooled_rpn_features)
        pooled_part_features = torch.cat(pooled_part_features_list, dim=0)  # (B * N, out_x, out_y, out_z, 4)
        pooled_rpn_features = torch.cat(pooled_rpn_features_list, dim=0)  # (B * N, out_x, out_y, out_z, C)
        return pooled_part_features, pooled_rpn_features
    @staticmethod
    def fake_sparse_idx(sparse_idx, batch_size_rcnn):
        """Fabricate one non-empty voxel per RoI when (almost) all pooled grids
        are empty, so BatchNorm and the sparse convs still get valid input."""
        print('Warning: Sparse_Idx_Shape(%s) \r' % (str(sparse_idx.shape)), end='', flush=True)
        # at most one sample is non-empty, then fake the first voxels of each sample(BN needs at least
        # two values each channel) as non-empty for the below calculation
        sparse_idx = sparse_idx.new_zeros((batch_size_rcnn, 3))
        bs_idxs = torch.arange(batch_size_rcnn).type_as(sparse_idx).view(-1, 1)
        sparse_idx = torch.cat((bs_idxs, sparse_idx), dim=1)
        return sparse_idx
    def forward(self, batch_dict):
        """
        Generate proposals, pool RoI features, run the two-stream sparse-conv
        rcnn network and attach predictions (eval) or training targets (train)
        to batch_dict.

        Args:
            batch_dict: detector pipeline state; must provide what proposal_layer
                and roiaware_pool consume.
        Returns:
            batch_dict, augmented with batch_cls_preds/batch_box_preds at eval
            time; at train time targets are stashed in self.forward_ret_dict.
        """
        targets_dict = self.proposal_layer(
            batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
        )
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            batch_dict['rois'] = targets_dict['rois']
            batch_dict['roi_labels'] = targets_dict['roi_labels']
        # RoI aware pooling
        pooled_part_features, pooled_rpn_features = self.roiaware_pool(batch_dict)
        batch_size_rcnn = pooled_part_features.shape[0]  # (B * N, out_x, out_y, out_z, 4)
        # transform to sparse tensors
        sparse_shape = np.array(pooled_part_features.shape[1:4], dtype=np.int32)
        sparse_idx = pooled_part_features.sum(dim=-1).nonzero()  # (non_empty_num, 4) ==> [bs_idx, x_idx, y_idx, z_idx]
        if sparse_idx.shape[0] < 3:
            sparse_idx = self.fake_sparse_idx(sparse_idx, batch_size_rcnn)
            if self.training:
                # these are invalid samples
                targets_dict['rcnn_cls_labels'].fill_(-1)
                targets_dict['reg_valid_mask'].fill_(-1)
        part_features = pooled_part_features[sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]]
        rpn_features = pooled_rpn_features[sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]]
        coords = sparse_idx.int()
        part_features = spconv.SparseConvTensor(part_features, coords, sparse_shape, batch_size_rcnn)
        rpn_features = spconv.SparseConvTensor(rpn_features, coords, sparse_shape, batch_size_rcnn)
        # forward rcnn network: two streams, concatenated channel-wise
        x_part = self.conv_part(part_features)
        x_rpn = self.conv_rpn(rpn_features)
        merged_feature = torch.cat((x_rpn.features, x_part.features), dim=1)  # (N, C)
        shared_feature = spconv.SparseConvTensor(merged_feature, coords, sparse_shape, batch_size_rcnn)
        # Densify the fused volume and flatten per RoI for the 1x1-conv FC stack.
        shared_feature = shared_feature.dense().view(batch_size_rcnn, -1, 1)
        shared_feature = self.shared_fc_layer(shared_feature)
        rcnn_cls = self.cls_layers(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, 1 or 2)
        rcnn_reg = self.reg_layers(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, C)
        if not self.training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
            )
            batch_dict['batch_cls_preds'] = batch_cls_preds
            batch_dict['batch_box_preds'] = batch_box_preds
            batch_dict['cls_preds_normalized'] = False
        else:
            targets_dict['rcnn_cls'] = rcnn_cls
            targets_dict['rcnn_reg'] = rcnn_reg
            self.forward_ret_dict = targets_dict
        return batch_dict
| 10,039
| 43.622222
| 120
|
py
|
SASA
|
SASA-main/pcdet/models/roi_heads/__init__.py
|
from .partA2_head import PartA2FCHead
from .pointrcnn_head import PointRCNNHead
from .pvrcnn_head import PVRCNNHead
from .roi_head_template import RoIHeadTemplate
# NOTE(review): __all__ is used here as a name->class registry (a dict), not
# the conventional export list — presumably looked up by a model builder via
# __all__[cfg.NAME]; confirm against the detector construction code.
__all__ = {
    'RoIHeadTemplate': RoIHeadTemplate,
    'PartA2FCHead': PartA2FCHead,
    'PVRCNNHead': PVRCNNHead,
    'PointRCNNHead': PointRCNNHead
}
| 317
| 25.5
| 46
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.