repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
wgenpatex | wgenpatex-main/run_cnn_synthesis.py | import argparse
import wgenpatex
import model
import torch
# Command-line interface for training a CNN texture generator on one target image.
parser = argparse.ArgumentParser()
parser.add_argument('target_image_path', help='paths of target texture image')
parser.add_argument('-w', '--patch_size', type=int,default=4, help="patch size (default: 4)")
parser.add_argument('-nmax', '--n_iter_max', type=int, default=5000, help="max iterations of the algorithm(default: 5000)")
parser.add_argument('-npsi', '--n_iter_psi', type=int, default=10, help="max iterations for psi (default: 10)")
parser.add_argument('-nin', '--n_patches_in', type=int, default=-1, help="number of patches of the synthetized texture used at each iteration, -1 corresponds to all patches (default: -1)")
parser.add_argument('-nout', '--n_patches_out', type=int, default=2000, help="number maximum of patches of the target texture used, -1 corresponds to all patches (default: 2000)")
parser.add_argument('-sc', '--scales', type=int, default=5, help="number of scales used (default: 5)")
parser.add_argument('--visu', action='store_true', help='show intermediate results')
parser.add_argument('--save', action='store_true', help='save temp results in /tmp folder')
parser.add_argument('--keops', action='store_true', help='use keops package')
args = parser.parse_args()
# Train the generator network against the target texture.
generator = wgenpatex.learn_model(args)
# save the texture generator
torch.save(generator.state_dict(), 'generator.pt')
# sample an image and save it
# Output shape is fixed at 1x3x512x512 (one RGB image of 512x512).
synth_img = model.sample_fake_img(generator, [1,3,512,512] , n_samples=1)
wgenpatex.imshow(synth_img)
wgenpatex.imsave('synthesized.png', synth_img)
| 1,562 | 54.821429 | 188 | py |
Transformers-From-Optimization | Transformers-From-Optimization-main/combination_energy.py | import torch as tc
import torch.nn as nn
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import set_random_seed
import pdb
import sklearn
import sklearn.decomposition
from matplotlib.patches import ConnectionPatch
set_random_seed(233)
def F_norm(Y):
    """Squared Frobenius norm of *Y*: the sum of all squared entries."""
    return Y.pow(2).sum()
# Problem size: n points, each of dimension d.
n = 500
d = 128
# Random mixing matrices for the two quadratic energies (small scale 0.1).
W1 = tc.randn(d,d) * 0.1
W2 = tc.randn(n,n) * 0.1
num_epochs = 200
# Target ("bias") matrices for the two energy terms.
YB1 = tc.rand(n,d)
YB2 = tc.rand(n,d)
class Model(nn.Module):
    """Wraps a tensor as the module's single trainable parameter ``Y``."""

    def __init__(self, Y0):
        super(Model, self).__init__()
        self.Y = nn.Parameter(Y0)
# Shared initial point for the alternating model and the reference model.
Y0 = tc.rand(n,d) * 2
model = Model( Y0 )
model_h = Model( Y0.detach().clone() )
optimizer_1 = tc.optim.SGD(model.parameters() , lr = 0.05)
optimizer_2 = tc.optim.SGD(model.parameters() , lr = 0.05)
optimizer_h = tc.optim.SGD(model_h.parameters() , lr = 0.01) # used to find the minimizer of the combined energy h
def ener_1(Y):
    """First quadratic energy: ||Y W1||_F^2 + ||Y - YB1||_F^2."""
    return F_norm(Y @ W1) + F_norm( Y - YB1 )
def ener_2(Y):
    """Second quadratic energy: ||W2 Y||_F^2 + ||Y - YB2||_F^2."""
    return F_norm(W2 @ Y) + F_norm( Y - YB2 )
# Random linear projection to 2D (unused here; PCA below is used instead).
mapW = tc.randn(2 , n*d)
def mapsto(Y):
    """Project a flattened Y to 2D via the fixed random map mapW."""
    Y = Y.view(-1,1)
    Y = mapW @ Y
    return Y
tot_losses = []
Y_trace = []
# Alternating minimization: even epochs descend ener_1, odd epochs descend ener_2.
for epoch_id in tqdm( range(num_epochs) ):
    Y = model.Y
    if epoch_id % 2 == 0:
        loss = ener_1(Y)
        optimizer = optimizer_1
    elif epoch_id % 2 == 1:
        loss = ener_2(Y)
        optimizer = optimizer_2
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Track the combined energy and the iterate after every step.
    with tc.no_grad():
        tot_loss = ener_1(Y) + ener_2(Y)
        tot_losses.append( tot_loss )
        Y_trace.append( Y.data.clone().view(1,-1) )
# Solve for the minimizer of the combined energy h = ener_1 + ener_2.
_pbar = tqdm(range(600))
for _ in _pbar:
    loss_h = ener_1(model_h.Y) + ener_2(model_h.Y)
    optimizer_h.zero_grad()
    loss_h.backward()
    optimizer_h.step()
    _pbar.set_description("loss = %.4f" % loss_h)
Y_trace = tc.cat( Y_trace , dim = 0 ) # (num_epoch , n*d)
# Project the high-dimensional trace (and the reference optimum) to 2D by PCA.
pca = sklearn.decomposition.PCA(2)
mapedYs = pca.fit_transform(Y_trace)
mapedh = tc.Tensor( pca.transform(model_h.Y.detach().view(1,-1)) ).view(-1)
tot_losses = tc.Tensor(tot_losses)
tot_losses = tc.log(tot_losses)
tot_losses = tot_losses - tot_losses.min()
# ---- plot the optimization trajectory ----
# Zoom-in box around the reference optimum.
xl , xr = mapedh[0]-4,mapedh[0]+4
yb , yt = mapedh[1]-4,mapedh[1]+4
fig = plt.figure(figsize=(12,5) , dpi=512)
p1 = plt.subplot(121)
p2 = plt.subplot(122)
p1.plot( mapedYs[:,0] , mapedYs[:,1] , zorder = 1 , label = "trace of $Y^{(t)}$")
p1.scatter( mapedh[0] , mapedh[1] , color = (1,0.4,0.1) , s = 40 , zorder = 2 , marker = "^" , label = "$Y_h^*$")
p1.scatter( mapedYs[0][0] , mapedYs[0][1] , color = (0.7,0.4,0.4) , s = 40 , zorder = 3 , marker = "*" , label = "$Y^{(0)}$")
p1.plot( [xl,xr,xr,xl,xl] , [yt,yt,yb,yb,yt] , color = (0.2,0.0,0.2,0.7))
p1.legend()
# p1.set_xlabel("x[0]")
# p1.set_ylabel("x[1]")
# Right panel: same trace zoomed to the box drawn on the left panel.
p2.plot( mapedYs[:,0] , mapedYs[:,1] , zorder = 1 , label = "trace of $Y^{(t)}$")
p2.scatter( mapedh[0] , mapedh[1] , color = (1,0.4,0.1) , s = 40 , zorder = 2 , marker = "^" , label = "$Y_h^*$")
p2.set_xlim(xl , xr)
p2.set_ylim(yb , yt)
p2.legend()
# p2.set_xlabel("x[0]")
# p2.set_ylabel("x[1]")
# Connect the zoom box in panel p1 to panel p2 with dashed guide lines.
con1 = ConnectionPatch(
    xyA = [xr,yt] , xyB = [xl,yt] ,
    coordsA = "data" , coordsB = "data" ,
    axesA = p1 , axesB = p2 ,
    color = (0.2,0.0,0.2,0.7) , linestyle = "dashed"
)
con2 = ConnectionPatch(
    xyA = [xr,yb] , xyB = [xl,yb] ,
    coordsA = "data" , coordsB = "data" ,
    axesA = p1 , axesB = p2 ,
    color = (0.2,0.0,0.2,0.7) , linestyle = "dashed"
)
p1.add_artist(con1)
p1.add_artist(con2)
fig.tight_layout()
plt.savefig("generated_figures/alternate_trace.png")
# ---- plot the energy curve ----
fig = plt.figure(figsize=(6,4) , dpi=512)
plt.plot( range(num_epochs) , tot_losses )
plt.xlabel("$t$" , fontsize = 15)
plt.ylabel("$\log E\\left(Y^{(t)}\\right) - \log E_{\\min}$" , fontsize = 15)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
fig.tight_layout()
plt.savefig("generated_figures/alternate_energy.png")
| 3,850 | 24.335526 | 125 | py |
Transformers-From-Optimization | Transformers-From-Optimization-main/divergence.py | import torch as tc
import torch.nn as nn
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import set_random_seed
import pdb
import sklearn
import sklearn.decomposition
import scipy.spatial as spt
import itertools
set_random_seed(23333)
def norm(x):
    """Euclidean (L2) norm taken along the last dimension of *x*."""
    return x.pow(2).sum(-1).sqrt()
# Random symmetric positive semi-definite matrix W = A A^T and a bias row vector B.
W = tc.randn(2,2)
W = W @ W.t()
B = tc.randn(1,2)
def h(x):
    """Quadratic energy 0.5 * x^T W x + 0.5 * ||x||^2 (W and norm are module globals)."""
    quadratic = x.t() @ W @ x
    return 0.5 * quadratic + 0.5 * norm(x) ** 2
def alphah(X):
    """Scaled affine map alpha * (X W + X - B) with fixed step alpha = 0.25.

    Factoring out alpha is exact here because 0.25 is a power of two, so
    multiplication by it is lossless in floating point.
    """
    alpha = 0.25
    return alpha * (X @ W + X - B)
def div(X):
    '''X: (n,2). Normalized negative part of xi2^2 - xi1^2 relative to ||xi1||^2.

    Entries where xi2^2 - xi1^2 >= 0 are zeroed (clamp at 0 from above), then
    summed over the last axis and divided by the squared norm of xi1.
    '''
    xi1 = alphah(X)
    xi2 = X
    diff = xi2 ** 2 - xi1 ** 2
    negative_part = diff.clamp(max=0)
    return negative_part.sum(-1) / norm(xi1) ** 2
# Four panels, one per divergence threshold kappa.
fig = plt.figure(figsize = (24,6) , dpi = 512 )
p1 = plt.subplot(141)
p2 = plt.subplot(142)
p3 = plt.subplot(143)
p4 = plt.subplot(144)
for thres , pl in zip([0.2 , 0.4 , 0.6, 0.8] , [p1,p2,p3,p4]):
    # Dense grid over [-5,5]^2; keep only points whose divergence exceeds -thres.
    X = tc.arange(-5,5 , 0.02)
    Y = tc.arange(-5,5 , 0.02)
    xys = tc.Tensor( list(itertools.product(X.numpy(),Y.numpy())) )
    idx = div(xys) >= -thres
    xys = xys[idx]
    pl.scatter(xys[:,0] , xys[:,1] , s = 4)
    pl.set_xlim(-5,5)
    pl.set_ylim(-5,5)
    # NOTE(review): `tick.label` was removed in newer Matplotlib (use tick.label1
    # or tick_params) — confirm the pinned Matplotlib version supports it.
    for tick in pl.xaxis.get_major_ticks():
        tick.label.set_fontsize(15)
    for tick in pl.yaxis.get_major_ticks():
        tick.label.set_fontsize(15)
    # pl.set_xticks(fontsize=15)
    # pl.set_yticks(fontsize=15)
    pl.set_title("$\kappa={0}$".format(thres) , fontsize = 18)
fig.tight_layout()
# plt.show()
plt.savefig("generated_figures/divergence.png")
| 1,504 | 19.902778 | 67 | py |
Transformers-From-Optimization | Transformers-From-Optimization-main/utils.py | import random
import torch as tc
import numpy as np
def set_random_seed(seed):
    """Seed every RNG in use (torch CPU/CUDA, NumPy, stdlib) and force cuDNN determinism."""
    tc.manual_seed(seed)
    tc.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    tc.backends.cudnn.deterministic = True
    tc.backends.cudnn.benchmark = False
| 269 | 21.5 | 42 | py |
Transformers-From-Optimization | Transformers-From-Optimization-main/apollo_circle.py | import torch as tc
import torch.nn as nn
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import set_random_seed
import pdb
import sklearn
import sklearn.decomposition
import scipy.spatial as spt
import itertools
set_random_seed(23333)
def norm(x):
    """L2 norm along the final axis of *x*."""
    return tc.sqrt((x * x).sum(-1))
def paint(xf,xh,C):
    """Plot the Apollonius region {x : ||x-xf|| / ||x-xh|| <= C} for two 2D points.

    xf, xh: (1,2) tensors (the two fixed points); C: the ratio bound.
    Saves the figure to generated_figures/apollo_C={C}.png.
    """
    fig = plt.figure(figsize = (6,6) , dpi=512)
    # Dense grid over [-3.5, 3.5]^2; keep points satisfying the ratio bound.
    X = tc.arange(-3.5,3.5 , 0.02)
    Y = tc.arange(-3.5,3.5 , 0.02)
    xys = tc.Tensor( list(itertools.product(X.numpy(),Y.numpy())) )
    idx = norm( xys - xf ) / norm( xys - xh ) <= C
    xys = xys[idx]
    plt.scatter(xys[:,0] , xys[:,1] , s = 4)
    # Mark the two fixed points with triangles.
    plt.scatter(xf[0,0] , xf[0,1] , s = 122 , color = (0.4,0,0) , marker = "^")
    plt.scatter(xh[0,0] , xh[0,1] , s = 122 , color = (0,0.4,0) , marker = "^")
    plt.xlim(-3.5,3.5)
    plt.ylim(-3.5,3.5)
    # plt.xlabel("",fontsize = 15)
    # plt.ylabel("",fontsize = 15)
    plt.xticks(fontsize = 15)
    plt.yticks(fontsize = 15)
    plt.text( xf[0,0]-0.3 , xf[0,1]-0.5 , s = "$\mathbf{y}_f^*$" , fontsize = 18)
    plt.text( xh[0,0]-0.3 , xh[0,1]-0.5 , s = "$\mathbf{y}_h^*$" , fontsize = 18)
    fig.tight_layout()
    plt.savefig("generated_figures/apollo_C={0}.png".format(C))
    # plt.show()
# Two random 2D points used as the fixed points of the Apollonius construction.
xf = tc.randn(2).view(1,-1)
xh = tc.randn(2).view(1,-1)
# Render the region for two different ratio bounds C.
paint(xf,xh,0.7)
paint(xf,xh,1.5)
print ("xf = {0}, xh = {1}".format(xf,xh)) | 1,370 | 25.365385 | 81 | py |
Transformers-From-Optimization | Transformers-From-Optimization-main/energy_curve/main.py | from model import Transformer
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
from fastNLP.io import IMDBLoader
from fastNLP.io import IMDBPipe
from fastNLP.embeddings import StaticEmbedding
import pdb
import pickle
from pathlib import Path
import matplotlib.pyplot as plt
from load_data import load_data
from paint import paint
from tqdm import tqdm
import numpy as np
import torch as tc
import random
def set_random_seed(seed):
    """Make every random source (Python, NumPy, torch CPU/CUDA) reproducible."""
    for seeder in (random.seed, np.random.seed, tc.manual_seed, tc.cuda.manual_seed_all):
        seeder(seed)
    # Trade cuDNN autotuning for deterministic kernels.
    tc.backends.cudnn.benchmark = False
    tc.backends.cudnn.deterministic = True
set_random_seed(2333)
# Produce energy-curve figures for each dataset, with and without ReLU naming.
dataset_names = ["imdb" , "sst2"]
for dataset_name in dataset_names:
    norelu_name = ""
    relu_name = ""
    if dataset_name == "imdb":
        norelu_name = "norelu"
        relu_name = "relu"
    else:
        norelu_name = "norelu_%s" % dataset_name
        relu_name = "relu_%s" % dataset_name
    data_bundle , word2vec_embed = load_data(dataset_name)
    train_data = data_bundle.get_dataset("train")
    d = word2vec_embed.embedding_dim
    num_layers = 12
    model_1 = Transformer(d , num_layers , 2)
    model_1.normalize_weight()
    paint(model_1 , "../generated_figures/%s.png" % norelu_name)
    # NOTE(review): model_2 is constructed identically to model_1 with `relu`
    # left at its default (False), yet it is saved under the relu_name figure.
    # It likely should be Transformer(d, num_layers, 2, relu=True) — confirm.
    model_2 = Transformer(d , num_layers , 2)
    model_2.normalize_weight()
    paint(model_2 , "../generated_figures/%s.png" % relu_name)
| 1,428 | 24.517857 | 64 | py |
Transformers-From-Optimization | Transformers-From-Optimization-main/energy_curve/paint.py | from model import Transformer
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
from fastNLP.io import IMDBLoader
from fastNLP.io import IMDBPipe
from fastNLP.embeddings import StaticEmbedding
import pdb
import pickle
from pathlib import Path
import matplotlib.pyplot as plt
from load_data import load_data
from tqdm import tqdm
import random
# Number of test sentences sampled when estimating the energy statistics.
num_test_epoch = 200
def paint(model , savepath = None):
    """Plot the per-layer energy curve of *model* over sampled test sentences.

    model: a Transformer whose forward returns {"ener": [...]}.
    savepath: file to save the figure to; shows interactively when None.
    NOTE(review): load_data() is called without arguments, so this always uses
    the imdb test set regardless of which dataset the caller loaded — confirm.
    """
    model = model.eval()
    data_bundle , word2vec_embed = load_data()
    datas = data_bundle.get_dataset("test")
    eners_tot = [ ]
    # Collect the energy trajectory for a random sample of test sentences.
    for data_idx in tqdm( random.sample( list(range(len(datas))) , num_test_epoch) ):
        Y = word2vec_embed(datas[data_idx]["words"])
        eners = model(Y)["ener"]
        eners = tc.Tensor(eners).view(-1)
        eners_tot.append(eners)
    # Shape (num_layers+1, num_test_epoch): one column per sampled sentence.
    eners_tot = tc.cat( [e.view(-1 , 1) for e in eners_tot] , dim = -1 )
    eners_tot = eners_tot - eners_tot.min()
    # eners_tot = eners_tot / eners_tot.max()
    # First (throwaway) pass: draw a boxplot just to recover the lowest whisker,
    # which is then used to re-baseline the energies for the final figure.
    fig = plt.figure(figsize=(8,4))
    plt.plot( range(model.num_layers + 1) , eners_tot.mean(dim = -1) )
    pre_bp = plt.boxplot(
        eners_tot ,
        positions = list(range(model.num_layers + 1)),
        showfliers = False
    )
    res = {key : [v.get_data() for v in value] for key, value in pre_bp.items()}
    whiskers = res["whiskers"]
    whisker_min = min( [ whiskers[i][1].min() for i in range(len(whiskers))] )
    # plt.cla()
    plt.close()
    # Second pass: the actual figure, shifted so the lowest whisker sits at 0.
    fig = plt.figure(figsize=(8,4))
    eners_tot = eners_tot - float( whisker_min )
    plt.plot( range(model.num_layers + 1) , eners_tot.mean(dim = -1) )
    plt.boxplot(
        eners_tot ,
        positions = list(range(model.num_layers + 1)),
        showfliers = False
    )
    plt.xlabel("$t$ (layer index)" , fontsize = 15)
    plt.ylabel("$E\\left(Y^{(t)}\\right) - E_{\\min}$" , fontsize = 15)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.ticklabel_format(style='sci', scilimits=(-1,2), axis='y')
    fig.tight_layout()
    if savepath is None:
        plt.show()
    else:
        plt.savefig(savepath)
| 2,092 | 28.069444 | 85 | py |
Transformers-From-Optimization | Transformers-From-Optimization-main/energy_curve/model.py | import torch as tc
import torch.nn as nn
import torch.nn.functional as F
import pdb
# Interpolation step sizes of the attention (alpha_1) and FFN (alpha_2) updates.
alpha_1 = 1
alpha_2 = 1
def norm2(X):
    """Squared Frobenius norm: the sum of all squared entries of *X*."""
    return X.pow(2).sum()
def inner(x,y):
    """Inner product of *x* and *y* viewed as flat vectors."""
    return x.view(-1).mul(y.view(-1)).sum()
idxs_cache = {}
class Attention(nn.Module):
    """Single-head self-attention whose weight matrix W = W~^T W~ is symmetric
    positive semi-definite by construction (W~ is the raw learned factor)."""
    def __init__(self , d):
        super().__init__()
        self.d = d
        self._W = nn.Parameter( tc.zeros(d,d) )
        self.reset_params()
    def reset_params(self):
        """Xavier-initialize the raw factor W~ in-place."""
        with tc.no_grad():
            nn.init.xavier_normal_(self._W.data)
    def normalize_weight(self):
        # Nothing to project: W = W~^T W~ is already PSD for any W~.
        pass
    @property
    def W(self):
        # Symmetric PSD attention matrix derived from the raw factor.
        return self._W.t() @ self._W
    def get_energy(self , Y):
        """Energy of representation Y (computed without grad).

        With Y~ = Y W~: sum over pairs i<j of rho(0.5 * ||y~_i - y~_j||^2),
        where rho(x) = -exp(-x), plus 0.5 * ||Y~||_F^2.
        """
        Y = Y @ self._W
        n = Y.size(0)
        rho = lambda x: - tc.exp( - x )
        with tc.no_grad():
            # Vectorized over all pairs i<j — a double Python loop over n
            # tokens is far too slow. The index pairs are cached per length n
            # in the module-level idxs_cache.
            if idxs_cache.get(n) is None:
                idxs_cache[n] = {
                    "idxs_i": tc.LongTensor( [ i for i in range(n) for j in range(i)] ) ,
                    "idxs_j": tc.LongTensor( [ j for i in range(n) for j in range(i)] ) ,
                }
            idxs_i = idxs_cache[n]["idxs_i"]
            idxs_j = idxs_cache[n]["idxs_j"]
            ener_rho = rho( 0.5 * ((Y[idxs_i] - Y[idxs_j])**2).sum(-1) ).sum()
            ener = ener_rho + 0.5 * norm2(Y)
        return ener
    def forward(self , Y):
        '''
        Y: (n,d) token representations.
        Returns Z = (1 - alpha_1) * Y + alpha_1 * softmax(Y W Y^T) Y.
        '''
        # Dead locals removed: the original computed n, d and an unused
        # `beta = -0.5 * ((Y @ self._W) ** 2).sum(-1)` that had no effect.
        A = tc.softmax( Y @ self.W @ Y.t() , -1 )
        Z = (1-alpha_1) * Y + alpha_1 * A @ Y
        return Z
class FFN(nn.Module):
    """Feed-forward block with a learned raw matrix _Wf and bias B.

    The effective weight (property ``Wf``) is a symmetrized, alpha_2-interpolated
    version of _Wf; normalize_weight() clips the eigenvalues of _Wf _Wf^T.
    """
    def __init__(self , d):
        super().__init__()
        self.d = d
        self._Wf = nn.Parameter( tc.zeros(d,d) )
        self.B = nn.Parameter( tc.zeros(1,d) )
        self.reset_params()
    def reset_params(self):
        # Xavier-initialize both the raw weight and the bias row.
        with tc.no_grad():
            nn.init.xavier_normal_(self._Wf.data)
            nn.init.xavier_normal_(self.B.data)
    def normalize_weight(self):
        # Replace _Wf by the PSD matrix _Wf _Wf^T with eigenvalues clipped to
        # [-0.95, 0.95], keeping the layer's spectral radius below 1.
        with tc.no_grad():
            W = self._Wf.data
            W = W @ W.t()
            L , U = tc.linalg.eigh(W) # W = U @ L.diag() @ U.t()
            L[L > 0.95] = 0.95
            L[L < -0.95] = -0.95
            W = U @ L.diag() @ U.t()
            self._Wf.data = W
    @property
    def Wf(self):
        # Effective weight: -0.5*alpha_2*(symmetrized _Wf) + (1-alpha_2)*I.
        return - 0.5 * alpha_2 * (self._Wf + self._Wf.t()) + (1-alpha_2) * tc.eye(self.d)
    def get_energy(self , Y):
        # Quadratic energy 0.5*tr(Y _Wf Y^T) + 0.5*||Y - B||_F^2 (no grad).
        with tc.no_grad():
            return 0.5 * tc.trace(Y @ self._Wf @ Y.t()) + 0.5 * norm2(Y - self.B)
    def forward(self , Y):
        '''
        Y: (n,d). Returns the affine update Y Wf + B.
        '''
        Y = Y @ self.Wf + self.B
        return Y
class TransformerLayer(nn.Module):
    """One attention-plus-FFN layer, with an optional ReLU on the output."""

    def __init__(self, d, relu):
        super().__init__()
        self.d = d
        self.relu = relu
        self.attn = Attention(d)
        self.ffn = FFN(d)

    def get_energy(self, Y):
        """Total layer energy: attention energy plus FFN energy."""
        return self.attn.get_energy(Y) + self.ffn.get_energy(Y)

    def normalize_weight(self):
        """Project both sub-modules' weights back to their admissible sets."""
        for submodule in (self.attn, self.ffn):
            submodule.normalize_weight()

    def forward(self, Y):
        out = self.ffn(self.attn(Y))
        return F.relu(out) if self.relu else out
class Transformer(nn.Module):
    """Weight-tied transformer: one shared layer applied num_layers times,
    followed by a linear classification head."""

    def __init__(self, d, num_layers, output_size=2, relu=False):
        super().__init__()
        self.d = d
        self.num_layers = num_layers
        self.relu = relu
        # A single shared layer is reused at every depth (weight tying).
        self.rec_layer = TransformerLayer(d, relu)
        self.output = nn.Linear(d, output_size)

    def normalize_weight(self):
        self.rec_layer.normalize_weight()

    def get_energy(self, Y):
        return self.rec_layer.get_energy(Y)

    def forward(self, Y):
        """Iterate the shared layer, recording the energy before/after each step.

        Returns a dict with the final representation, the energy trajectory
        (num_layers + 1 values), and the classifier prediction.
        """
        energies = [self.get_energy(Y)]
        for _ in range(self.num_layers):
            Y = self.rec_layer(Y)
            energies.append(self.get_energy(Y))
        return {
            "repr": Y,
            "ener": energies,
            "pred": self.output(Y),
        }
| 4,459 | 21.989691 | 90 | py |
Transformers-From-Optimization | Transformers-From-Optimization-main/energy_curve/load_data.py | from model import Transformer
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
from fastNLP.io import IMDBLoader , SST2Loader
from fastNLP.io import IMDBPipe , SST2Pipe
from fastNLP.embeddings import StaticEmbedding
import pdb
import pickle
from pathlib import Path
import matplotlib.pyplot as plt
def _load_or_build(save_path, make_loader, make_pipe):
    """Return a cached (data_bundle, embedding) pair, building and caching it on first use.

    save_path: Path of the pickle cache file.
    make_loader / make_pipe: zero-arg callables producing the fastNLP loader/pipe.
    NOTE: the cache is loaded with pickle — only use cache files you created yourself.
    """
    if not save_path.exists():
        loader = make_loader()
        pipe = make_pipe()
        data_bundle = pipe.process( loader.load(loader.download()) )
        word_vocab = data_bundle.get_vocab("words")
        word2vec_embed = StaticEmbedding(word_vocab, model_dir_or_name = "en")
        with open(save_path , "wb") as fil:
            pickle.dump([data_bundle , word2vec_embed] , fil)
    else:
        with open(save_path , "rb") as fil:
            data_bundle , word2vec_embed = pickle.load(fil)
    return data_bundle , word2vec_embed

def load_data(data_name = "imdb"):
    """Load the requested dataset ("imdb" or anything else -> SST-2) with its embedding.

    Returns (data_bundle, word2vec_embed). Results are cached to a local pickle,
    so downloads and preprocessing happen only once per dataset. The previously
    duplicated cache-or-build logic is factored into _load_or_build.
    """
    if data_name == "imdb":
        return _load_or_build(Path("./chach_datas.pkl"), IMDBLoader, IMDBPipe)
    return _load_or_build(Path("./chach_datas_sst2.pkl"), SST2Loader, SST2Pipe)
| 1,638 | 29.351852 | 82 | py |
pybullet-gym | pybullet-gym-master/pybulletgym/agents/agents_kerasrl.py | # with some extra arg parsing
from keras.models import Sequential, Model # The Sequential model is a sequential, feed-forward stack of layers.
from keras.layers import Dense, Activation, Flatten, Input, merge # Different types of layers
from keras.optimizers import Adam # A special type of optimizer
from rl.agents.cem import CEMAgent
from rl.memory import EpisodeParameterMemory
from rl.agents import DDPGAgent
from rl.memory import SequentialMemory # A first-in-first-out type of memory to do the experience replay on
from rl.random import OrnsteinUhlenbeckProcess # a noise process
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy # Instead of random actions, we tend to pick actions that have generated rewards before. As time goes on we only focus on one or two actions in each state.
from rl.memory import SequentialMemory # A first-in-first-out type of memory to do the experience replay on
from rl.agents import ContinuousDQNAgent
def add_opts(parser):
    """Register the agent model-selection option on an existing argparse parser."""
    help_text = "the dense-softmax-layer model (1) or the deep network (2)"
    parser.add_argument('--model-type', type=int, default=1, help=help_text)
class KerasCEMAgent(object):
    '''
    The cross-entropy method Learning Agent as described in http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.81.6579&rep=rep1&type=pdf

    Handles discrete action spaces only (see self.metadata).
    '''
    def __init__(self, opts):
        self.metadata = {
            'discrete_actions': True,
        }
        self.opts = opts
    def configure(self, observation_space_shape, nb_actions):
        # Build the policy network and compile the CEM agent.
        # NOTE(review): if opts.model_type is neither 1 nor 2, `model` is never
        # bound and the CEMAgent(...) call below raises NameError — confirm the
        # CLI restricts --model-type to {1, 2}.
        if self.opts.model_type == 1:
            # Option 1 : Simple model
            model = Sequential()
            model.add(Flatten(input_shape=(1,) + observation_space_shape))
            model.add(Dense(nb_actions))
            model.add(Activation('softmax'))
            print(model.summary())
        elif self.opts.model_type == 2:
            # Option 2: deep network
            model = Sequential()
            model.add(Flatten(input_shape=(1,) + observation_space_shape))
            model.add(Dense(16))
            model.add(Activation('relu'))
            model.add(Dense(16))
            model.add(Activation('relu'))
            model.add(Dense(16))
            model.add(Activation('relu'))
            model.add(Dense(nb_actions))
            model.add(Activation('softmax'))
            print(model.summary())
        # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
        # even the metrics!
        memory = EpisodeParameterMemory(limit=1000, window_length=1)
        self.agent = CEMAgent(model=model, nb_actions=nb_actions, memory=memory,
                              batch_size=50, nb_steps_warmup=2000, train_interval=50, elite_frac=0.05)
        self.agent.compile()
    def train(self, env, nb_steps, visualize, verbosity):
        # Okay, now it's time to learn something! We visualize the training here for show, but this
        # slows down training quite a lot. You can always safely abort the training prematurely using
        # Ctrl + C.
        self.agent.fit(env, nb_steps=nb_steps, visualize=visualize, verbose=verbosity)
    def test(self, env, nb_episodes, visualize):
        # Finally, evaluate our algorithm for 5 episodes.
        self.agent.test(env, nb_episodes=nb_episodes, visualize=visualize)
    def load_weights(self, load_file):
        self.agent.load_weights(load_file)
    def save_weights(self, save_file, overwrite):
        # After training is done, we save the best weights.
        self.agent.save_weights(save_file, overwrite=overwrite)
class KerasDDPGAgent(object):
    '''
    Deep Deterministic Policy Gradient (DDPG) agent as described in
    http://arxiv.org/abs/1509.02971. Handles continuous action spaces only
    (see self.metadata).
    '''
    def __init__(self, opts):
        self.metadata = {
            'discrete_actions': False,
        }
        self.opts = opts
    def configure(self, observation_space_shape, nb_actions):
        """Build the actor and critic networks and compile the DDPG agent."""
        # Actor: maps a (1, *obs_shape) observation window to an action vector.
        actor = Sequential()
        actor.add(Flatten(input_shape=(1,) + observation_space_shape))
        actor.add(Dense(16))
        actor.add(Activation('relu'))
        actor.add(Dense(16))
        actor.add(Activation('relu'))
        actor.add(Dense(16))
        actor.add(Activation('relu'))
        actor.add(Dense(nb_actions))
        actor.add(Activation('linear'))
        print(actor.summary())
        # Critic: Q(observation, action) -> scalar value.
        action_input = Input(shape=(nb_actions,), name='action_input')
        observation_input = Input(shape=(1,) + observation_space_shape, name='observation_input')
        flattened_observation = Flatten()(observation_input)
        # NOTE(review): `merge` is the legacy Keras 1.x API; Keras 2 replaced it
        # with keras.layers.concatenate — confirm the pinned Keras version.
        x = merge([action_input, flattened_observation], mode='concat')
        x = Dense(32)(x)
        x = Activation('relu')(x)
        x = Dense(32)(x)
        x = Activation('relu')(x)
        x = Dense(32)(x)
        x = Activation('relu')(x)
        x = Dense(1)(x)
        x = Activation('linear')(x)
        critic = Model(input=[action_input, observation_input], output=x)
        print(critic.summary())
        # Experience replay plus Ornstein-Uhlenbeck exploration noise.
        memory = SequentialMemory(limit=100000, window_length=1)
        random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
        self.agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                               memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
                               random_process=random_process, gamma=.99, target_model_update=1e-3)
        self.agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])
    def train(self, env, nb_steps, visualize, verbosity):
        # Visualizing slows training down considerably; Ctrl+C aborts safely.
        self.agent.fit(env, nb_steps=nb_steps, visualize=visualize, verbose=verbosity, nb_max_episode_steps=200)
    def test(self, env, nb_episodes, visualize):
        """Evaluate the trained policy for nb_episodes episodes."""
        self.agent.test(env, nb_episodes=nb_episodes, visualize=visualize, nb_max_episode_steps=200)
    def load_weights(self, load_file):
        self.agent.load_weights(load_file)
    def save_weights(self, save_file, overwrite):
        # Bug fix: `overwrite` was previously ignored (hard-coded to True),
        # inconsistent with the sibling agent classes in this module.
        self.agent.save_weights(save_file, overwrite=overwrite)
class KerasDDQNAgent(object):
    '''
    The deep Double Q Learning Agent as described in https://arxiv.org/abs/1509.06461

    Identical to KerasDQNAgent except that enable_double_dqn=True.
    Handles discrete action spaces only (see self.metadata).
    '''
    def __init__(self, opts):
        self.metadata = {
            'discrete_actions': True,
        }
        self.opts = opts
    def configure(self, observation_space_shape, nb_actions):
        # Build the Q-network and compile the double-DQN agent.
        model = Sequential()
        model.add(Flatten(input_shape=(1,) + observation_space_shape))  # input layer
        model.add(Dense(32))  # Just your regular fully connected NN layer
        model.add(Activation('tanh'))  # tanh activation layer
        model.add(Dense(16))  # more model capacity through fully connected NN layers
        model.add(Activation('relu'))  # Rectified Linear Units
        model.add(Dense(16))  # more model capacity through fully connected NN layers
        model.add(Activation('relu'))  # Rectified Linear Units
        model.add(Dense(nb_actions))  # fully connected NN layer with one output for each action
        model.add(Activation('linear'))  # we want linear activations in the end
        print(model.summary())
        memory = SequentialMemory(limit=50000, window_length=1)
        policy = BoltzmannQPolicy()
        self.agent = DQNAgent(enable_double_dqn=True, model=model, nb_actions=nb_actions, memory=memory,
                              nb_steps_warmup=10,
                              target_model_update=1e-2, policy=policy)
        self.agent.compile(Adam(lr=1e-3), metrics=['mae'])
    def train(self, env, nb_steps, visualize, verbosity):
        self.agent.fit(env, nb_steps=nb_steps, visualize=visualize, verbose=verbosity)
    def test(self, env, nb_episodes, visualize):
        self.agent.test(env, nb_episodes=nb_episodes, visualize=visualize)
    def load_weights(self, load_file):
        self.agent.load_weights(load_file)
    def save_weights(self, save_file, overwrite):
        self.agent.save_weights(save_file, overwrite)
class KerasDQNAgent(object):
    '''
    The deep Q Learning Agent as described in https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf

    Identical to KerasDDQNAgent except that enable_double_dqn=False.
    Handles discrete action spaces only (see self.metadata).
    '''
    def __init__(self, opts):
        self.metadata = {
            'discrete_actions': True,
        }
        self.opts = opts
    def configure(self, observation_space_shape, nb_actions):
        # Build the Q-network and compile the (single) DQN agent.
        model = Sequential()
        model.add(Flatten(input_shape=(1,) + observation_space_shape))  # input layer
        model.add(Dense(32))  # Just your regular fully connected NN layer
        model.add(Activation('tanh'))  # tanh activation layer
        model.add(Dense(16))  # more model capacity through fully connected NN layers
        model.add(Activation('relu'))  # Rectified Linear Units
        model.add(Dense(16))  # more model capacity through fully connected NN layers
        model.add(Activation('relu'))  # Rectified Linear Units
        model.add(Dense(nb_actions))  # fully connected NN layer with one output for each action
        model.add(Activation('linear'))  # we want linear activations in the end
        print(model.summary())
        memory = SequentialMemory(limit=50000, window_length=1)
        policy = BoltzmannQPolicy()
        self.agent = DQNAgent(enable_double_dqn=False, model=model, nb_actions=nb_actions, memory=memory,
                              nb_steps_warmup=10,
                              target_model_update=1e-2, policy=policy)
        self.agent.compile(Adam(lr=1e-3), metrics=['mae'])
    def train(self, env, nb_steps, visualize, verbosity):
        self.agent.fit(env, nb_steps=nb_steps, visualize=visualize, verbose=verbosity)
    def test(self, env, nb_episodes, visualize):
        self.agent.test(env, nb_episodes=nb_episodes, visualize=visualize)
    def load_weights(self, load_file):
        self.agent.load_weights(load_file)
    def save_weights(self, save_file, overwrite):
        self.agent.save_weights(save_file, overwrite)
class KerasNAFAgent(object):
    '''
    The Normalized Advantage Functions Agent as described in https://arxiv.org/abs/1603.00748

    Handles continuous action spaces only (see self.metadata).
    '''
    def __init__(self, opts):
        self.metadata = {
            'discrete_actions': False,
        }
        self.opts = opts
    def configure(self, observation_space_shape, nb_actions):
        """Build the V, mu and L networks and compile the NAF agent."""
        # Build all necessary models: V, mu, and L networks.
        # V: state-value network, observation -> scalar.
        V_model = Sequential()
        V_model.add(Flatten(input_shape=(1,) + observation_space_shape))
        V_model.add(Dense(16))
        V_model.add(Activation('relu'))
        V_model.add(Dense(16))
        V_model.add(Activation('relu'))
        V_model.add(Dense(16))
        V_model.add(Activation('relu'))
        V_model.add(Dense(1))
        V_model.add(Activation('linear'))
        print(V_model.summary())
        # mu: deterministic policy network, observation -> action vector.
        mu_model = Sequential()
        mu_model.add(Flatten(input_shape=(1,) + observation_space_shape))
        mu_model.add(Dense(16))
        mu_model.add(Activation('relu'))
        mu_model.add(Dense(16))
        mu_model.add(Activation('relu'))
        mu_model.add(Dense(16))
        mu_model.add(Activation('relu'))
        mu_model.add(Dense(nb_actions))
        mu_model.add(Activation('linear'))
        print(mu_model.summary())
        # L: lower-triangular factor of the advantage's quadratic term.
        action_input = Input(shape=(nb_actions,), name='action_input')
        observation_input = Input(shape=(1,) + observation_space_shape, name='observation_input')
        # NOTE(review): `merge` is the legacy Keras 1.x API; Keras 2 replaced it
        # with keras.layers.concatenate — confirm the pinned Keras version.
        x = merge([action_input, Flatten()(observation_input)], mode='concat')
        x = Dense(32)(x)
        x = Activation('relu')(x)
        x = Dense(32)(x)
        x = Activation('relu')(x)
        x = Dense(32)(x)
        x = Activation('relu')(x)
        # Bug fix: Python 3 `/` yields a float, but Dense units must be an int.
        # Use floor division for the n*(n+1)/2 lower-triangular entries of L.
        x = Dense((nb_actions * nb_actions + nb_actions) // 2)(x)
        x = Activation('linear')(x)
        L_model = Model(input=[action_input, observation_input], output=x)
        print(L_model.summary())
        # Experience replay plus Ornstein-Uhlenbeck exploration noise.
        memory = SequentialMemory(limit=100000, window_length=1)
        random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3, size=nb_actions)
        self.agent = ContinuousDQNAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
                                        memory=memory, nb_steps_warmup=100, random_process=random_process,
                                        gamma=.99, target_model_update=1e-3)
        self.agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])
    def train(self, env, nb_steps, visualize, verbosity):
        # Visualizing slows training down considerably; Ctrl+C aborts safely.
        self.agent.fit(env, nb_steps=nb_steps, visualize=visualize, verbose=verbosity, nb_max_episode_steps=200)
    def test(self, env, nb_episodes, visualize):
        """Evaluate the trained policy for nb_episodes episodes."""
        self.agent.test(env, nb_episodes=nb_episodes, visualize=visualize, nb_max_episode_steps=200)
    def load_weights(self, load_file):
        self.agent.load_weights(load_file)
    def save_weights(self, save_file, overwrite):
        # After training is done, we save the final weights.
        self.agent.save_weights(save_file, overwrite=overwrite)
class TemplateAgent(object):
    '''
    This is your template to copy from which an agent should fullfill. I know this is not as duck-type as possible, but helps to quickly implement new ones.

    Every method below is a deliberate no-op placeholder.
    '''
    def __init__(self, opts):
        self.opts = opts
    def configure(self, observation_space_shape, nb_actions):
        # Build and compile the underlying model(s) here.
        pass
    def train(self, env, nb_steps, visualize, verbosity):
        # Run the training loop against `env` here.
        pass
    def test(self, env, nb_episodes, visualize):
        # Evaluate the trained agent here.
        pass
    def load_weights(self, load_file):
        # Restore model weights from `load_file` here.
        pass
    def save_weights(self, save_file, overwrite):
        # Persist model weights to `save_file` here.
        pass
| 12,858 | 36.272464 | 194 | py |
pybullet-gym | pybullet-gym-master/pybulletgym/agents/__init__.py | # ---- register agents ----------
import agent_register
# agent_register.register(
# id='BaselinesDQNAgent-v0',
# entry_point='agents_baselines:BaselinesDQNAgent'
# )
agent_register.register(
id='KerasCEMAgent-v0',
entry_point='pybullet_envs.agents.agents_kerasrl:KerasCEMAgent'
)
agent_register.register(
id='KerasDDPGAgent-v0',
entry_point='pybullet_envs.agents.agents_kerasrl:KerasDDPGAgent'
)
agent_register.register(
id='KerasDDQNAgent-v0',
entry_point='pybullet_envs.agents.agents_kerasrl:KerasDDQNAgent'
)
agent_register.register(
id='KerasDQNAgent-v0',
entry_point='pybullet_envs.agents.agents_kerasrl:KerasDQNAgent'
)
agent_register.register(
id='KerasNAFAgent-v0',
entry_point='pybullet_envs.agents.agents_kerasrl:KerasNAFAgent'
)
# from agents_baselines import BaselinesDQNAgent
from agents_kerasrl import KerasCEMAgent, KerasDDPGAgent, KerasDDQNAgent, KerasDQNAgent, KerasNAFAgent
| 912 | 24.361111 | 102 | py |
dilran | dilran-main/inference.py | # inference fused image
import os
import argparse
import torch
import torch.nn as nn
import sys
from torchmetrics import PeakSignalNoiseRatio
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from models.model_v5 import *
from our_utils import *
from eval import psnr, ssim, mutual_information
from evaluation_metrics import fsim, nmi, en
import skimage.io as io
# import sys
# sys.path.append("./model")
# CLI configuration for fusing CT/MRI test slices with a pretrained model.
parser = argparse.ArgumentParser(description='Inference Fused Image configs')
parser.add_argument('--test_folder', type=str, default='./testset', help='input test image')
parser.add_argument('--model', type=str, default='./res/pretrained_models/model_v5/last.pt', help='which model to use')
parser.add_argument('--save_folder', type=str, default='./res/fused_image', help='input image to use')
parser.add_argument('--output_filename', type=str, help='where to save the output image')
# NOTE(review): with action='store_true', default='true' (a non-empty string)
# makes opt.cuda truthy even when --cuda is absent, so the CPU branch below is
# unreachable; default should likely be False — confirm.
parser.add_argument('--cuda', action='store_true', help='use cuda', default='true')
opt = parser.parse_args()
########### gpu ###############
device = torch.device("cuda:0" if opt.cuda else "cpu")
###############################
######### make dirs ############
# save_dir = os.path.join(opt.save_folder, "model_vf_sfnnMean")
# if not os.path.exists(save_dir):
#     os.mkdir(save_dir)
###############################
####### loading pretrained model ########
model = fullModel().to(device)
model.load_state_dict(torch.load(opt.model))
#model = torch.load(opt.model)
#########################################
########### loading test set ###########
# Pre-saved tensors of shape (slices, C, H, W) for each modality.
test_ct = torch.load(os.path.join(opt.test_folder, 'ct_test.pt')).to(device)
test_mri = torch.load(os.path.join(opt.test_folder, 'mri_test.pt')).to(device)
########################################
# NOTE(review): this rebinding shadows the `psnr` imported from eval above —
# the torchmetrics object is what the metric loop below actually calls; confirm
# that is intended.
psnr = PeakSignalNoiseRatio()
# Per-slice metric accumulators.
psnrs = []
ssims = []
nmis = []
mis = []
fsims = []
ens = []
for slice in range(test_ct.shape[0]):
# if slice > 0:
# break
ct_slice = test_ct[slice,:,:,:].unsqueeze(0)
mri_slice = test_mri[slice,:,:,:].unsqueeze(0)
ct_fe = model.fe(ct_slice)
#print(ct_fe.shape)
mri_fe = model.fe(mri_slice)
fused = fusion_strategy(ct_fe, mri_fe, device, "SFNN")
#fused = torch.maximum(ct_fe, mri_fe)
final = model.recon(fused)
#print(final.squeeze(0).squeeze(0))
final = final.squeeze(0).squeeze(0).detach().cpu().clamp(min=0, max=1)
gt1 = ct_slice.squeeze(0).squeeze(0).cpu().clamp(min=0, max=1)
#print(torch.min(gt1), torch.max(gt1))
gt2 = mri_slice.squeeze(0).squeeze(0).cpu().clamp(min=0, max=1)
# io.imsave(os.path.join(save_dir, "fused_{}.jpg".format(slice)), (final.numpy() * 255).astype(np.uint8))
# io.imsave(os.path.join(save_dir, "mri_{}.jpg".format(slice)), (gt2.numpy() * 255).astype(np.uint8))
# io.imsave(os.path.join(save_dir, "ct_{}.jpg".format(slice)), (gt1.numpy() * 255).astype(np.uint8))
#print("image {} saved".format(slice))
psnr_val1 = psnr(final, gt1)
psnr_val2 = psnr(final, gt2)
psnr_val = (psnr_val1 + psnr_val2) / 2
#print(psnr_val.item())
psnrs.append(psnr_val.item())
ssim_val1 = ssim(final, gt1)
ssim_val2 = ssim(final, gt2)
ssim_val = (ssim_val1 + ssim_val2) / 2
#print(ssim_val)
ssims.append(ssim_val)
nmi_val1 = nmi(final, gt1)
nmi_val2 = nmi(final, gt2)
nmi_val = (nmi_val1 + nmi_val2) / 2
#print(nmi_val)
nmis.append(nmi_val)
mi_val1 = mutual_information(final, gt1)
mi_val2 = mutual_information(final, gt2)
mi_val = (mi_val1 + mi_val2) / 2
#print(mi_val)
mis.append(mi_val)
fsim_val1 = fsim(final, gt1)
fsim_val2 = fsim(final, gt2)
fsim_val = (fsim_val1 + fsim_val2) / 2
fsims.append(fsim_val)
en_val = en(final)
ens.append(en_val)
#plt.imshow(ct_fe[0,32,:,:].detach().cpu().numpy(), "gray")
#plt.show()
# plt.figure(figsize=(12, 5))
# plt.subplot(1, 3, 1)
# plt.imshow(ct_slice.squeeze(0).squeeze(0).cpu().numpy(), "gray")
# plt.title("CT Slice")
# plt.subplot(1, 3, 2)
# plt.imshow(mri_slice.squeeze(0).squeeze(0).cpu().numpy(), "gray")
# plt.title("MRI Slice")
# plt.subplot(1, 3, 3)
# plt.imshow(final.numpy(), "gray")
# plt.title("Fused Slice")
# plt.show()
print("psnrs")
print(sum(psnrs) / len(psnrs))
print("ssims")
print(sum(ssims) / len(ssims))
print("nmis")
print(sum(nmis) / len(nmis))
print("mis")
print(sum(mis) / len(mis))
print("fsims")
print(sum(fsims) / len(fsims))
print("entropy")
print(sum(ens) / len(ens)) | 4,455 | 29.944444 | 119 | py |
# Validation script for the project
# Validate a trained medical image fusion model
# Author: Reacher, last modify Nov. 28, 2022
'''
Change log:
Reacher: file created
'''
from evaluation_metrics import *
# run validation for every epoch
import os
import argparse
import torch
import torch.nn as nn
from torchmetrics import PeakSignalNoiseRatio
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from model import *
from our_utils import *
# Script-level configuration: hard-coded (rather than argparse) because
# validate() below is imported and called from the training scripts.
test_folder = './testset'
save_folder = './res/fused_image'
output_filename = None
cuda = True
########### gpu ###############
device = torch.device("cuda:0" if cuda else "cpu")
###############################
######### make dirs ############
if not os.path.exists(save_folder):
    os.mkdir(save_folder)
###############################
####### loading pretrained model ########
#########################################
########### loading test set ###########
# Held-out tensors written by the training scripts ([N, C, H, W]).
test_ct = torch.load(os.path.join(test_folder, 'ct_test.pt')).to(device)
test_mri = torch.load(os.path.join(test_folder, 'mri_test.pt')).to(device)
########################################
# psnr = PeakSignalNoiseRatio()
# for strategy in [ "addition", "average", "FER", "L1NW", "AL1NW", "FL1N"]:
# for strategy in ["average", "max_val", "FER", "FL1N"]:
def validate(model_pt):
    """
    Score a saved fusion model on the held-out CT/MRI test tensors.

    Loads the checkpoint at ``model_pt`` into a fresh fullModel, fuses every
    test slice pair with the "SFNN" feature-fusion strategy, and scores the
    reconstruction against both source modalities (each metric is the mean
    of the CT-vs-fused and MRI-vs-fused scores).

    Returns:
        (psnr, ssim, nmi, mi, fsim) test-set averages as floats.
    """
    model = fullModel().to(device)
    model.load_state_dict(torch.load(model_pt, map_location=device))
    for strategy in ["SFNN"]:
        scores = {name: [] for name in ("psnr", "ssim", "nmi", "mi", "fsim")}
        for idx in range(test_ct.shape[0]):
            # One slice pair, with a leading batch dimension for the model.
            ct_img = test_ct[idx, :, :, :].unsqueeze(0)
            mri_img = test_mri[idx, :, :, :].unsqueeze(0)
            # Encode each modality, fuse the feature maps, then decode.
            fused = fusion_strategy(model.fe(ct_img), model.fe(mri_img),
                                    device=device, strategy=strategy)
            out = model.recon(fused).squeeze(0).squeeze(0).detach().cpu().clamp(min=0, max=1.)
            ref_ct = ct_img.squeeze(0).squeeze(0).cpu().clamp(min=0, max=1.)
            ref_mri = mri_img.squeeze(0).squeeze(0).cpu().clamp(min=0, max=1.)
            # SSIM needs [N, C, H, W] inputs; the other metrics take 2-D maps.
            out_4d = out.unsqueeze(0).unsqueeze(0)
            scores["psnr"].append((psnr(out, ref_ct) + psnr(out, ref_mri)) / 2)
            scores["ssim"].append((ssim(out_4d, ref_ct.unsqueeze(0).unsqueeze(0))
                                   + ssim(out_4d, ref_mri.unsqueeze(0).unsqueeze(0))) / 2)
            scores["nmi"].append((nmi(out, ref_ct) + nmi(out, ref_mri)) / 2)
            scores["mi"].append((mutual_information(out, ref_ct)
                                 + mutual_information(out, ref_mri)) / 2)
            scores["fsim"].append((fsim(out, ref_ct) + fsim(out, ref_mri)) / 2)
        print(strategy)
    return (np.mean(scores["psnr"]), np.mean(scores["ssim"]), np.mean(scores["nmi"]),
            np.mean(scores["mi"]), np.mean(scores["fsim"]))
| 3,651 | 30.756522 | 92 | py |
# Evaluation Metrics and get results
# Author: Reacher Z., last modify Nov. 26, 2022
"""
Change log:
- Reacher: file created, implement PSNR, SSIM, NMI, MI
"""
import numpy as np
import torch
from skimage.metrics import peak_signal_noise_ratio, normalized_mutual_information
from scipy.stats import entropy
from torchmetrics import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure
from sklearn.metrics import mutual_info_score
# import piq
import cv2
import phasepack.phasecong as pc
import skimage.measure as skm
def psnr(img_pred: torch.Tensor, img_true: torch.Tensor) -> float:
    """
    Peak signal-to-noise ratio between two images, assuming a dynamic
    range of 1.0 (inputs are expected to lie in [0, 1]).

    Computed in closed form as 10 * log10(1 / MSE) — equivalent to
    torchmetrics' ``PeakSignalNoiseRatio(data_range=1.0)`` used before,
    but without constructing a new metric object on every call.

    Return: float (``inf`` when the images are identical)
    """
    mse = torch.mean((img_pred.float() - img_true.float()) ** 2)
    if mse == 0:
        # log10(1/0) diverges; torchmetrics likewise reports infinity here.
        return float("inf")
    return (10.0 * torch.log10(1.0 / mse)).item()
def ssim(img_pred: torch.Tensor, img_true: torch.Tensor):
    """
    Structural similarity (SSIM) with a fixed dynamic range of 1.0.

    Input: [N, C, H, W] tensors.
    Return: float
    """
    metric = StructuralSimilarityIndexMeasure(data_range=1.0)
    return metric(img_pred, img_true).item()
def nmi(img_pred: torch.Tensor, img_true: torch.Tensor):
    """
    Normalized mutual information (NMI) between two images, delegated to
    skimage's normalized_mutual_information.

    Return: float
    """
    pred_arr = np.asarray(img_pred)
    true_arr = np.asarray(img_true)
    return normalized_mutual_information(pred_arr, true_arr)
# def mutual_information(img_pred: torch.Tensor, img_true: torch.Tensor):
# """
# Mutual Information:
# I(A,B) = H(A) + H(B) - H(A,B)
# H(A)= -sum p(a_i) * log p(a_i)
# Mutual information is a measure of image matching, that does not require the signal
# to be the same in the two images. It is a measure of how well you can predict the signal
# in the second image, given the signal intensity in the first.
#
# Return: float
# """
# img_pred_uint8 = (np.array(img_pred.squeeze()) * 255).flatten()
# img_true_uint8 = (np.array(img_true.squeeze()) * 255).flatten()
# size = img_true_uint8.shape[-1]
# pa = np.histogram(img_pred_uint8, 256, (0, 255))[0] / size
# pb = np.histogram(img_true_uint8, 256, (0, 255))[0] / size
# ha = -np.sum(pa * np.log(pa + 1e-20))
# hb = -np.sum(pb * np.log(pb + 1e-20))
#
# pab = (np.histogram2d(img_pred_uint8, img_true_uint8, 256, [[0, 255], [0, 255]])[0]) / size
# hab = -np.sum(pab * np.log(pab + 1e-20))
# mi = ha + hb - hab
# return mi
def mutual_information(img_pred: torch.Tensor, img_true: torch.Tensor):
    """
    Mutual information I(A;B) = H(A) + H(B) - H(A,B), in nats.

    The joint distribution is estimated with a 2-D histogram
    (numpy.histogramdd, default binning); scipy's ``entropy`` normalizes
    its input, so the density scaling of the histogram cancels out.

    Return: float
    """
    a = np.asarray(img_pred).ravel()
    b = np.asarray(img_true).ravel()
    joint, _ = np.histogramdd([a, b], density=True)
    h_a = entropy(joint.sum(axis=0))
    h_b = entropy(joint.sum(axis=1))
    h_ab = entropy(joint.ravel())
    return h_a + h_b - h_ab
# def fsim(img_pred: torch.Tensor, img_true: torch.Tensor):
# print(img_pred.shape)
# return piq.fsim(img_pred.unsqueeze(0).unsqueeze(0), img_true.unsqueeze(0).unsqueeze(0))
# def fsim(img_pred: torch.Tensor, img_true: torch.Tensor):
# img_pred_np = np.array(img_pred.squeeze())
# img_true_np = np.array(img_true.squeeze())
# print(img_pred.shape)
# return quality_metrics.fsim(img_true_np, img_pred_np)
# # return piq.fsim(img_pred.unsqueeze(0).unsqueeze(0), img_true.unsqueeze(0).unsqueeze(0))
def _gradient_magnitude(img: np.ndarray, img_depth):
    """
    Gradient magnitude of `img` from horizontal and vertical Scharr
    responses: sqrt(Gx^2 + Gy^2).
    """
    gx = cv2.Scharr(img, img_depth, 1, 0)
    gy = cv2.Scharr(img, img_depth, 0, 1)
    return np.sqrt(gx ** 2 + gy ** 2)
def _similarity_measure(x, y, constant):
    """
    FSIM similarity kernel: (2xy + c) / (x^2 + y^2 + c).

    Equals 1 when x == y; the constant stabilizes the ratio near zero.
    """
    return (2 * x * y + constant) / (x ** 2 + y ** 2 + constant)
def fsim(img_pred: torch.Tensor, img_true: torch.Tensor, T1=0.85, T2=160) -> float:
    """
    Feature-based similarity index (FSIM) between a predicted and a true
    image, built from phase congruency (PC) and gradient magnitude (GM).

    PC is computed with phasepack's Kovesi-style implementation; GM uses
    the Scharr operator via OpenCV. The similarity maps for PC and GM are
    combined and pooled with max(PC1, PC2) as the weighting, following the
    original FSIM paper.

    Args:
        img_pred: predicted image (2-D after squeeze).
        img_true: reference image (2-D after squeeze).
        T1: stabilizing constant for the PC similarity term (dynamic-range
            dependent; paper default).
        T2: stabilizing constant for the GM similarity term (paper default).

    Return: float in roughly [0, 1]; higher means more similar.
    """
    alpha = beta = 1  # parameters used to adjust the relative importance of PC and GM features
    fsim_list = []
    pred_img = np.array(img_pred.squeeze())
    org_img = np.array(img_true.squeeze())
    # Single-band loop kept from the multi-band formulation (grayscale here).
    for it in range(1):
        # Calculate the PC for original and predicted images
        pc1_2dim = pc(org_img[:, :], nscale=4, minWaveLength=6, mult=2, sigmaOnf=0.5978)
        pc2_2dim = pc(pred_img[:, :], nscale=4, minWaveLength=6, mult=2, sigmaOnf=0.5978)
        # pc1_2dim and pc2_2dim are tuples with the length 7, we only need the 4th element which is the PC.
        # The PC itself is a list with the size of 6 (number of orientation). Therefore, we need to
        # calculate the sum of all these 6 arrays.
        pc1_2dim_sum = np.zeros((org_img.shape[0], org_img.shape[1]), dtype=np.float64)
        pc2_2dim_sum = np.zeros((pred_img.shape[0], pred_img.shape[1]), dtype=np.float64)
        for orientation in range(6):
            pc1_2dim_sum += pc1_2dim[4][orientation]
            pc2_2dim_sum += pc2_2dim[4][orientation]
        # Calculate GM for original and predicted images based on Scharr operator
        gm1 = _gradient_magnitude(org_img[:, :], cv2.CV_16U)
        gm2 = _gradient_magnitude(pred_img[:, :], cv2.CV_16U)
        # Calculate similarity measure for PC1 and PC2
        S_pc = _similarity_measure(pc1_2dim_sum, pc2_2dim_sum, T1)
        # Calculate similarity measure for GM1 and GM2
        S_g = _similarity_measure(gm1, gm2, T2)
        S_l = (S_pc ** alpha) * (S_g ** beta)
        # Pool the combined similarity map, weighted by the stronger PC response.
        numerator = np.sum(S_l * np.maximum(pc1_2dim_sum, pc2_2dim_sum))
        denominator = np.sum(np.maximum(pc1_2dim_sum, pc2_2dim_sum))
        fsim_list.append(numerator / denominator)
    return np.mean(fsim_list)
def en(img: torch.Tensor):
    """
    Shannon entropy of an image (skimage.measure.shannon_entropy) — a
    no-reference sharpness/information measure for the fused output.

    Return: float
    """
    # Bug fix: the original return line was corrupted by extraction residue
    # ("return entropy | 7,123 | ..."), which is a syntax error; it also
    # named the local `entropy`, shadowing scipy.stats.entropy imported at
    # module level. Return the value directly instead.
    return skm.shannon_entropy(img)
# Training script for the project
# Author: Simon Zhou, last modify Nov. 18, 2022
'''
Change log:
-Simon: file created, write some training code
-Simon: refine training script
-Reacher: train v3
'''
import argparse
import os
import sys
sys.path.append("../")
from tqdm import trange
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.models import vgg16_bn
import meta_config as config
from model import *
from our_utils import *
from dataset_loader import *
from loss import *
from val import validate
import wandb
# ---- CLI options for the training run --------------------------------------
parser = argparse.ArgumentParser(description='parameters for the training script')
parser.add_argument('--dataset', type=str, default="CT-MRI",
                    help="which dataset to use, available option: CT-MRI, MRI-PET, MRI-SPECT")
parser.add_argument('--batch_size', type=int, default=4, help='batch size for training')
parser.add_argument('--epochs', type=int, default=100, help='number of epochs for training')
parser.add_argument('--lr', type=float, default=0.0001, help='learning rate for training')
parser.add_argument('--lr_decay', type=bool, default=False, help='decay learing rate?')
parser.add_argument('--accum_batch', type=int, default=1, help='number of batches for gradient accumulation')
parser.add_argument('--lambda1', type=float, default=0.5, help='weight for image gradient loss')
parser.add_argument('--lambda2', type=float, default=0.5, help='weight for perceptual loss')
# parser.add_argument('--checkpoint', type=str, default='./model', help='Path to checkpoint')
parser.add_argument('--cuda', action='store_true', help='whether to use cuda', default=True)
parser.add_argument('--seed', type=int, default=3407, help='random seed to use')
parser.add_argument('--base_loss', type=str, default='l1_charbonnier',
                    help='which loss function to use for pixel-level (l2 or l1 charbonnier)')
opt = parser.parse_args()
######### whether to use cuda ####################
device = torch.device("cuda:0" if opt.cuda else "cpu")
#################################################
########## seeding ##############
seed_val = opt.seed
random_seed(seed_val, opt.cuda)
################################
############ making dirs########################
if not os.path.exists(config.res_dir):
    os.mkdir(config.res_dir)
model_dir = os.path.join(config.res_dir, "pretrained_models")
if not os.path.exists(model_dir):
    os.mkdir(model_dir)
if not os.path.exists(config.test_data_dir):
    os.mkdir(config.test_data_dir)
################################################
####### loading dataset ####################################
target_dir = os.path.join(config.data_dir, opt.dataset)
ct, mri = get_common_file(target_dir)
train_ct, train_mri, test_ct, test_mri = load_data(ct, target_dir, config.test_num)
# Persist the held-out split so val.py / inference.py score the same slices.
torch.save(test_ct, os.path.join(config.test_data_dir, "ct_test.pt"))
torch.save(test_mri, os.path.join(config.test_data_dir, "mri_test.pt"))
# print(train_ct.shape, train_mri.shape, test_ct.shape, test_mri.shape)
# CT and MRI slices are concatenated into one pool: this script trains the
# autoencoder on single images, not on fused pairs.
train_total = torch.cat((train_ct, train_mri), dim=0).to(device)
# these loaders return index, not the actual image
train_loader, val_loader = get_loader(train_ct, train_mri, config.train_val_ratio, opt.batch_size)
print("train loader length: ", len(train_loader), " val loder length: ", len(val_loader))
# check the seed is working
# for batch_idx in train_loader:
#     batch_idx = batch_idx.view(-1).long()
#     print(batch_idx)
# print("validation index")
# for batch_idx in val_loader:
#     batch_idx = batch_idx.view(-1).long()
#     print(batch_idx)
# sys.exit()
############################################################
############ loading model #####################
model = fullModel().to(device)
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
if opt.lr_decay:
    stepLR = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
###################################################
##### downloading pretrained vgg model ##################
vgg = vgg16_bn(pretrained=True)
########################################################
############## train model ##############
wandb.init(project="test-project", entity="csc2529", config=opt) # visualize in wandb
# wandb.config = {
#     "learning_rate": opt.lr,
#     "epochs": opt.epochs,
#     "batch_size": opt.batch_size,
#     "lambda1": c.lambda1,
#     "lambda2": c.lambda2
# }
wandb.watch(model)
# gradient accumulation for small batch
NUM_ACCUMULATION_STEPS = opt.accum_batch
train_loss = []
val_loss = []
t = trange(opt.epochs, desc='Training progress...', leave=True)
# Best-so-far trackers used by the checkpointing logic in the epoch loop.
lowest_val_loss = int(1e9)
best_ssim = 0
# Main training loop: one pass over train_loader per epoch, then a validation
# pass, wandb logging, checkpointing, and a full metric evaluation.
for i in t:
    print("new epoch {} starts!".format(i))
    # clear gradient in model
    model.zero_grad()
    b_loss = 0
    # train model
    model.train()
    for j, batch_idx in enumerate(train_loader):
        # clear gradient in optimizer
        optimizer.zero_grad()
        # loaders yield index tensors; gather the actual images from train_total
        batch_idx = batch_idx.view(-1).long()
        img = train_total[batch_idx]
        img_out = model(img)
        # compute loss (returns total, mse, gradient and perceptual terms)
        loss, _, _, _ = loss_func2(vgg, img_out, img, opt.lambda1, opt.lambda2, config.block_idx, device)
        # back propagate and update weights
        # print("batch reg, grad, percep loss: ", reg_loss.item(), img_grad.item(), percep.item())
        # loss = loss / NUM_ACCUMULATION_STEPS
        loss.backward()
        # if ((j + 1) % NUM_ACCUMULATION_STEPS == 0) or (j + 1 == len(train_loader)):
        optimizer.step()
        b_loss += loss.item()
        # wandb.log({"loss": loss})
    # store loss
    ave_loss = b_loss / len(train_loader)
    train_loss.append(ave_loss)
    print("epoch {}, training loss is: {}".format(i, ave_loss))
    # validation
    val_loss = []
    val_display_img = []
    with torch.no_grad():
        b_loss = 0
        # eval model, unable update weights
        model.eval()
        for k, batch_idx in enumerate(val_loader):
            batch_idx = batch_idx.view(-1).long()
            val_img = train_total[batch_idx]
            val_img_out = model(val_img)
            # display first image to visualize, this can be changed
            val_display_img.extend([val_img_out[i].squeeze(0).cpu().numpy() for i in range(1)])
            # BUG FIX: the validation loss was previously computed on the
            # *training* batch (img_out, img) left over from the loop above,
            # making the reported validation loss meaningless. Score the
            # validation batch instead.
            loss, _, _, _ = loss_func2(vgg, val_img_out, val_img, opt.lambda1, opt.lambda2, config.block_idx, device)
            b_loss += loss.item()
        ave_val_loss = b_loss / len(val_loader)
        val_loss.append(ave_val_loss)
        print("epoch {}, validation loss is: {}".format(i, ave_val_loss))
    # define a metric we are interested in the minimum of
    wandb.define_metric("train loss", summary="min")
    # define a metric we are interested in the maximum of
    wandb.define_metric("val loss", summary="min")
    wandb.log({"train loss": ave_loss, "epoch": i})
    wandb.log({"val loss": ave_val_loss, "epoch": i})
    wandb.log({"val sample images": [wandb.Image(img) for img in val_display_img]})
    # save model whenever validation loss improves
    if ave_val_loss < lowest_val_loss:
        torch.save(model.state_dict(), model_dir + "/model_at_{}.pt".format(i))
        lowest_val_loss = ave_val_loss
        print("model is saved in epoch {}".format(i))
    # Evaluate during training
    # Save the current model
    torch.save(model.state_dict(), model_dir + "/current.pt".format(i))
    val_psnr, val_ssim, val_nmi, val_mi, val_fsim = validate(model_dir + "/current.pt")
    # define a metric we are interested in the maximum of
    wandb.define_metric("PSNR", summary="max")
    wandb.define_metric("SSIM", summary="max")
    wandb.define_metric("NMI", summary="max")
    wandb.define_metric("MI", summary="max")
    wandb.define_metric("FSIM", summary="max")
    wandb.log({"PSNR": val_psnr, "epoch": i})
    wandb.log({"SSIM": val_ssim, "epoch": i})
    wandb.log({"NMI": val_nmi, "epoch": i})
    wandb.log({"MI": val_mi, "epoch": i})
    wandb.log({"FSIM": val_fsim, "epoch": i})
    print("PSNR", "SSIM", "NMI", "MI", "FSIM")
    print(val_psnr, val_ssim, val_nmi, val_mi, val_fsim)
    if val_ssim > best_ssim:
        best_ssim = val_ssim
        print(f"ヾ(◍°∇°◍)ノ゙ New best SSIM = {best_ssim}")
        # overwrite
        torch.save(model.state_dict(), model_dir + "/best.pt".format(i))
    if i == opt.epochs - 1:
        torch.save(model.state_dict(), model_dir + "/last.pt".format(i))
    # lr decay update
    if opt.lr_decay:
        stepLR.step()
########################################
| 8,409 | 35.885965 | 109 | py |
# Training script for the project
# Author: Simon Zhou, last modify Nov. 18, 2022
'''
Change log:
-Simon: file created, write some training code
-Simon: refine training script
-Reacher: train v3
-Reacher: add model choice
-Simon: train with paired images, use FL1N fusion strategy
'''
import argparse
import sys
sys.path.append("../")
from tqdm import trange
import torch.optim as optim
from torchvision.models import vgg16_bn
import meta_config as config
from models.model_v5 import *
from our_utils import *
from dataset_loader import *
from loss import *
from val import validate
# from model_msrpan import SRN
import wandb
# ---- CLI options (paired-image training; fusion happens during training) ----
parser = argparse.ArgumentParser(description='parameters for the training script')
parser.add_argument('--dataset', type=str, default="CT-MRI",
                    help="which dataset to use, available option: CT-MRI, MRI-PET, MRI-SPECT")
parser.add_argument('--batch_size', type=int, default=3, help='batch size for training')
parser.add_argument('--epochs', type=int, default=100, help='number of epochs for training')
parser.add_argument('--lr', type=float, default=0.0001, help='learning rate for training')
parser.add_argument('--lr_decay', type=bool, default=False, help='decay learing rate?')
parser.add_argument('--accum_batch', type=int, default=1, help='number of batches for gradient accumulation')
parser.add_argument('--lambda1', type=float, default=0.2, help='weight for image gradient loss')
parser.add_argument('--lambda2', type=float, default=0.2, help='weight for perceptual loss')
# parser.add_argument('--checkpoint', type=str, default='./model', help='Path to checkpoint')
parser.add_argument('--cuda', action='store_true', help='whether to use cuda', default=True)
parser.add_argument('--seed', type=int, default=3407, help='random seed to use')
parser.add_argument('--base_loss', type=str, default='l2_norm',
                    help='which loss function to use for pixel-level (l2 or l1 charbonnier)')
opt = parser.parse_args()
######### whether to use cuda ####################
device = torch.device("cuda:0" if opt.cuda else "cpu")
#################################################
########## seeding ##############
seed_val = opt.seed
random_seed(seed_val, opt.cuda)
################################
############ making dirs########################
if not os.path.exists(config.res_dir):
    os.mkdir(config.res_dir)
model_dir = os.path.join(config.res_dir, "model_v5_trainPair")
if not os.path.exists(model_dir):
    os.mkdir(model_dir)
if not os.path.exists(config.test_data_dir):
    os.mkdir(config.test_data_dir)
################################################
####### loading dataset ####################################
target_dir = os.path.join(config.data_dir, opt.dataset)
ct, mri = get_common_file(target_dir)
train_ct, train_mri, test_ct, test_mri = load_data(ct, target_dir, config.test_num)
# Save Test Set
# torch.save(test_ct, os.path.join(config.test_data_dir, "ct_test.pt"))
# torch.save(test_mri, os.path.join(config.test_data_dir, "mri_test.pt"))
# torch.save(test_gt, os.path.join(config.test_data_dir, "test_gt.pt"))
# print(train_ct.shape, train_mri.shape, test_ct.shape, test_mri.shape)
# train_total = torch.cat((train_ct, train_mri), dim=0).to(device)
# Unlike train_with_val.py, CT and MRI stay separate: each batch index picks
# a *pair* of slices that is fused inside the training loop.
train_ct = train_ct.to(device)
train_mri = train_mri.to(device)
# these loaders return index, not the actual image
train_loader, val_loader = get_loader2(train_ct, train_mri, config.train_val_ratio, opt.batch_size)
print("train loader length: ", len(train_loader), " val loder length: ", len(val_loader))
# check the seed is working
# for batch_idx in train_loader:
#     batch_idx = batch_idx.view(-1).long()
#     print(batch_idx)
# print("validation index")
# for batch_idx in val_loader:
#     batch_idx = batch_idx.view(-1).long()
#     print(batch_idx)
# sys.exit()
############################################################
"""
choose model
"""
model = fullModel().to(device)
print("Default: Training ours")
############ loading model #####################
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
if opt.lr_decay:
    stepLR = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
###################################################
##### downloading pretrained vgg model ##################
vgg = vgg16_bn(pretrained=True)
########################################################
############## train model ##############
wandb.init(project="test-project", entity="csc2529", config=opt) # visualize in wandb
wandb.watch(model)
# gradient accumulation for small batch
NUM_ACCUMULATION_STEPS = opt.accum_batch
train_loss = []
val_loss = []
t = trange(opt.epochs, desc='Training progress...', leave=True)
# Best-so-far trackers used by the checkpointing logic below.
lowest_val_loss = int(1e9)
best_ssim = 0
for i in t:
    print("new epoch {} starts!".format(i))
    # clear gradient in model
    model.zero_grad()
    b_loss = 0
    # train model
    model.train()
    for j, batch_idx in enumerate(train_loader):
        # clear gradient in optimizer
        optimizer.zero_grad()
        batch_idx = batch_idx.view(-1).long()
        # fuse while fusion
        # Encode the paired CT/MRI batch, fuse the feature maps with the
        # FL1N strategy, and reconstruct a single fused image.
        img_1 = train_ct[batch_idx]
        img_2 = train_mri[batch_idx]
        img_1_fe = model.fe(img_1)
        img_2_fe = model.fe(img_2)
        fused = fusion_strategy(img_1_fe, img_2_fe, device, strategy="FL1N")
        fused_recon = model.recon(fused)
        img_out = fused_recon
        # compute loss: the fused output is scored against BOTH source images
        loss1, _, _, _ = loss_func2(vgg, img_out, img_1, opt.lambda1, opt.lambda2, config.block_idx, device)
        loss2, _, _, _ = loss_func2(vgg, img_out, img_2, opt.lambda1, opt.lambda2, config.block_idx, device)
        loss = loss1 + loss2
        # back propagate and update weights
        # print("batch reg, grad, percep loss: ", reg_loss.item(), img_grad.item(), percep.item())
        # loss = loss / NUM_ACCUMULATION_STEPS
        loss.backward()
        # if ((j + 1) % NUM_ACCUMULATION_STEPS == 0) or (j + 1 == len(train_loader)):
        optimizer.step()
        b_loss += loss.item()
        # wandb.log({"loss": loss})
    # store loss
    ave_loss = b_loss / len(train_loader)
    train_loss.append(ave_loss)
    print("epoch {}, training loss is: {}".format(i, ave_loss))
    # validation
    val_loss = []
    val_display_img = []
    with torch.no_grad():
        b_loss = 0
        # eval model, unable update weights
        model.eval()
        for k, batch_idx in enumerate(val_loader):
            batch_idx = batch_idx.view(-1).long()
            # fuse while fusion (same fuse-and-reconstruct path as training)
            img_1 = train_ct[batch_idx]
            img_2 = train_mri[batch_idx]
            img_1_fe = model.fe(img_1)
            img_2_fe = model.fe(img_2)
            fused = fusion_strategy(img_1_fe, img_2_fe, device, strategy="FL1N")
            fused_recon = model.recon(fused)
            img_out = fused_recon
            # img_out = fused_recon.squeeze(0).squeeze(0).detach().clamp(min=0, max=1.)
            # compute loss
            # display first image to visualize, this can be changed
            # val_display_img.extend([img_out[i].squeeze(0).cpu().numpy() for i in range(1)])
            loss1, _, _, _ = loss_func2(vgg, img_out, img_1, opt.lambda1, opt.lambda2, config.block_idx, device)
            loss2, _, _, _ = loss_func2(vgg, img_out, img_2, opt.lambda1, opt.lambda2, config.block_idx, device)
            loss = loss1 + loss2
            b_loss += loss.item()
        ave_val_loss = b_loss / len(val_loader)
        val_loss.append(ave_val_loss)
        print("epoch {}, validation loss is: {}".format(i, ave_val_loss))
    # define a metric we are interested in the minimum of
    wandb.define_metric("train loss", summary="min")
    # define a metric we are interested in the maximum of
    wandb.define_metric("val loss", summary="min")
    wandb.log({"train loss": ave_loss, "epoch": i})
    wandb.log({"val loss": ave_val_loss, "epoch": i})
    # wandb.log({"val sample images": [wandb.Image(img) for img in val_display_img]})
    # save model whenever validation loss improves
    if ave_val_loss < lowest_val_loss:
        torch.save(model.state_dict(), model_dir + "/model_lowest_loss.pt")
        lowest_val_loss = ave_val_loss
        print("model is saved in epoch {}".format(i))
    # Evaluate during training
    # Save the current model
    torch.save(model.state_dict(), model_dir + "/current.pt".format(i))
    val_psnr, val_ssim, val_nmi, val_mi, val_fsim = validate(model_dir + "/current.pt")
    # define a metric we are interested in the maximum of
    wandb.define_metric("PSNR", summary="max")
    wandb.define_metric("SSIM", summary="max")
    wandb.define_metric("NMI", summary="max")
    wandb.define_metric("MI", summary="max")
    wandb.define_metric("FSIM", summary="max")
    wandb.log({"PSNR": val_psnr, "epoch": i})
    wandb.log({"SSIM": val_ssim, "epoch": i})
    wandb.log({"NMI": val_nmi, "epoch": i})
    wandb.log({"MI": val_mi, "epoch": i})
    wandb.log({"FSIM": val_fsim, "epoch": i})
    print("PSNR", "SSIM", "NMI", "MI", "FSIM")
    print(val_psnr, val_ssim, val_nmi, val_mi, val_fsim)
    if val_ssim > best_ssim:
        best_ssim = val_ssim
        print(f"ヾ(◍°∇°◍)ノ゙ New best SSIM = {best_ssim}")
        # overwrite
        torch.save(model.state_dict(), model_dir + "/best.pt".format(i))
    if i == opt.epochs - 1:
        torch.save(model.state_dict(), model_dir + "/last.pt".format(i))
    # lr decay update
    if opt.lr_decay:
        stepLR.step()
######################################## | 9,453 | 34.810606 | 112 | py |
# Loss functions for the project
# Author: Reacher Z., last modify Nov. 18, 2022
"""
Change log:
- Reacher: file created, implement L1 loss and L2 loss function
- Reacher: update image gradient calculation
- Simon: update image gradient loss
- Simon: add loss_func2, and L1_Charbonnier_loss
"""
import numpy as np
import torch
import torch.nn as nn
from our_utils import Percep_loss
from torchmetrics.functional import image_gradients
from torchvision.transforms import transforms
import torch.nn.functional as F
class grad_loss(nn.Module):
    '''
    Sobel-based image-gradient loss: mean squared difference between the
    absolute Sobel responses of two images, summed over the horizontal and
    vertical directions.
    '''

    def __init__(self, device, vis=False, type="sobel"):
        super(grad_loss, self).__init__()
        # Only the Sobel filter is supported for now; other types leave the
        # kernels undefined (forward would then fail, as in the original).
        if type == "sobel":
            gx = torch.tensor([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]])
            gy = torch.tensor([[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]])
            # Fixed (non-trainable) convolution kernels, shape [1, 1, 3, 3].
            self.weight_x = nn.Parameter(data=gx.reshape(1, 1, 3, 3), requires_grad=False).to(device)
            self.weight_y = nn.Parameter(data=gy.reshape(1, 1, 3, 3), requires_grad=False).to(device)
        self.vis = vis

    def forward(self, x, y):
        # Horizontal and vertical Sobel responses of both inputs.
        dxx = F.conv2d(x, self.weight_x)
        dxy = F.conv2d(x, self.weight_y)
        dyx = F.conv2d(y, self.weight_x)
        dyy = F.conv2d(y, self.weight_y)
        if self.vis:
            # Visualization mode: hand back the raw gradient maps.
            return dxx, dxy, dyx, dyy
        # Mean squared gap between absolute responses, per direction.
        horiz = ((torch.abs(dxx) - torch.abs(dyx)) ** 2).mean()
        vert = ((torch.abs(dxy) - torch.abs(dyy)) ** 2).mean()
        return horiz + vert
class L1_Charbonnier_loss(nn.Module):
    """Charbonnier (smoothed L1) penalty: mean(sqrt((x - y)^2 + eps))."""

    def __init__(self):
        super(L1_Charbonnier_loss, self).__init__()
        # NOTE: eps is added un-squared, matching this file's formulation.
        self.eps = 1e-3

    def forward(self, x, y):
        # x: predict, y: target
        diff = x - y
        return torch.sqrt(diff * diff + self.eps).mean()
def l1_loss(predicted, target):
    """
    Mean absolute error between `predicted` and `target` (scalar tensor).
    """
    return (predicted - target).abs().mean()
def mse_loss(predicted, target):
    """
    Mean squared error between `predicted` and `target` (scalar tensor).
    """
    diff = predicted - target
    return (diff * diff).mean()
def img_gradient(img: torch.Tensor):
    """
    Input: one PIL Image or numpy.ndarray (H x W x C) in the range [0, 255]
    Output: image gradient (2 x C x H x W)
    """
    # NOTE(review): despite the docstring above, this receives a torch tensor
    # and forwards it straight to `image_gradients` -- confirm the expected
    # input shape against that helper's definition.
    # trans = transforms.ToTensor()
    # # a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
    # img_tensor = trans(img)
    # # reshape to [N, C, H, W]
    # img_tensor = img_tensor.reshape((1, img_tensor.shape[0], img_tensor.shape[1], img_tensor.shape[2]))
    dy, dx = image_gradients(img)
    # drop singleton dims, then stack dx/dy along a new leading axis
    dy, dx = dy.squeeze(), dx.squeeze()
    dxy = torch.stack((dx, dy), axis=0)
    return dxy
def gradient_loss(predicted, target):
    """
    Mean squared difference between the image gradients of `predicted`
    and `target` (gradients computed by img_gradient).
    """
    diff = img_gradient(predicted) - img_gradient(target)
    return (diff ** 2).mean()
def perceptual_loss(vgg, predicted, target, block_idx, device):
    """
    VGG-feature (perceptual) distance between `predicted` and `target`.
    Thin wrapper that builds a Percep_loss module and applies it once.
    """
    criterion = Percep_loss(vgg, block_idx, device)
    return criterion(predicted, target)
def loss_func(predicted, target, lambda1, lambda2, block_idx, device, vgg=None):
    """
    Combined loss: MSE + lambda1 * image-gradient loss
    (+ lambda2 * perceptual loss when a VGG model is supplied).

    BUG FIX: the perceptual term used to sit on its own line
    (`+lambda2 * perceptual_loss(...)`) -- a no-op expression statement that
    was never added to `loss` -- and it also called perceptual_loss() without
    the required `vgg` argument. `vgg` is now an optional trailing parameter,
    so existing call sites keep working (they simply get MSE + gradient loss).
    """
    loss = mse_loss(predicted, target) + lambda1 * gradient_loss(predicted, target)
    if vgg is not None:
        loss = loss + lambda2 * perceptual_loss(vgg, predicted, target, block_idx, device)
    return loss
def loss_func2(vgg, predicted, target, lambda1, lambda2, block_idx, device):
    """
    same as loss_func, except the gradient loss is change to grad_loss() class
    Returns (total, pixel_term, gradient_term, perceptual_term) so the
    individual components can be logged separately.
    """
    # Sobel-based image-gradient term (see grad_loss)
    img_grad_loss = grad_loss(device)
    #L1_charbonnier = L1_Charbonnier_loss()
    #reg_loss = L1_charbonnier(predicted, target)
    reg_loss = mse_loss(predicted, target)
    img_grad_dif = img_grad_loss(predicted, target)
    percep = perceptual_loss(vgg, predicted, target, block_idx, device)
    # weighted sum of the three terms
    loss = reg_loss + lambda1 * img_grad_dif + lambda2 * percep
    return loss, reg_loss, img_grad_dif, percep
def loss_function_l2(predicted, target):
    """Mean squared error via nn.MSELoss (default 'mean' reduction)."""
    criterion = nn.MSELoss()
    return criterion(predicted, target)
dilran | dilran-main/model.py |
# Model Architecture
# Author: Landy Xu, created on Nov. 12, 2022
# Last modified by Simon on Nov. 13
'''
Change log:
- Landy: create feature extractor and DILRAN
- Simon: revise some writing style of module configs (e.g., replace = True),
refine the FE module, add recon module
- Simon: create full model pipeline
- Simon: add leaky relu to recon module
'''
import torch
import torch.nn as nn
import numpy as np
class DILRAN(nn.Module):
    """Residual attention block: a multiscale additive conv path gated by
    ReLU(upsample(avgpool(x))), added back onto the input.

    BUG FIX: nn.ReLU was constructed with `replace=True`, which is not a valid
    keyword (raises TypeError on instantiation); `inplace=True` was intended.
    """
    def __init__(self):
        super(DILRAN, self).__init__()
        # TODO: confirm convolution
        self.conv = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.down = nn.AvgPool2d(2, 2)
        # BUG FIX: was nn.ReLU(replace = True) -> TypeError
        self.lu = nn.ReLU(inplace=True)
    def forward(self, x):
        # stacked 3x3 convs give effective receptive fields of 3/5/7, summed
        prev = self.conv(x) + self.conv(self.conv(x)) + self.conv(self.conv(self.conv(x)))
        # attention gate (pool->up->relu) times conv path, plus residual
        return torch.mul(self.lu(self.up(self.down(x))), prev) + x
class FeatureExtractor(nn.Module):
    """Applies a shared 1x1 conv + DILRAN block to three inputs and
    concatenates the results along dim 0.

    `level` is accepted but unused (kept for interface compatibility).
    BUG FIX: nn.ReLU was constructed with `replace=True` (invalid keyword,
    TypeError on instantiation); `inplace=True` was intended.
    """
    def __init__(self, level):
        super(FeatureExtractor, self).__init__()
        # TODO: confirm dilated convolution (dilation has no effect on a 1x1 kernel)
        self.conv = nn.Conv2d(1, 64, (1, 1), (1, 1), (0, 0), dilation = 2)
        self.network = DILRAN()
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.down = nn.AvgPool2d(2, 2)
        # BUG FIX: was nn.ReLU(replace = True) -> TypeError
        self.lu = nn.ReLU(inplace=True)
    def forward(self, x):
        # x: sequence of three [N, 1, H, W] tensors; the conv/DILRAN weights are shared
        n1 = self.network(self.conv(x[0]))
        n2 = self.network(self.conv(x[1]))
        n3 = self.network(self.conv(x[2]))
        return torch.cat((n1, n2, n3), 0)
class DILRAN_V1(nn.Module):
    '''
    V1: concat the output of three (conv-d,DILRAN) paths channel wise and add the low level feature to the concat output
    temporary, will edit if necessary

    BUG FIX: forward() unconditionally used self.relu, but with use_leaky=True
    only self.lrelu was defined, so the module crashed with AttributeError.
    The chosen activation is now routed through self.act.
    '''
    def __init__(self, cat_first = False, use_leaky = False):
        super(DILRAN_V1, self).__init__()
        # cat_first, whether to perform channel-wise concat before DILRAN
        # convolution in DILRAN, in channel is the channel from the previous block
        if not cat_first:
            self.conv_d = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding="same")
            self.bnorm = nn.BatchNorm2d(num_features=64)
        else:
            self.conv_d = nn.Conv2d(in_channels=64*3, out_channels=64*3, kernel_size=3, stride=1, padding="same")
            self.bnorm = nn.BatchNorm2d(num_features=64*3)
        if not use_leaky:
            self.relu = nn.ReLU(inplace = True)
        else:
            self.lrelu = nn.LeakyReLU(0.2, inplace=True)
        # BUG FIX: single attribute valid in both configurations
        self.act = self.lrelu if use_leaky else self.relu
        self.down = nn.AvgPool2d(2, 2)
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
    def forward(self, x):
        # pooling -> upsample -> activation block
        pur_path = self.act(self.up(self.down(x)))
        # 3*3, 5*5, 7*7 multiscale addition block (stacked shared 3x3 convs)
        conv_path = self.conv_d(x) + self.conv_d(self.conv_d(x)) + self.conv_d(self.conv_d(self.conv_d(x)))
        # attention
        attn = torch.mul(pur_path, conv_path)
        # residual + attention
        resid_x = x + attn
        return resid_x
class FE_V1(nn.Module):
    '''
    Multiscale feature extractor: three dilated 3x3 convs (dilation 1/3/5),
    each refined by a single shared DILRAN_V1 block and batch-normalised,
    then concatenated channel-wise (64*3 channels out).
    '''
    def __init__(self):
        super(FE_V1, self).__init__()
        # multiscale dilation conv2d
        self.convd1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, dilation=1, padding="same")
        self.convd2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, dilation=3, padding="same")
        self.convd3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, dilation=5, padding="same")
        # the two activations below are kept for checkpoint/interface parity
        # even though forward() does not use them
        self.relu = nn.ReLU(inplace = True)
        self.lrelu = nn.LeakyReLU(0.2, inplace = True)
        self.bnorm1 = nn.BatchNorm2d(num_features=64)
        self.dilran = DILRAN_V1()
    def forward(self, x):
        # same DILRAN_V1 and batchnorm instance applied to every dilated branch
        branches = [self.bnorm1(self.dilran(conv(x)))
                    for conv in (self.convd1, self.convd2, self.convd3)]
        # channel-wise concat (dim 0 is batch, dim 1 is channel)
        return torch.cat(branches, dim=1)
class MSFuNet(nn.Module):
    '''
    the whole network (from input image -> feature maps to be used in fusion strategy)
    temporary, will edit if necessary
    Pipeline: shallow conv (1->64 ch) -> FE_V1 (64->192 ch) with a 1x1
    shortcut expansion (64->192 ch) -> two conv stages back down to 64 ch.
    '''
    def __init__(self):
        super(MSFuNet, self).__init__()
        # 1x1 shortcut that expands the shallow feature to match FE_V1's 192 channels
        self.conv_id = nn.Conv2d(in_channels=64, out_channels=64*3, kernel_size=1, stride=1, padding="valid")
        self.conv1 = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding="same"),
                                   nn.BatchNorm2d(num_features=64),
                                   nn.ReLU(inplace=True))
        self.conv2 = nn.Sequential(nn.Conv2d(in_channels=64*3, out_channels=128, kernel_size=3, stride=1, padding="same"),
                                   nn.BatchNorm2d(num_features=128),
                                   nn.ReLU(inplace=True))
        self.conv3 = nn.Sequential(nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding="same"),
                                   nn.BatchNorm2d(num_features=64),
                                   nn.ReLU(inplace=True))
        # unused in forward(); kept as defined
        self.lrelu = nn.LeakyReLU(0.2, inplace = True)
        self.fe = FE_V1()
    def forward(self, x):
        # x: [b, 1, H, W] input image
        x = self.conv1(x) # shallow feature
        # feature returned from feature extractor
        cat_feature = self.fe(x)
        # short cut connection
        expand_x = self.conv_id(x)
        add = expand_x + cat_feature
        add = self.conv2(add)
        add = self.conv3(add) # should get shape [b, 64, 256, 256]
        return add
class Recon(nn.Module):
    '''
    Reconstruction head: four 3x3 convs tapering 64 -> 64 -> 32 -> 16 -> 1
    channels, each followed by LeakyReLU(0.2).
    '''
    def __init__(self):
        super(Recon, self).__init__()
        layers = []
        for c_in, c_out in ((64, 64), (64, 32), (32, 16), (16, 1)):
            layers.append(nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=3, stride=1, padding="same"))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        self.recon_conv = nn.Sequential(*layers)
    def forward(self, x):
        """Map [b, 64, H, W] features to a single-channel image [b, 1, H, W]."""
        return self.recon_conv(x)
class fullModel(nn.Module):
    '''
    Feature extractor + reconstruction
    a full model pipeline: [b, 1, H, W] image -> deep features -> image
    '''
    def __init__(self):
        super(fullModel, self).__init__()
        self.fe = MSFuNet()   # deep feature extraction backbone
        self.recon = Recon()  # conv head mapping features back to 1 channel
    def forward(self, x):
        # extract deep features then reconstruct a single-channel image
        deep_fe = self.fe(x)
        recon_img = self.recon(deep_fe)
        return recon_img
dilran | dilran-main/model_v5.py |
# Model Architecture
# Author: Landy Xu, created on Nov. 12, 2022
# Last modified by Simon on Nov. 13
# Version 2: add attention to shallow feature, change first conv to 1x1 kernal
'''
Change log:
- Landy: create feature extractor and DILRAN
- Simon: revise some writing style of module configs (e.g., replace = True),
refine the FE module, add recon module
- Simon: create full model pipeline
- Simon: add leaky relu to recon module
'''
import torch
import torch.nn as nn
import numpy as np
class DILRAN(nn.Module):
    """Residual attention block (v5 copy): multiscale additive conv path gated
    by ReLU(upsample(avgpool(x))), added back onto the input.

    BUG FIX: nn.ReLU was constructed with `replace=True`, an invalid keyword
    (TypeError on instantiation); `inplace=True` was intended.
    """
    def __init__(self):
        super(DILRAN, self).__init__()
        # TODO: confirm convolution
        self.conv = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.down = nn.AvgPool2d(2, 2)
        # BUG FIX: was nn.ReLU(replace = True) -> TypeError
        self.lu = nn.ReLU(inplace=True)
    def forward(self, x):
        # stacked shared 3x3 convs: effective receptive fields 3/5/7, summed
        prev = self.conv(x) + self.conv(self.conv(x)) + self.conv(self.conv(self.conv(x)))
        # attention gate times conv path, plus residual
        return torch.mul(self.lu(self.up(self.down(x))), prev) + x
class FeatureExtractor(nn.Module):
    """Shared 1x1 conv + DILRAN applied to three inputs, concatenated on dim 0
    (v5 copy). `level` is accepted but unused (interface compatibility).

    BUG FIX: nn.ReLU was constructed with `replace=True` (invalid keyword,
    TypeError on instantiation); `inplace=True` was intended.
    """
    def __init__(self, level):
        super(FeatureExtractor, self).__init__()
        # TODO: confirm dilated convolution (dilation has no effect on a 1x1 kernel)
        self.conv = nn.Conv2d(1, 64, (1, 1), (1, 1), (0, 0), dilation = 2)
        self.network = DILRAN()
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.down = nn.AvgPool2d(2, 2)
        # BUG FIX: was nn.ReLU(replace = True) -> TypeError
        self.lu = nn.ReLU(inplace=True)
    def forward(self, x):
        # x: sequence of three [N, 1, H, W] tensors processed by shared weights
        n1 = self.network(self.conv(x[0]))
        n2 = self.network(self.conv(x[1]))
        n3 = self.network(self.conv(x[2]))
        return torch.cat((n1, n2, n3), 0)
class DILRAN_V1(nn.Module):
    '''
    V1: concat the output of three (conv-d,DILRAN) paths channel wise and add the low level feature to the concat output
    temporary, will edit if necessary

    BUG FIX: forward() unconditionally used self.relu, but with use_leaky=True
    only self.lrelu was defined, so forward crashed with AttributeError.
    The chosen activation is now routed through self.act.
    '''
    def __init__(self, cat_first = False, use_leaky = False):
        super(DILRAN_V1, self).__init__()
        # cat_first, whether to perform channel-wise concat before DILRAN
        # convolution in DILRAN, in channel is the channel from the previous block
        if not cat_first:
            self.conv_d = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding="same")
            self.bnorm = nn.BatchNorm2d(num_features=64)
        else:
            self.conv_d = nn.Conv2d(in_channels=64*3, out_channels=64*3, kernel_size=3, stride=1, padding="same")
            self.bnorm = nn.BatchNorm2d(num_features=64*3)
        if not use_leaky:
            self.relu = nn.ReLU()
        else:
            self.lrelu = nn.LeakyReLU(0.2, inplace=True)
        # BUG FIX: single attribute valid in both configurations
        self.act = self.lrelu if use_leaky else self.relu
        self.down = nn.AvgPool2d(2, 2)
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
    def forward(self, x):
        # pooling -> upsample -> activation block
        pur_path = self.act(self.up(self.down(x)))
        # 3*3, 5*5, 7*7 multiscale addition block
        conv_path = self.conv_d(x) + self.conv_d(self.conv_d(x)) + self.conv_d(self.conv_d(self.conv_d(x)))
        # attention
        attn = torch.mul(pur_path, conv_path)
        # residual + attention
        resid_x = x + attn
        return resid_x
class FE_V1(nn.Module):
    '''
    feature extractor block (temporary, will edit if necessary)
    v5: the three dilated branches are concatenated first, reduced back to
    64 channels by a 1x1 conv, then refined by a single DILRAN_V1 pass.
    '''
    def __init__(self):
        super(FE_V1, self).__init__()
        # multiscale dilation conv2d
        self.convd1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, dilation=1, padding="same")
        self.convd2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, dilation=3, padding="same")
        self.convd3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, dilation=5, padding="same")
        # 1x1 channel reduction: 192 -> 64
        self.reduce = nn.Conv2d(in_channels=64*3, out_channels=64, kernel_size=1, stride=1, padding="same")
        self.relu = nn.ReLU()
        self.bnorm1 = nn.BatchNorm2d(num_features=64)
        self.dilran = DILRAN_V1()
    def forward(self, x):
        # dilated convolution
        dilf1 = self.convd1(x)
        dilf2 = self.convd2(x)
        dilf3 = self.convd3(x)
        # channel-wise concat then 1x1 reduction and batchnorm
        diltotal = torch.cat((dilf1, dilf2, dilf3), dim = 1)
        diltotal = self.reduce(diltotal)
        diltotal = self.bnorm1(diltotal)
        # single DILRAN
        out = self.dilran(diltotal)
        out = self.bnorm1(out)
        #out = self.relu(out)
        return out
        # DILRAN
        # dilran_o1 = self.dilran(dilf1)
        # # batchnorm
        # dilran_o1 = self.bnorm1(dilran_o1)
        # dilran_o2 = self.dilran(dilf2)
        # # batchnorm
        # dilran_o2 = self.bnorm1(dilran_o2)
        # dilran_o3 = self.dilran(dilf3)
        # # batchnorm
        # dilran_o3 = self.bnorm1(dilran_o3)
        # # element-wise addition
        # cat_o = dilran_o1 + dilran_o2 + dilran_o3
        # return cat_o
class MSFuNet(nn.Module):
    '''
    the whole network (from input image -> feature maps to be used in fusion strategy)
    temporary, will edit if necessary
    v5: attention is applied to the shallow feature before FE_V1 and to the
    deep feature afterwards (see forward).
    '''
    def __init__(self):
        super(MSFuNet, self).__init__()
        self.conv_id = nn.Sequential(nn.Conv2d(in_channels=64*3, out_channels=64, kernel_size=1, stride=1, padding="same"))
        #nn.BatchNorm2d(num_features = 64))
        #nn.ReLU(inplace=True))
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=1, stride=1, padding="same")
        self.conv2 = nn.Sequential(nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding="same"),
                                   nn.BatchNorm2d(num_features=64),
                                   nn.ReLU(),
                                   nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding="same"),
                                   nn.BatchNorm2d(num_features=64))
        self.relu = nn.ReLU()
        self.down = nn.AvgPool2d(2, 2)
        self.bnorm = nn.BatchNorm2d(num_features=64)
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
        self.fe = FE_V1()
    def forward(self, x):
        # x: input image
        temp0 = self.conv1(x) # shallow feature, 64 x (1x1)
        # NOTE(review): x has 1 channel and temp0 has 64, so the mul/add below
        # rely on broadcasting -- x becomes a 64-channel map after `x = x + attn`,
        # which is what FE_V1 expects. Confirm this broadcast is intentional.
        pur_orig = self.relu(self.up(self.down(x)))
        attn = torch.mul(pur_orig, temp0)
        x = x + attn
        # feature returned from feature extractor
        deep_fe = self.fe(x)
        # second attention pass on the deep feature, then residual add
        pur_x = self.relu(self.up(self.down(x)))
        attn2 = torch.mul(pur_x, deep_fe)
        add = attn2 + x
        return add
        #x = x + cat_feature
        # short cut connection
        # expand_x = self.conv_id(x)
        # add = expand_x + cat_feature
        #add = self.conv2(add)
        # add = self.conv2(resid) # should get shape [b, 64, 256, 256]
        # return add
class Recon(nn.Module):
    '''
    Reconstruction head (v5 / "version 2"): three plain 3x3 convs tapering
    64 -> 64 -> 32 -> 1 channels with no activations in between.
    '''
    def __init__(self):
        super(Recon, self).__init__()
        stages = ((64, 64), (64, 32), (32, 1))
        self.recon_conv = nn.Sequential(
            *[nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=3, stride=1, padding="same")
              for c_in, c_out in stages])
    def forward(self, x):
        """Map [b, 64, H, W] features to a single-channel image [b, 1, H, W]."""
        return self.recon_conv(x)
class fullModel(nn.Module):
    '''
    Feature extractor + reconstruction
    a full model pipeline: [b, 1, H, W] image -> deep features -> image
    '''
    def __init__(self):
        super(fullModel, self).__init__()
        self.fe = MSFuNet()   # deep feature extraction backbone
        self.recon = Recon()  # conv head mapping features back to 1 channel
    def forward(self, x):
        # extract deep features then reconstruct a single-channel image
        deep_fe = self.fe(x)
        recon_img = self.recon(deep_fe)
        return recon_img
dilran | dilran-main/dataset_loader.py | # Get dataloader for MRI-CT data
# Author: Simon Zhou, last modify Nov. 11, 2022
'''
Change log:
- Simon: file created, implement dataset loader
'''
import os
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader, random_split, Dataset
import skimage.io as io
class getIndex(Dataset):
    """Trivial dataset that yields its own indices as 1-element float tensors.

    Lets a DataLoader shuffle/batch *indices*; the caller then gathers the
    actual images from a pre-built tensor using those indices.
    """
    def __init__(self, total_len):
        self.total_len = total_len
    def __len__(self):
        return self.total_len
    def __getitem__(self, ind):
        # equivalent to torch.Tensor([ind]): a float32 1-element tensor
        return torch.full((1,), float(ind), dtype=torch.float32)
def get_common_file(target_dir):
    '''
    target_dir: target directory of data, for now is MRI-CT data
    return: (ct_file, mri_file) -- sorted file-name lists from the CT and MRI
    subfolders; asserts the two folders contain exactly the same names.
    '''
    ct_file = sorted(os.listdir(os.path.join(target_dir, "CT")))
    mri_file = sorted(os.listdir(os.path.join(target_dir, "MRI")))
    # names present in one modality but not the other -> unpaired data
    unmatched_ct = [f for f in ct_file if f not in mri_file]
    unmatched_mri = [f for f in mri_file if f not in ct_file]
    assert len(unmatched_ct) == len(unmatched_mri) == 0, "data is somehow not paired"
    return ct_file, mri_file
def load_data(file, target_dir, test_num):
    '''
    file: list of file names (for ct, mri)
    target_dir: file directory
    test_num: number of test data
    return: (train_ct, train_mri, test_ct, test_mri) float tensors of shape
    [N, 1, 256, 256], intensities normalised to [0, 1]
    '''
    # randomly hold out `test_num` file names (paired across modalities)
    test_ind = np.random.choice(len(file), size=test_num, replace = False)
    print(test_ind)
    test = []
    for ind in test_ind:
        test.append(file[ind])
    #print(test)
    # all images are expected to be 256x256 single channel
    HEIGHT = 256
    WIDTH = 256
    # 1 channel image, with shape 256x256
    data_ct = torch.empty(0, 1, HEIGHT, WIDTH)
    data_mri = torch.empty(0, 1, HEIGHT, WIDTH)
    data_ct_t = torch.empty(0, 1, HEIGHT, WIDTH)
    data_mri_t = torch.empty(0, 1, HEIGHT, WIDTH)
    for f in file:
        # read data and normalize
        img_ct = io.imread(os.path.join(target_dir, "CT", f)).astype(np.float32) / 255.
        img_mri = io.imread(os.path.join(target_dir, "MRI", f)).astype(np.float32) / 255.
        img_ct = torch.from_numpy(img_ct)
        img_mri = torch.from_numpy(img_mri)
        img_ct = img_ct.unsqueeze(0).unsqueeze(0) # change shape to (1, 1, 256, 256)
        img_mri = img_mri.unsqueeze(0).unsqueeze(0)
        # route each pair to the train or test split
        if f not in test:
            data_ct = torch.cat((data_ct, img_ct), dim = 0)
            data_mri = torch.cat((data_mri, img_mri), dim = 0)
        else:
            data_ct_t = torch.cat((data_ct_t, img_ct), dim = 0)
            data_mri_t = torch.cat((data_mri_t, img_mri), dim = 0)
    return data_ct, data_mri, data_ct_t, data_mri_t
def get_loader(ct, mri, tv_ratio, bs):
    '''
    ct: ct data
    mri: mri data
    tv_ratio: train & validation ratio
    bs: batch size
    return: Dataloader class for train and val
    The loaders yield *indices* 0..2N-1 (see getIndex), addressing the
    concatenated torch.cat((ct, mri)) tensor built by the caller.
    '''
    assert ct.shape[0] == mri.shape[0], "two datasets do not have the same length? whats wrong"
    total_len = ct.shape[0] + mri.shape[0]
    n_train = int(tv_ratio * total_len)
    train_set, val_set = random_split(getIndex(total_len), lengths=(n_train, total_len - n_train))
    train_loader = DataLoader(train_set, batch_size=bs, num_workers=0, shuffle=True, drop_last=False)
    val_loader = DataLoader(val_set, batch_size=bs, num_workers=0, shuffle=False, drop_last=False)
    return train_loader, val_loader
def get_loader2(ct, mri, tv_ratio, bs):
    '''
    ct: ct data
    mri: mri data
    tv_ratio: train & validation ratio
    bs: batch size
    return: Dataloader class for train and val
    Variant of get_loader: indices run only 0..N-1, i.e. one index addresses
    a *pair* of aligned CT/MRI slices rather than a single modality.
    '''
    assert ct.shape[0] == mri.shape[0], "two datasets do not have the same length? whats wrong"
    total_len = ct.shape[0]
    n_train = int(tv_ratio * total_len)
    train_set, val_set = random_split(getIndex(total_len), lengths=(n_train, total_len - n_train))
    train_loader = DataLoader(train_set, batch_size=bs, num_workers=0, shuffle=True, drop_last=False)
    val_loader = DataLoader(val_set, batch_size=bs, num_workers=0, shuffle=False, drop_last=False)
    return train_loader, val_loader
# if __name__ == "__main__":
# target_dir = "./CT-MRI/"
# ct, mri = get_common_file(target_dir)
# train_ct, train_mri, test_ct, test_mri = load_data(ct, target_dir, 20)
# print(train_ct.shape, train_mri.shape, test_ct.shape, test_mri.shape)
# train_loader, val_loader = get_loader(train_ct, train_mri, 0.8, 16)
# print(len(train_loader), len(val_loader))
dilran | dilran-main/eval.py | # Evaluation Metrics and get results
# Author: Reacher Z., last modify Nov. 26, 2022
"""
Change log:
- Reacher: file created, implement PSNR, SSIM, NMI, MI
"""
import numpy as np
import sklearn.metrics as skm
import torch
from skimage.metrics import peak_signal_noise_ratio, normalized_mutual_information
from torchmetrics import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure
#from TMQI import TMQI, TMQIr
def psnr(img_pred: torch.Tensor, img_true: torch.Tensor):
    """
    To compute PeakSignalNoiseRatio
    Return: float
    """
    # data_range=1.0: images are assumed to be normalised to [0, 1]
    peakSignalNoiseRatio = PeakSignalNoiseRatio(data_range=1.0)
    return peakSignalNoiseRatio(img_pred, img_true).item()
def ssim(img_pred: torch.Tensor, img_true: torch.Tensor):
    """
    To compute the Structural Similarity Index Measure (SSIM).
    (Docstring previously said PSNR -- copy/paste error.)
    Input: 2-D images [H, W]; they are expanded to [1, 1, H, W] below.
    Return: float
    """
    img_pred = img_pred.unsqueeze(0).unsqueeze(0)
    img_true = img_true.unsqueeze(0).unsqueeze(0)
    # data_range=1.0: images are assumed to be normalised to [0, 1]
    structuralSimilarityIndexMeasure = StructuralSimilarityIndexMeasure(data_range=1.0)
    return structuralSimilarityIndexMeasure(img_pred, img_true).item()
def nmi(img_pred: torch.Tensor, img_true: torch.Tensor):
    """
    normalized mutual information (NMI), via skimage
    Return: float
    """
    # squeeze away singleton dims before handing numpy arrays to skimage
    img_pred_np = np.array(img_pred.squeeze())
    img_true_np = np.array(img_true.squeeze())
    nor_mi = normalized_mutual_information(img_pred_np, img_true_np)
    return nor_mi
def mutual_information(img_pred: torch.Tensor, img_true: torch.Tensor):
    """
    Mutual information via 256-bin intensity histograms:
        I(A, B) = H(A) + H(B) - H(A, B),  H(P) = -sum p_i * log p_i

    Inputs are assumed to be in [0, 1]; they are quantised to uint8 first.
    MI measures how well one image's intensities predict the other's, without
    requiring the two signals to be identical.
    Return: float
    """
    a = (np.array(img_pred.squeeze()) * 255).astype(np.uint8).flatten()
    b = (np.array(img_true.squeeze()) * 255).astype(np.uint8).flatten()
    n = b.shape[-1]
    eps = 1e-20  # avoids log(0) in empty bins
    pa = np.histogram(a, 256, (0, 255))[0] / n
    pb = np.histogram(b, 256, (0, 255))[0] / n
    pab = (np.histogram2d(a, b, 256, [[0, 255], [0, 255]])[0]) / n
    ha = -np.sum(pa * np.log(pa + eps))
    hb = -np.sum(pb * np.log(pb + eps))
    hab = -np.sum(pab * np.log(pab + eps))
    return ha + hb - hab
def mi2(x, y):
    # Mutual information via sklearn's contingency-based estimator.
    # NOTE(review): mutual_info_score expects discrete label arrays; passing
    # float intensities treats every distinct value as its own label --
    # confirm this is intended before relying on it.
    x = np.reshape(x, -1)
    y = np.reshape(y, -1)
    return skm.mutual_info_score(x, y)
dilran | dilran-main/our_utils.py | # helper functions for the project
# Author: Simon Zhou, last modify Nov. 15, 2022
'''
Change log:
- Simon: file created, implement edge detector
- Simon: create helper function for perceptual loss
- Reacher: create fusion strategy function
- Simon: add random seed func for seeding
'''
import torch
import torch.nn as nn
import numpy as np
from skimage import feature
import random
def random_seed(seed_value, use_cuda):
    """Seed the Python, numpy and torch RNGs; optionally the CUDA ones too.

    seed_value: integer seed applied to every generator.
    use_cuda: when True, also seeds CUDA RNGs and forces deterministic cuDNN.
    """
    random.seed(seed_value)        # Python
    np.random.seed(seed_value)     # numpy
    torch.manual_seed(seed_value)  # torch (cpu)
    if not use_cuda:
        return
    torch.cuda.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)      # all gpus
    torch.backends.cudnn.deterministic = True   # needed for reproducibility
    torch.backends.cudnn.benchmark = False
class PercepHook:
    '''
    Pytorch forward hook for computing the perceptual loss
    without modifying the original VGG16 network
    After each forward pass of the hooked module, `self.features` holds that
    module's output; call close() to unregister the hook.
    '''
    def __init__(self, module):
        # module: the layer whose forward output should be captured
        self.features = None
        self.hook = module.register_forward_hook(self.on)
    def on(self, module, inputs, outputs):
        # forward-hook callback: stash the layer output for later retrieval
        self.features = outputs
    def close(self):
        # unregister the hook to stop capturing
        self.hook.remove()
def edge_detector(img, sigma):
    '''
    canny edge detection for input image
    two choices: 1) edge detection in the training process, 2) not include in training process
    sigma: Gaussian smoothing width passed through to skimage's canny.
    '''
    # assumes img is a 2-D map, possibly with a leading channel dim -- TODO confirm
    if len(img.shape) == 3:
        img = img.squeeze(0) # change shape to [256,256]
    edges = feature.canny(img, sigma = sigma)
    return edges
def l2_norm():
    '''
    mse loss (matrix F norm)
    '''
    # placeholder -- not implemented; returns None
    return
def gradient_loss(fused_img, input_img, device):
    '''
    compute image gradient loss between fused image and input image
    '''
    # placeholder -- not implemented; returns None
    return None
class Percep_loss(nn.Module):
    '''
    compute perceptual loss between fused image and input image
    Hooks selected pre-maxpool layers of a frozen VGG16-BN and sums the
    squared feature differences between the two inputs.
    '''
    def __init__(self, vgg, block_idx, device):
        '''
        block_index: the index of the block in VGG16 network, int or list
        int represents single layer perceptual loss
        list represents multiple layers perceptual loss
        '''
        super(Percep_loss, self).__init__()
        self.block_idx = block_idx
        self.device = device
        # load vgg16_bn model features
        self.vgg = vgg.features.to(device).eval()
        #self.loss = nn.MSELoss()
        # unable gradient update
        for param in self.vgg.parameters():
            param.requires_grad = False
        # remove maxpooling layer and relu layer
        # TODO:check this part on whether we want relu or not
        # bns[k] is the layer index two positions before the k-th MaxPool2d
        # (presumably the preceding batchnorm -- verify against vgg16_bn layout)
        bns = [i - 2 for i, m in enumerate(self.vgg) if isinstance(m, nn.MaxPool2d)]
        # register forward hook
        self.hooks = [PercepHook(self.vgg[bns[i]]) for i in block_idx]
        # truncate the network at the deepest hooked layer so forward passes
        # stop there
        self.features = self.vgg[0: bns[block_idx[-1]] + 1]
    def forward(self, inputs, targets):
        '''
        compute perceptual loss between inputs and targets
        '''
        if inputs.shape[1] == 1:
            # expand 1 channel image to 3 channel, [B, 1, H, W] -> [B, 3, H, W]
            inputs = inputs.expand(-1, 3, -1, -1)
        if targets.shape[1] == 1:
            targets = targets.expand(-1, 3, -1, -1)
        # get vgg output; the hooks capture the selected layer activations
        self.features(inputs)
        input_features = [hook.features.clone() for hook in self.hooks]
        self.features(targets)
        target_features = [hook.features for hook in self.hooks]
        assert len(input_features) == len(target_features), 'number of input features and target features should be the same'
        loss = 0
        for i in range(len(input_features)):
            #loss += self.loss(input_features[i], target_features[i]) # mse loss
            loss += ((input_features[i] - target_features[i]) ** 2).mean() # l2 norm
        return loss
def compute_perp_loss():
    '''
    you can use the perp_loss class to compute perceptual loss
    '''
    # placeholder -- not implemented; returns None
    return None
def l1_norm(matrix):
    """
    Entry-wise L1 norm (sum of absolute values) used by the fusion strategies.
    Returns a 0-d tensor.
    """
    return matrix.abs().sum()
def fusion_strategy(f1, f2, device, strategy="average"):
    """
    f1: the extracted features of images 1
    f2: the extracted features of images 2 (same shape as f1)
    device: retained for interface compatibility (the fused tensor inherits
        f1/f2's device, as it did in the original)
    strategy: one of
        "addition" -- element-wise sum
        "average"  -- element-wise mean
        "FER"      -- Feature Energy Ratio strategy
        "L1NW"     -- L1-Norm Weight Strategy
        "AL1NW"    -- Average L1-Norm Weight Strategy
        "FL1N"     -- Feature L1-Norm Strategy
        "SFNN"     -- softmax + per-channel nuclear-norm weighting
                      (assumes f1/f2 shaped [1, C, H, W] -- see squeeze(0))

    Raises ValueError for an unknown strategy name.
    BUG FIX: unknown strategies used to fall through silently and return an
    all-zeros tensor; leftover debug print()s in "L1NW" were removed.

    Note:
    If the original image is PET or SPECT modal,
    it should be converted into YCbCr data, including Y1, Cb and Cr.
    """
    if strategy == "addition":
        fused = f1 + f2
    elif strategy == "average":
        fused = (f1 + f2) / 2
    elif strategy == "FER":
        # per-element energy ratio; guard zero denominators
        f_sum = (f1 ** 2 + f2 ** 2).clone()
        f_sum[f_sum == 0] = 1
        k1 = f1 ** 2 / f_sum
        k2 = f2 ** 2 / f_sum
        fused = k1 * f1 + k2 * f2
    elif strategy == "L1NW":
        # raw (unnormalised) L1 norms as weights
        l1 = l1_norm(f1)
        l2 = l1_norm(f2)
        fused = l1 * f1 + l2 * f2
    elif strategy == "AL1NW":
        p1 = l1_norm(f1) / 2
        p2 = l1_norm(f2) / 2
        fused = p1 * f1 + p2 * f2
    elif strategy == "FL1N":
        l1 = l1_norm(f1)
        l2 = l1_norm(f2)
        w1 = l1 / (l1 + l2)
        w2 = l2 / (l1 + l2)
        fused = w1 * f1 + w2 * f2
    elif strategy == "SFNN":
        def process_for_nuc(f):
            # nuclear norm of every channel of a [1, C, H, W] map
            f = f.squeeze(0)
            total = []
            for i in range(f.shape[0]):
                total.append(torch.norm(f[i], "nuc").item())
            return total
        # NOTE(review): softmax dim left implicit as in the original (torch
        # picks a legacy default and warns) -- confirm the intended axis
        # before making it explicit.
        f1_soft = nn.functional.softmax(f1)
        f2_soft = nn.functional.softmax(f2)
        l1 = np.array(process_for_nuc(f1_soft))
        l2 = np.array(process_for_nuc(f2_soft))
        # squared max-channel nuclear norms, normalised to weights
        w1 = max(l1) ** 2 / (max(l1) ** 2 + max(l2) ** 2)
        w2 = max(l2) ** 2 / (max(l1) ** 2 + max(l2) ** 2)
        fused = w1 * f1 + w2 * f2
    else:
        # BUG FIX: previously returned torch.zeros_like(f1) silently
        raise ValueError("unknown fusion strategy: {}".format(strategy))
    # Need to do reconstruction on "fused"
    return fused
dilran | dilran-main/train.py | # Training script for the project
# Author: Simon Zhou, last modify Nov. 18, 2022
'''
Change log:
-Simon: file created, write some training code
-Simon: refine training script
'''
import argparse
import os
import sys
sys.path.append("../")
from tqdm import trange
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.models import vgg16_bn
import meta_config as config
from model import *
from our_utils import *
from dataset_loader import *
from loss import *
import wandb
# Command-line interface for the training run.
parser = argparse.ArgumentParser(description='parameters for the training script')
parser.add_argument('--dataset', type=str, default="CT-MRI", help="which dataset to use, available option: CT-MRI, MRI-PET, MRI-SPECT")
parser.add_argument('--batch_size', type=int, default=4, help='batch size for training')
parser.add_argument('--epochs', type=int, default=100, help='number of epochs for training')
parser.add_argument('--lr', type=float, default=0.0001, help='learning rate for training')
# NOTE(review): argparse `type=bool` is a known pitfall -- any non-empty
# string (including "False") parses as True. Consider action='store_true'
# in a future CLI-breaking change.
parser.add_argument('--lr_decay', type=bool, default=False, help='decay learing rate?')
parser.add_argument('--accum_batch', type=int, default=1, help='number of batches for gradient accumulation')
parser.add_argument('--lambda1', type=float, default=0.5, help='weight for image gradient loss')
parser.add_argument('--lambda2', type=float, default=0.5, help='weight for perceptual loss')
#parser.add_argument('--checkpoint', type=str, default='./model', help='Path to checkpoint')
# NOTE(review): store_true with default=True means cuda can never be disabled
# from the command line.
parser.add_argument('--cuda', action='store_true', help='whether to use cuda', default= True)
parser.add_argument('--seed', type=int, default=3407, help='random seed to use')
parser.add_argument('--base_loss', type=str, default='l1_charbonnier', help='which loss function to use for pixel-level (l2 or l1 charbonnier)')
opt = parser.parse_args()
######### whether to use cuda ####################
device = torch.device("cuda:0" if opt.cuda else "cpu")
#################################################
########## seeding ##############
seed_val = opt.seed
random_seed(seed_val, opt.cuda)
################################
############ making dirs########################
if not os.path.exists(config.res_dir):
    os.mkdir(config.res_dir)
model_dir = os.path.join(config.res_dir, "pretrained_models")
if not os.path.exists(model_dir):
    os.mkdir(model_dir)
if not os.path.exists(config.test_data_dir):
    os.mkdir(config.test_data_dir)
################################################
####### loading dataset ####################################
target_dir = os.path.join(config.data_dir, opt.dataset)
ct, mri = get_common_file(target_dir)
# hold out config.test_num pairs for testing; the rest is train+val
train_ct, train_mri, test_ct, test_mri = load_data(ct, target_dir, config.test_num)
# torch.save(test_ct, os.path.join(c.test_data_dir, "ct_test.pt"))
# torch.save(test_mri, os.path.join(c.test_data_dir, "mri_test.pt"))
#print(train_ct.shape, train_mri.shape, test_ct.shape, test_mri.shape)
# CT slices occupy the first half and MRI slices the second half after this
# concat; the index-based loaders below address this tensor directly
train_total = torch.cat((train_ct, train_mri), dim = 0).to(device)
# these loaders return index, not the actual image
train_loader, val_loader = get_loader(train_ct, train_mri, config.train_val_ratio, opt.batch_size)
print("train loader length: ", len(train_loader), " val loder length: ", len(val_loader))
# check the seed is working
# for batch_idx in train_loader:
# batch_idx = batch_idx.view(-1).long()
# print(batch_idx)
# print("validation index")
# for batch_idx in val_loader:
# batch_idx = batch_idx.view(-1).long()
# print(batch_idx)
# sys.exit()
############################################################
############ loading model #####################
model = fullModel().to(device)
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
# NOTE(review): stepLR only exists when --lr_decay is truthy; the training
# loop guards its use with the same flag.
if opt.lr_decay:
    stepLR = optim.lr_scheduler.StepLR(optimizer, step_size = 100, gamma=0.5)
###################################################
##### downloading pretrained vgg model ##################
vgg = vgg16_bn(pretrained = True)
########################################################
############## train model ##############
wandb.init(project="test-project", entity="csc2529", config=opt) # visualize in wandb
# wandb.config = {
# "learning_rate": opt.lr,
# "epochs": opt.epochs,
# "batch_size": opt.batch_size,
# "lambda1": c.lambda1,
# "lambda2": c.lambda2
# }
wandb.watch(model)
# gradient accumulation for small batch
# NOTE(review): NUM_ACCUMULATION_STEPS is currently unused -- the division and
# modulo-step lines in the training loop are commented out.
NUM_ACCUMULATION_STEPS = opt.accum_batch
# Epoch loop: train on the shuffled index batches, then evaluate on the
# validation split, log everything to wandb, and checkpoint on improvement.
#
# BUG FIXES in this block:
#   1. The validation loss was computed as loss_func2(vgg, img_out, img, ...),
#      i.e. on the *last training batch*, so the reported validation loss
#      never reflected validation data. It now uses val_img_out / val_img.
#   2. val_loss was re-initialised to [] inside every epoch, erasing the
#      history; it now accumulates across epochs like train_loss.
train_loss = []
val_loss = []
t = trange(opt.epochs, desc='Training progress...', leave=True)
lowest_val_loss = int(1e9)
for i in t:
    print("new epoch {} starts!".format(i))
    # clear gradient in model
    model.zero_grad()
    b_loss = 0
    # train model
    model.train()
    for j, batch_idx in enumerate(train_loader):
        # clear gradient in optimizer
        optimizer.zero_grad()
        # the loader yields float index tensors; convert to long for indexing
        batch_idx = batch_idx.view(-1).long()
        img = train_total[batch_idx]
        img_out = model(img)
        # compute loss (total, pixel, gradient, perceptual)
        loss, _, _, _ = loss_func2(vgg, img_out, img, opt.lambda1, opt.lambda2, config.block_idx, device)
        # back propagate and update weights
        loss.backward()
        optimizer.step()
        b_loss += loss.item()
    # store average training loss of this epoch
    ave_loss = b_loss / len(train_loader)
    train_loss.append(ave_loss)
    print("epoch {}, training loss is: {}".format(i, ave_loss))
    # validation
    val_display_img = []
    with torch.no_grad():
        b_loss = 0
        # eval model, unable update weights
        model.eval()
        for k, batch_idx in enumerate(val_loader):
            batch_idx = batch_idx.view(-1).long()
            val_img = train_total[batch_idx]
            val_img_out = model(val_img)
            # display first image to visualize, this can be changed
            val_display_img.extend([val_img_out[i].squeeze(0).cpu().numpy() for i in range(1)])
            # BUG FIX: use the validation batch, not the stale training batch
            loss, _, _, _ = loss_func2(vgg, val_img_out, val_img, opt.lambda1, opt.lambda2, config.block_idx, device)
            b_loss += loss.item()
    ave_val_loss = b_loss / len(val_loader)
    # BUG FIX: append to the persistent list created before the loop
    val_loss.append(ave_val_loss)
    print("epoch {}, validation loss is: {}".format(i, ave_val_loss))
    # define a metric we are interested in the minimum of
    wandb.define_metric("train loss", summary="min")
    wandb.define_metric("val loss", summary="min")
    wandb.log({"train loss": ave_loss, "epoch": i})
    wandb.log({"val loss": ave_val_loss, "epoch": i})
    wandb.log({"val sample images": [wandb.Image(img) for img in val_display_img]})
    # save model whenever validation improves
    if ave_val_loss < lowest_val_loss:
        torch.save(model.state_dict(), model_dir+"/model_at_{}.pt".format(i))
        lowest_val_loss = ave_val_loss
        print("model is saved in epoch {}".format(i))
    # lr decay update
    if opt.lr_decay:
        stepLR.step()
######################################## | 7,150 | 36.439791 | 144 | py |
audio-text_retrieval | audio-text_retrieval-main/train.py | #!/usr/bin/env python3
# coding: utf-8
# @Author : Xinhao Mei @CVSSP, University of Surrey
# @E-mail : x.mei@surrey.ac.uk
import os
import argparse
import torch
from trainer.trainer import train
from tools.config_loader import get_config
if __name__ == '__main__':
    # Entry point: parse CLI args, load the settings file, and start training.
    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    torch.backends.cudnn.enabled = False
    parser = argparse.ArgumentParser(description='Settings.')
    parser.add_argument('-n', '--exp_name', default='exp_name', type=str,
                        help='Name of the experiment.')
    # BUG FIX: `args.config` is read below, but the option had been commented
    # out, so every run raised AttributeError. Restore it with the original
    # default so existing invocations keep working.
    parser.add_argument('-c', '--config', default='settings', type=str,
                        help='Name of the setting file.')
    args = parser.parse_args()
    config = get_config(args.config)
    config.exp_name = args.exp_name
    train(config)
| 1,980 | 35.685185 | 73 | py |
audio-text_retrieval | audio-text_retrieval-main/trainer/trainer.py | #!/usr/bin/env python3
# coding: utf-8
# @Author : Xinhao Mei @CVSSP, University of Surrey
# @E-mail : x.mei@surrey.ac.uk
import platform
import sys
import time
import numpy as np
import torch
from tqdm import tqdm
from pathlib import Path
from loguru import logger
from pprint import PrettyPrinter
from torch.utils.tensorboard import SummaryWriter
from tools.utils import setup_seed, AverageMeter, a2t, t2a
from tools.loss import BiDirectionalRankingLoss, TripletLoss, NTXent, WeightTriplet
from models.ASE_model import ASE
from data_handling.DataLoader import get_dataloader
def train(config):
    """Full training loop for the audio-text retrieval model.

    Sets up logging/TensorBoard, model, optimizer, loss and data loaders,
    trains with per-epoch validation, keeps the checkpoint with the best
    validation R@1+R@5+R@10, and finally evaluates that checkpoint on the
    test set.

    :param config: experiment configuration object (training/data/path fields).
    """
    # setup seed for reproducibility
    setup_seed(config.training.seed)
    # set up logger
    exp_name = config.exp_name
    folder_name = '{}_data_{}_freeze_{}_lr_{}_' \
                  'margin_{}_seed_{}'.format(exp_name, config.dataset,
                                             str(config.training.freeze),
                                             config.training.lr,
                                             config.training.margin,
                                             config.training.seed)
    log_output_dir = Path('outputs', folder_name, 'logging')
    model_output_dir = Path('outputs', folder_name, 'models')
    log_output_dir.mkdir(parents=True, exist_ok=True)
    model_output_dir.mkdir(parents=True, exist_ok=True)
    logger.remove()
    # only records bound with indent == 1 reach stdout / the log file
    logger.add(sys.stdout, format='{time: YYYY-MM-DD at HH:mm:ss} | {message}', level='INFO',
               filter=lambda record: record['extra']['indent'] == 1)
    logger.add(log_output_dir.joinpath('output.txt'), format='{time: YYYY-MM-DD at HH:mm:ss} | {message}', level='INFO',
               filter=lambda record: record['extra']['indent'] == 1)
    main_logger = logger.bind(indent=1)
    # setup TensorBoard
    writer = SummaryWriter(log_dir=str(log_output_dir) + '/tensorboard')
    # print training settings
    printer = PrettyPrinter()
    main_logger.info('Training setting:\n'
                     f'{printer.pformat(config)}')
    # set up model
    device, device_name = ('cuda',
                           torch.cuda.get_device_name(torch.cuda.current_device())) \
        if torch.cuda.is_available() else ('cpu', platform.processor())
    main_logger.info(f'Process on {device_name}')
    model = ASE(config)
    model = model.to(device)
    # set up optimizer and loss
    optimizer = torch.optim.Adam(params=model.parameters(), lr=config.training.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
    if config.training.loss == 'triplet':
        criterion = TripletLoss(margin=config.training.margin)
    elif config.training.loss == 'ntxent':
        criterion = NTXent()
    elif config.training.loss == 'weight':
        criterion = WeightTriplet(margin=config.training.margin)
    else:
        criterion = BiDirectionalRankingLoss(margin=config.training.margin)
    # set up data loaders
    train_loader = get_dataloader('train', config)
    val_loader = get_dataloader('val', config)
    test_loader = get_dataloader('test', config)
    main_logger.info(f'Size of training set: {len(train_loader.dataset)}, size of batches: {len(train_loader)}')
    main_logger.info(f'Size of validation set: {len(val_loader.dataset)}, size of batches: {len(val_loader)}')
    main_logger.info(f'Size of test set: {len(test_loader.dataset)}, size of batches: {len(test_loader)}')
    ep = 1
    # resume from a checkpoint
    if config.training.resume:
        checkpoint = torch.load(config.path.resume_model)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # NOTE(review): this re-runs the saved epoch after a resume — confirm
        # whether `checkpoint['epoch'] + 1` was intended.
        ep = checkpoint['epoch']
    # training loop
    recall_sum = []
    for epoch in range(ep, config.training.epochs + 1):
        main_logger.info(f'Training for epoch [{epoch}]')
        epoch_loss = AverageMeter()
        start_time = time.time()
        model.train()
        for batch_id, batch_data in tqdm(enumerate(train_loader), total=len(train_loader)):
            audios, captions, audio_ids, _ = batch_data
            # move data to GPU
            audios = audios.to(device)
            audio_ids = audio_ids.to(device)
            audio_embeds, caption_embeds = model(audios, captions)
            loss = criterion(audio_embeds, caption_embeds, audio_ids)
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), config.training.clip_grad)
            optimizer.step()
            epoch_loss.update(loss.cpu().item())
        writer.add_scalar('train/loss', epoch_loss.avg, epoch)
        elapsed_time = time.time() - start_time
        main_logger.info(f'Training statistics:\tloss for epoch [{epoch}]: {epoch_loss.avg:.3f},'
                         f'\ttime: {elapsed_time:.1f}, lr: {scheduler.get_last_lr()[0]:.6f}.')
        # validation loop, validation after each epoch
        main_logger.info("Validating...")
        r1, r5, r10, r50, medr, meanr = validate(val_loader, model, device)
        r_sum = r1 + r5 + r10
        recall_sum.append(r_sum)
        writer.add_scalar('val/r@1', r1, epoch)
        writer.add_scalar('val/r@5', r5, epoch)
        writer.add_scalar('val/r@10', r10, epoch)
        writer.add_scalar('val/r@50', r50, epoch)
        writer.add_scalar('val/med@r', medr, epoch)
        writer.add_scalar('val/mean@r', meanr, epoch)
        # save model when validation recall matches or beats the best so far
        if r_sum >= max(recall_sum):
            main_logger.info('Model saved.')
            torch.save({
                'model': model.state_dict(),
                # BUG FIX: the optimizer state was previously saved as
                # `model.state_dict()`, which made checkpoint resuming load
                # model weights into the optimizer.
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
            }, str(model_output_dir) + '/best_model.pth')
        scheduler.step()
    # Training done, evaluate on evaluation set
    main_logger.info('Training done. Start evaluating.')
    best_checkpoint = torch.load(str(model_output_dir) + '/best_model.pth')
    model.load_state_dict(best_checkpoint['model'])
    best_epoch = best_checkpoint['epoch']
    main_logger.info(f'Best checkpoint occurred in {best_epoch} th epoch.')
    validate(test_loader, model, device)
    main_logger.info('Evaluation done.')
    writer.close()
def validate(data_loader, model, device):
    """Embed every audio clip and caption in *data_loader*, then compute,
    log and return the text-to-audio retrieval metrics
    (r1, r5, r10, r50, medr, meanr)."""
    val_logger = logger.bind(indent=1)
    model.eval()
    with torch.no_grad():
        # full-dataset embedding matrices, allocated lazily once the
        # embedding width is known
        audio_embs = None
        cap_embs = None
        total = len(data_loader.dataset)
        for _, (audios, captions, audio_ids, indexs) in tqdm(enumerate(data_loader), total=len(data_loader)):
            audio_embeds, caption_embeds = model(audios.to(device), captions)
            if audio_embs is None:
                audio_embs = np.zeros((total, audio_embeds.size(1)))
                cap_embs = np.zeros((total, caption_embeds.size(1)))
            audio_embs[indexs] = audio_embeds.cpu().numpy()
            cap_embs[indexs] = caption_embeds.cpu().numpy()
        # text -> audio retrieval
        r1, r5, r10, r50, medr, meanr = t2a(audio_embs, cap_embs)
        val_logger.info(
            'Caption to audio: r1: {:.2f}, r5: {:.2f}, '
            'r10: {:.2f}, r50: {:.2f}, medr: {:.2f}, meanr: {:.2f}'.format(
                r1, r5, r10, r50, medr, meanr))
        # audio -> text retrieval
        r1_a, r5_a, r10_a, r50_a, medr_a, meanr_a = a2t(audio_embs, cap_embs)
        val_logger.info(
            'Audio to caption: r1: {:.2f}, r5: {:.2f}, '
            'r10: {:.2f}, r50: {:.2f}, medr: {:.2f}, meanr: {:.2f}'.format(
                r1_a, r5_a, r10_a, r50_a, medr_a, meanr_a))
        return r1, r5, r10, r50, medr, meanr
| 7,788 | 36.628019 | 120 | py |
audio-text_retrieval | audio-text_retrieval-main/tools/loss.py | #!/usr/bin/env python3
# coding: utf-8
# @Author : Xinhao Mei @CVSSP, University of Surrey
# @E-mail : x.mei@surrey.ac.uk
import torch
import torch.nn as nn
from sentence_transformers import util
import torch.nn.functional as F
class TripletLoss(nn.Module):
    """Max-violation triplet ranking loss over a cosine-similarity matrix."""
    def __init__(self, margin=0.2):
        super(TripletLoss, self).__init__()
        self.margin = margin

    def forward(self, audio_embeds, text_embeds, labels):
        """
        :param audio_embeds: (batch_size, embed_dim)
        :param text_embeds: (batch_size, embed_dim)
        :param labels: (batch_size,) group ids; pairs with equal labels are positives
        :return: scalar loss
        """
        batch = audio_embeds.size(0)
        sims = util.cos_sim(audio_embeds, text_embeds)  # (batch, batch)
        positives = torch.diag(sims).view(batch, 1)
        # hinge of every column score against its row's positive (caption
        # retrieval) and of every row score against its column's positive
        # (audio retrieval)
        cost_caption = F.relu(self.margin + sims - positives.expand_as(sims))
        cost_audio = F.relu(self.margin + sims - positives.t().expand_as(sims))
        # zero out pairs that share a label (including the diagonal)
        same = labels.expand(batch, batch).eq(labels.expand(batch, batch).t()).to(cost_audio.device)
        cost_caption = cost_caption.masked_fill(same, 0)
        cost_audio = cost_audio.masked_fill(same, 0)
        # keep only the hardest violation in each direction
        return (cost_caption.max(1)[0].sum() + cost_audio.max(0)[0].sum()) / batch
class BiDirectionalRankingLoss(nn.Module):
    """Bidirectional ranking loss that sums *all* margin violations
    (unlike TripletLoss, which keeps only the hardest one)."""
    def __init__(self, margin=0.2):
        super(BiDirectionalRankingLoss, self).__init__()
        self.margin = margin

    def forward(self, audio_embeds, text_embeds, labels):
        """
        :param audio_embeds: (batch_size, embed_dim)
        :param text_embeds: (batch_size, embed_dim)
        :param labels: (batch_size,) group ids; pairs with equal labels are positives
        :return: scalar loss
        """
        batch = audio_embeds.size(0)
        sims = util.cos_sim(audio_embeds, text_embeds)  # (batch, batch)
        positives = torch.diag(sims).view(batch, 1)
        # caption-retrieval hinge (column-wise) and audio-retrieval hinge (row-wise)
        cost_caption = F.relu(self.margin + sims - positives.expand_as(sims))
        cost_audio = F.relu(self.margin + sims - positives.t().expand_as(sims))
        # ignore pairs that share a label (including the diagonal)
        same = labels.expand(batch, batch).eq(labels.expand(batch, batch).t()).to(cost_audio.device)
        cost_caption = cost_caption.masked_fill(same, 0)
        cost_audio = cost_audio.masked_fill(same, 0)
        return (cost_caption.sum() + cost_audio.sum()) / batch
class NTXent(nn.Module):
    """Normalized temperature-scaled cross-entropy (InfoNCE-style) loss,
    averaged over both retrieval directions."""
    def __init__(self, temperature=0.07):
        super(NTXent, self).__init__()
        self.loss = nn.LogSoftmax(dim=1)
        self.tau = temperature

    def forward(self, audio_embeds, text_embeds, labels):
        batch = audio_embeds.shape[0]
        sim_at = util.cos_sim(audio_embeds, text_embeds) / self.tau
        sim_ta = util.cos_sim(text_embeds, audio_embeds) / self.tau
        # same-label mask with the diagonal removed so the positives survive
        same = labels.expand(batch, batch).eq(labels.expand(batch, batch).t()).to(sim_at.device)
        same = same ^ torch.diag_embed(same.diag())
        loss_at = -self.loss(sim_at).masked_fill(same, 0).diag().mean()
        loss_ta = -self.loss(sim_ta).masked_fill(same, 0).diag().mean()
        return 0.5 * loss_at + 0.5 * loss_ta
class WeightTriplet(nn.Module):
    """
    Compute contrastive loss
    """
    def __init__(self, margin=0.2):
        super(WeightTriplet, self).__init__()
        self.margin = margin
    def polyloss(self, sim_mat, label):
        # Polynomial hinge over both directions of the similarity matrix.
        # The 0.2/0.7/0.5 and 0.9/0.4/0.03 coefficients are fixed polynomial
        # weightings for positive/negative pairs — presumably tuned
        # empirically; TODO confirm their source.
        epsilon = 1e-5
        size = sim_mat.size(0)
        hh = sim_mat.t()  # transposed matrix covers the opposite direction
        loss = list()
        for i in range(size):
            # positive score for anchor i, excluding near-1 degenerate matches
            pos_pair_ = sim_mat[i][i]
            pos_pair_ = pos_pair_[pos_pair_ < 1 - epsilon]
            # negatives: every column whose label differs from anchor i's
            neg_pair_ = sim_mat[i][label != label[i]]
            # keep only negatives that violate the margin w.r.t. the hardest positive
            neg_pair = neg_pair_[neg_pair_ + self.margin > min(pos_pair_)]
            pos_pair = pos_pair_
            if len(neg_pair) < 1 or len(pos_pair) < 1:
                continue
            pos_loss = torch.clamp(0.2 * torch.pow(pos_pair, 2) - 0.7 * pos_pair + 0.5, min=0)
            neg_pair = max(neg_pair)  # hardest negative only
            neg_loss = torch.clamp(0.9 * torch.pow(neg_pair, 2) - 0.4 * neg_pair + 0.03, min=0)
            loss.append(pos_loss + neg_loss)
        for i in range(size):
            # same computation on the transpose (other retrieval direction)
            pos_pair_ = hh[i][i]
            pos_pair_ = pos_pair_[pos_pair_ < 1 - epsilon]
            neg_pair_ = hh[i][label != label[i]]
            neg_pair = neg_pair_[neg_pair_ + self.margin > min(pos_pair_)]
            pos_pair = pos_pair_
            if len(neg_pair) < 1 or len(pos_pair) < 1:
                continue
            pos_loss = torch.clamp(0.2 * torch.pow(pos_pair, 2) - 0.7 * pos_pair + 0.5, min=0)
            neg_pair = max(neg_pair)
            neg_loss = torch.clamp(0.9 * torch.pow(neg_pair, 2) - 0.4 * neg_pair + 0.03, min=0)
            loss.append(pos_loss + neg_loss)
        if len(loss) == 0:
            # no violating pairs in the batch: zero loss (still differentiable)
            return torch.zeros([], requires_grad=True)
        loss = sum(loss) / size
        return loss
    def forward(self, audio_embeds, text_embeds, labels):
        # compute image-sentence score matrix
        scores = util.cos_sim(audio_embeds, text_embeds)
        loss = self.polyloss(scores, labels)
        return loss
| 5,474 | 29.248619 | 95 | py |
audio-text_retrieval | audio-text_retrieval-main/tools/utils.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : Xinhao Mei @CVSSP, University of Surrey
# @E-mail : x.mei@surrey.ac.uk
"""
Evaluation tools adapted from https://github.com/fartashf/vsepp/blob/master/evaluation.py
"""
import numpy as np
import torch
import random
from sentence_transformers import util
from loguru import logger
from tools.file_io import load_pickle_file
from gensim.models.word2vec import Word2Vec
def setup_seed(seed):
    """Seed python's, numpy's and torch's RNGs (CPU and every CUDA device)
    and force deterministic cuDNN behaviour for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # trade cuDNN autotuning speed for run-to-run determinism
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
class AverageMeter(object):
    """
    Keeps track of most recent, average, sum, and count of a metric.
    """
    def __init__(self):
        self.reset()

    def reset(self):
        """Zero every statistic."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val*, observed *n* times, and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def align_word_embedding(words_list_path, model_path, nhid):
    """Build an (ntoken, nhid) float tensor of Word2Vec embeddings aligned
    with the pickled vocabulary.

    Words present in the Word2Vec model receive their pretrained vector;
    out-of-vocabulary words keep a zero row.

    :param words_list_path: pickle file containing the vocabulary list.
    :param model_path: path of a saved gensim Word2Vec model.
    :param nhid: embedding width (assumed to match the w2v vector size —
        TODO confirm).
    """
    words_list = load_pickle_file(words_list_path)
    w2v_model = Word2Vec.load(model_path)
    weights = np.zeros((len(words_list), nhid))
    for i, word in enumerate(words_list):
        # PERF FIX: membership on `key_to_index` (a dict) is O(1);
        # the previous scan of `index_to_key` (a list) was O(vocab) per word.
        if word in w2v_model.wv.key_to_index:
            weights[i] = w2v_model.wv[word]
    return torch.from_numpy(weights).float()
def l2norm(X):
    """L2-normalize each row of X (dim=1).

    Guards against division by zero so an all-zero row stays zero instead
    of becoming NaN.
    """
    norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()
    # clamp avoids 0/0 -> NaN for all-zero rows; for any non-zero row the
    # clamp is a no-op
    X = torch.div(X, norm.clamp(min=1e-12))
    return X
# evaluation tools
def a2t(audio_embs, cap_embs, return_ranks=False):
    """Audio-to-caption retrieval metrics.

    The layout assumed by the indexing below is 5 caption embeddings per
    audio clip, with the clip's audio embedding repeated at rows
    5*i .. 5*i+4. Note mAP10 is computed but not returned.

    :param audio_embs: (5 * num_audios, dim) array of audio embeddings.
    :param cap_embs: (5 * num_audios, dim) array of caption embeddings.
    :param return_ranks: also return per-query ranks and top-1 indices.
    :return: (r1, r5, r10, r50, medr, meanr) [, ranks, top1]
    """
    # audio to caption retrieval
    num_audios = int(audio_embs.shape[0] / 5)
    index_list = []
    ranks = np.zeros(num_audios)
    top1 = np.zeros(num_audios)
    mAP10 = np.zeros(num_audios)
    for index in range(num_audios):
        # get query audio
        audio = audio_embs[5 * index].reshape(1, audio_embs.shape[1])
        # compute scores
        d = util.cos_sim(torch.Tensor(audio), torch.Tensor(cap_embs)).squeeze(0).numpy()
        # captions sorted by descending similarity
        inds = np.argsort(d)[::-1]
        index_list.append(inds[0])
        inds_map = []
        rank = 1e20
        # the 5 ground-truth captions of this clip live at rows 5*index..5*index+4;
        # keep the best (smallest) rank among them
        for i in range(5 * index, 5 * index + 5, 1):
            tmp = np.where(inds == i)[0][0]
            if tmp < rank:
                rank = tmp
            if tmp < 10:
                inds_map.append(tmp + 1)
        inds_map = np.sort(np.array(inds_map))
        # mAP@10 over the (up to 5) ground-truth captions found in the top 10
        if len(inds_map) != 0:
            mAP10[index] = np.sum((np.arange(1, len(inds_map) + 1) / inds_map)) / 5
        else:
            mAP10[index] = 0.
        ranks[index] = rank
        top1[index] = inds[0]
    # compute metrics
    r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
    r50 = 100.0 * len(np.where(ranks < 50)[0]) / len(ranks)
    mAP10 = 100.0 * np.sum(mAP10) / len(ranks)
    medr = np.floor(np.median(ranks)) + 1
    meanr = ranks.mean() + 1
    if return_ranks:
        return r1, r5, r10, r50, medr, meanr, ranks, top1
    else:
        return r1, r5, r10, r50, medr, meanr
def t2a(audio_embs, cap_embs, return_ranks=False):
    """Caption-to-audio retrieval metrics.

    Same 5-captions-per-audio layout as a2t(); one query per caption.
    Note mAP10 is computed but not returned.

    :param audio_embs: (5 * num_audios, dim) array of audio embeddings.
    :param cap_embs: (5 * num_audios, dim) array of caption embeddings.
    :param return_ranks: also return per-query ranks and top-1 indices.
    :return: (r1, r5, r10, r50, medr, meanr) [, ranks, top1]
    """
    # caption to audio retrieval
    num_audios = int(audio_embs.shape[0] / 5)
    # deduplicate: one audio embedding per clip (every 5th row)
    audios = np.array([audio_embs[i]for i in range(0, audio_embs.shape[0], 5)])
    ranks = np.zeros(5 * num_audios)
    top1 = np.zeros(5 * num_audios)
    for index in range(num_audios):
        # get query captions
        queries = cap_embs[5 * index: 5 * index + 5]
        # compute scores
        d = util.cos_sim(torch.Tensor(queries), torch.Tensor(audios)).numpy()
        inds = np.zeros(d.shape)
        for i in range(len(inds)):
            # audios sorted by descending similarity for caption i
            inds[i] = np.argsort(d[i])[::-1]
            # rank of the ground-truth audio (clip `index`) for this caption
            ranks[5 * index + i] = np.where(inds[i] == index)[0][0]
            top1[5 * index + i] = inds[i][0]
    # compute metrics
    r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
    r50 = 100.0 * len(np.where(ranks < 50)[0]) / len(ranks)
    mAP10 = 100.0 * np.sum(1 / (ranks[np.where(ranks < 10)[0]] + 1)) / len(ranks)
    medr = np.floor(np.median(ranks)) + 1
    meanr = ranks.mean() + 1
    if return_ranks:
        return r1, r5, r10, r50, medr, meanr, ranks, top1
    else:
        return r1, r5, r10, r50, medr, meanr
| 4,670 | 28.751592 | 89 | py |
audio-text_retrieval | audio-text_retrieval-main/data_handling/DataLoader.py | #!/usr/bin/env python3
# coding: utf-8
# @Author : Xinhao Mei @CVSSP, University of Surrey
# @E-mail : x.mei@surrey.ac.uk
import torch
import random
import numpy as np
import h5py
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
class AudioCaptionDataset(Dataset):
    def __init__(self, dataset='AudioCaps', split='train'):
        """
        load audio clip's waveform and corresponding caption
        Args:
            dataset: 'AudioCaps', 'Clotho
            split: 'train', 'val', 'test'
        """
        super(AudioCaptionDataset, self).__init__()
        self.dataset = dataset
        self.split = split
        self.h5_path = f'data/{dataset}/hdf5s/{split}/{split}.h5'
        # AudioCaps training has one caption per clip; every other
        # dataset/split carries five captions per clip.
        if dataset == 'AudioCaps' and split == 'train':
            self.is_train = True
            self.num_captions_per_audio = 1
            with h5py.File(self.h5_path, 'r') as hf:
                self.audio_keys = [audio_name.decode() for audio_name in hf['audio_name'][:]]
                # audio_names: [str]
                self.captions = [caption.decode() for caption in hf['caption'][:]]
        else:
            self.is_train = False
            self.num_captions_per_audio = 5
            with h5py.File(self.h5_path, 'r') as hf:
                self.audio_keys = [audio_name.decode() for audio_name in hf['audio_name'][:]]
                # captions are kept as raw bytes here and decoded lazily
                # in __getitem__
                self.captions = [caption for caption in hf['caption'][:]]
                if dataset == 'Clotho':
                    self.audio_lengths = [length for length in hf['audio_length'][:]]
                # [cap_1, cap_2, ..., cap_5]
    def __len__(self):
        # one item per (clip, caption) pair
        return len(self.audio_keys) * self.num_captions_per_audio
    def __getitem__(self, index):
        """Return (waveform, caption, audio_idx, length, index) for one pair."""
        audio_idx = index // self.num_captions_per_audio
        audio_name = self.audio_keys[audio_idx]
        # the h5 file is reopened on every call rather than held open
        with h5py.File(self.h5_path, 'r') as hf:
            waveform = hf['waveform'][audio_idx]
        if self.dataset == 'AudioCaps' and self.is_train:
            caption = self.captions[audio_idx]
        else:
            captions = self.captions[audio_idx]
            # pick this item's caption among the clip's five
            cap_idx = index % self.num_captions_per_audio
            caption = captions[cap_idx].decode()
        if self.dataset == 'Clotho':
            length = self.audio_lengths[audio_idx]
            return waveform, caption, audio_idx, length, index
        else:
            return waveform, caption, audio_idx, len(waveform), index
def collate_fn(batch_data):
    """Batch (waveform, caption, audio_idx, length, index) tuples.

    Waveforms are zero-padded (or truncated) to the longest length in the
    batch.

    Returns:
        (waveform tensor [B, T_max], caption list, audio-id float tensor,
         dataset-index ndarray)
    """
    target_len = max(item[3] for item in batch_data)
    padded = []
    for item in batch_data:
        wav = item[0]
        if wav.shape[0] < target_len:
            tail = torch.zeros(target_len - wav.shape[0]).float()
            wav_t = torch.cat([torch.from_numpy(wav).float(), tail])
        else:
            wav_t = torch.from_numpy(wav[:target_len]).float()
        padded.append(wav_t.unsqueeze_(0))
    wavs_tensor = torch.cat(padded)
    captions = [item[1] for item in batch_data]
    audio_ids = torch.Tensor([item[2] for item in batch_data])
    indexs = np.array([item[4] for item in batch_data])
    return wavs_tensor, captions, audio_ids, indexs
def get_dataloader(split, config):
    """Build a DataLoader for *split*; 'train' shuffles and drops the last
    incomplete batch, other splits keep deterministic order."""
    dataset = AudioCaptionDataset(config.dataset, split)
    is_train = split == 'train'
    return DataLoader(dataset=dataset,
                      batch_size=config.data.batch_size,
                      shuffle=is_train,
                      drop_last=is_train,
                      num_workers=config.data.num_workers,
                      collate_fn=collate_fn)
| 3,778 | 31.86087 | 93 | py |
audio-text_retrieval | audio-text_retrieval-main/models/TextEncoder.py | #!/usr/bin/env python3
# coding: utf-8
# @Author : Xinhao Mei @CVSSP, University of Surrey
# @E-mail : x.mei@surrey.ac.uk
import math
import torch
import torch.nn as nn
import numpy as np
from models.BERT_Config import MODELS
class BertEncoder(nn.Module):
    """Caption encoder: tokenizes a list of strings and returns the last
    hidden state of the first ([CLS]) token for each caption."""
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        bert_type = config.bert_encoder.type
        dropout = config.training.dropout
        self.tokenizer = MODELS[bert_type][1].from_pretrained(bert_type)
        if 'clip' not in bert_type:
            self.bert_encoder = MODELS[bert_type][0].from_pretrained(bert_type,
                                                                     add_pooling_layer=False,
                                                                     hidden_dropout_prob=dropout,
                                                                     attention_probs_dropout_prob=dropout,
                                                                     output_hidden_states=False)
        else:
            self.bert_encoder = MODELS[bert_type][0].from_pretrained(bert_type)
        if config.training.freeze:
            # keep the pretrained weights fixed
            for name, param in self.bert_encoder.named_parameters():
                param.requires_grad = False

    def forward(self, captions):
        """:param captions: list of caption strings.
        :return: (batch, hidden) tensor of [CLS] embeddings."""
        # BUG FIX: the device was hard-coded to 'cuda', which crashed on
        # CPU-only hosts; derive it from the encoder's own parameters instead.
        device = next(self.bert_encoder.parameters()).device
        tokenized = self.tokenizer(captions, add_special_tokens=True,
                                   padding=True, return_tensors='pt')
        input_ids = tokenized['input_ids'].to(device)
        attention_mask = tokenized['attention_mask'].to(device)
        output = self.bert_encoder(input_ids=input_ids,
                                   attention_mask=attention_mask)[0]
        # first-token embedding of the last hidden state
        cls = output[:, 0, :]
        return cls
| 1,812 | 36 | 106 | py |
audio-text_retrieval | audio-text_retrieval-main/models/ASE_model.py | #!/usr/bin/env python3
# coding: utf-8
# @Author : Xinhao Mei @CVSSP, University of Surrey
# @E-mail : x.mei@surrey.ac.uk
import math
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from tools.utils import l2norm
from models.AudioEncoder import Cnn10, ResNet38, Cnn14
from models.TextEncoder import BertEncoder, W2VEncoder
from models.BERT_Config import MODELS
class AudioEnc(nn.Module):
    # Wraps one of the PANNs-style CNN audio encoders selected by config,
    # optionally loading pretrained weights and freezing them.
    def __init__(self, config):
        super(AudioEnc, self).__init__()
        if config.cnn_encoder.model == 'Cnn10':
            self.audio_enc = Cnn10(config)
        elif config.cnn_encoder.model == 'ResNet38':
            self.audio_enc = ResNet38(config)
        elif config.cnn_encoder.model == 'Cnn14':
            self.audio_enc = Cnn14(config)
        else:
            raise NotImplementedError('No such audio encoder network.')
        if config.cnn_encoder.pretrained:
            # loading pretrained CNN weights
            pretrained_cnn = torch.load('pretrained_models/audio_encoder/{}.pth'.
                                        format(config.cnn_encoder.model))['model']
            dict_new = self.audio_enc.state_dict().copy()
            # skip classifier ('fc') and front-end ('spec*'/'logmel*') weights;
            # copy the remaining matching parameters over
            trained_list = [i for i in pretrained_cnn.keys()
                            if not ('fc' in i or i.startswith('spec') or i.startswith('logmel'))]
            for i in range(len(trained_list)):
                dict_new[trained_list[i]] = pretrained_cnn[trained_list[i]]
            self.audio_enc.load_state_dict(dict_new)
        if config.training.freeze:
            # freeze the whole encoder when configured
            for name, param in self.audio_enc.named_parameters():
                param.requires_grad = False
    def forward(self, inputs):
        # inputs: raw waveforms; returns the encoder's pooled feature vector
        audio_encoded = self.audio_enc(inputs)
        return audio_encoded
class ASE(nn.Module):
    # Audio-text joint embedding model: a CNN audio branch and a BERT/Word2Vec
    # text branch, each projected into a shared `joint_embed`-dim space.
    def __init__(self, config):
        super(ASE, self).__init__()
        self.l2 = config.training.l2
        joint_embed = config.joint_embed
        self.audio_enc = AudioEnc(config)
        # projection head input width matches the chosen encoder's output:
        # Cnn10 -> 512, ResNet38/Cnn14 -> 2048
        if config.cnn_encoder.model == 'Cnn10':
            self.audio_linear = nn.Sequential(
                nn.Linear(512, joint_embed),
                nn.ReLU(),
                nn.Linear(joint_embed, joint_embed)
            )
        elif config.cnn_encoder.model == 'ResNet38' or config.cnn_encoder.model == 'Cnn14':
            self.audio_linear = nn.Sequential(
                nn.Linear(2048, joint_embed * 2),
                nn.ReLU(),
                nn.Linear(joint_embed * 2, joint_embed)
            )
        # self.audio_gated_linear = nn.Linear(joint_embed, joint_embed)
        if config.text_encoder == 'bert':
            self.text_enc = BertEncoder(config)
            bert_type = config.bert_encoder.type
            # MODELS[bert_type][2] is the text model's hidden size
            self.text_linear = nn.Sequential(
                nn.Linear(MODELS[bert_type][2], joint_embed * 2),
                nn.ReLU(),
                nn.Linear(joint_embed * 2, joint_embed)
            )
        elif config.text_encoder == 'w2v':
            self.text_enc = W2VEncoder(config)
            self.text_linear = nn.Sequential(
                nn.Linear(300, joint_embed),
                nn.ReLU(),
                nn.Linear(joint_embed, joint_embed)
            )
    def encode_audio(self, audios):
        # raw waveforms -> encoder features
        return self.audio_enc(audios)
    def encode_text(self, captions):
        # caption strings -> encoder features
        return self.text_enc(captions)
    def forward(self, audios, captions):
        # Returns a pair of (batch, joint_embed) embeddings, optionally
        # L2-normalized so cosine similarity reduces to a dot product.
        audio_encoded = self.encode_audio(audios)     # batch x channel
        caption_encoded = self.encode_text(captions)
        audio_embed = self.audio_linear(audio_encoded)
        caption_embed = self.text_linear(caption_encoded)
        if self.l2:
            # apply l2-norm on the embeddings
            audio_embed = l2norm(audio_embed)
            caption_embed = l2norm(caption_embed)
        return audio_embed, caption_embed
| 3,846 | 33.348214 | 97 | py |
audio-text_retrieval | audio-text_retrieval-main/models/AudioEncoder.py | #!/usr/bin/env python3
# coding: utf-8
# @Author : Xinhao Mei @CVSSP, University of Surrey
# @E-mail : x.mei@surrey.ac.uk
"""
Adapted from PANNs (Pre-trained Audio Neural Networks).
https://github.com/qiuqiangkong/audioset_tagging_cnn/blob/master/pytorch/models.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
def init_layer(layer):
    """Initialize a Linear or Convolutional layer. """
    nn.init.xavier_uniform_(layer.weight)
    # zero the bias when the layer has one
    bias = getattr(layer, 'bias', None)
    if bias is not None:
        bias.data.fill_(0.)
def init_bn(bn):
    """Initialize a Batchnorm layer. """
    # identity affine transform: unit scale, zero shift
    bn.weight.data.fill_(1.)
    bn.bias.data.fill_(0.)
class ConvBlock(nn.Module):
    """Two (3x3 conv -> BN -> ReLU) stages followed by a pooling step."""
    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        conv_kwargs = dict(kernel_size=(3, 3), stride=(1, 1),
                           padding=(1, 1), bias=False)
        self.conv1 = nn.Conv2d(in_channels=in_channels,
                               out_channels=out_channels, **conv_kwargs)
        self.conv2 = nn.Conv2d(in_channels=out_channels,
                               out_channels=out_channels, **conv_kwargs)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.init_weight()

    def init_weight(self):
        """Xavier-init both convolutions; identity-init both batch norms."""
        for conv in (self.conv1, self.conv2):
            init_layer(conv)
        for bn in (self.bn1, self.bn2):
            init_bn(bn)

    def forward(self, input, pool_size=(2, 2), pool_type='avg'):
        x = F.relu_(self.bn1(self.conv1(input)))
        x = F.relu_(self.bn2(self.conv2(x)))
        if pool_type == 'max':
            return F.max_pool2d(x, kernel_size=pool_size)
        if pool_type == 'avg':
            return F.avg_pool2d(x, kernel_size=pool_size)
        if pool_type == 'avg+max':
            # element-wise sum of both pooling results
            return (F.avg_pool2d(x, kernel_size=pool_size)
                    + F.max_pool2d(x, kernel_size=pool_size))
        raise Exception('Incorrect argument!')
class Cnn10(nn.Module):
    # PANNs CNN10: log-mel front end + 4 ConvBlocks; outputs a 512-dim
    # clip embedding (sum of time-max and time-mean pooled features).
    def __init__(self, config):
        super(Cnn10, self).__init__()
        self.bn0 = nn.BatchNorm2d(64)
        sr = config.wav.sr
        window_size = config.wav.window_size
        hop_length = config.wav.hop_length
        mel_bins = config.wav.mel_bins
        self.dropout = config.training.dropout
        # waveform -> magnitude spectrogram (parameters frozen)
        self.spectrogram_extractor = Spectrogram(n_fft=window_size,
                                                 hop_length=hop_length,
                                                 win_length=window_size,
                                                 window='hann',
                                                 center=True,
                                                 pad_mode='reflect',
                                                 freeze_parameters=True)
        # spectrogram -> log-mel features (parameters frozen)
        self.logmel_extractor = LogmelFilterBank(sr=sr, n_fft=window_size,
                                                 n_mels=mel_bins,
                                                 fmin=50,
                                                 fmax=14000,
                                                 ref=1.0,
                                                 amin=1e-10,
                                                 top_db=None,
                                                 freeze_parameters=True)
        self.is_spec_augment = config.training.spec_augmentation
        if self.is_spec_augment:
            # SpecAugment time/frequency masking, applied only in training
            self.spec_augmenter = SpecAugmentation(time_drop_width=64,
                                                   time_stripes_num=2,
                                                   freq_drop_width=8,
                                                   freq_stripes_num=2)
        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.init_weight()
    def init_weight(self):
        init_bn(self.bn0)
    def forward(self, input):
        """
        Input: (batch_size, data_length)"""
        x = self.spectrogram_extractor(input)   # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)    # (batch_size, 1, time_steps, mel_bins)
        # bn0 normalizes over the mel axis, hence the transposes around it
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        if self.training and self.is_spec_augment:
            x = self.spec_augmenter(x)
        x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = torch.mean(x, dim=3)    # batch x channel x time
        (x1, _) = torch.max(x, dim=2)    # max in time
        x2 = torch.mean(x, dim=2)   # average in time
        x = x1 + x2    # batch x channel (512)
        return x
class Cnn14(nn.Module):
    # PANNs CNN14: log-mel front end + 6 ConvBlocks; outputs a 2048-dim
    # clip embedding (sum of time-max and time-mean pooled features).
    def __init__(self, config):
        super(Cnn14, self).__init__()
        self.bn0 = nn.BatchNorm2d(64)
        sr = config.wav.sr
        window_size = config.wav.window_size
        hop_length = config.wav.hop_length
        mel_bins = config.wav.mel_bins
        # NOTE(review): self.dropout is stored but forward() hard-codes
        # p=0.2 — confirm which is intended.
        self.dropout = config.training.dropout
        # waveform -> magnitude spectrogram (parameters frozen)
        self.spectrogram_extractor = Spectrogram(n_fft=window_size,
                                                 hop_length=hop_length,
                                                 win_length=window_size,
                                                 window='hann',
                                                 center=True,
                                                 pad_mode='reflect',
                                                 freeze_parameters=True)
        # spectrogram -> log-mel features (parameters frozen)
        self.logmel_extractor = LogmelFilterBank(sr=sr, n_fft=window_size,
                                                 n_mels=mel_bins,
                                                 fmin=50,
                                                 fmax=14000,
                                                 ref=1.0,
                                                 amin=1e-10,
                                                 top_db=None,
                                                 freeze_parameters=True)
        self.is_spec_augment = config.training.spec_augmentation
        if self.is_spec_augment:
            # SpecAugment time/frequency masking, applied only in training
            self.spec_augmenter = SpecAugmentation(time_drop_width=64,
                                                   time_stripes_num=2,
                                                   freq_drop_width=8,
                                                   freq_stripes_num=2)
        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
        self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
        # NOTE(review): fc1 is initialized but never used in forward() —
        # the 2048-dim pooled features are returned directly.
        self.fc1 = nn.Linear(2048, 512, bias=True)
        self.init_weights()
    def init_weights(self):
        init_bn(self.bn0)
        init_layer(self.fc1)
    def forward(self, input):
        """ input: (batch_size, time_steps, mel_bins)"""
        x = self.spectrogram_extractor(input)   # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)    # (batch_size, 1, time_steps, mel_bins)
        # bn0 normalizes over the mel axis, hence the transposes around it
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        if self.training and self.is_spec_augment:
            x = self.spec_augmenter(x)
        x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block6(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = torch.mean(x, dim=3)        # batch x channel x time
        (x1, _) = torch.max(x, dim=2)   # max in time
        x2 = torch.mean(x, dim=2)       # average in time
        x = x1 + x2     # batch x channel (2048)
        return x
def _resnet_conv3x3(in_planes, out_planes):
#3x3 convolution with padding
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,
padding=1, groups=1, bias=False, dilation=1)
def _resnet_conv1x1(in_planes, out_planes):
#1x1 convolution
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False)
class _ResnetBasicBlock(nn.Module):
    """ResNet basic block variant used by the ResNet38 audio encoder.

    Differs from torchvision's BasicBlock in two ways visible below:
    stride-2 downsampling is done with an average pool *before* conv1
    (both convs are stride 1), and dropout (p=0.2) is applied between
    the two convolutions.
    """
    # Output channels = planes * expansion (no bottleneck widening).
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(_ResnetBasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('_ResnetBasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in _ResnetBasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.stride = stride
        self.conv1 = _resnet_conv3x3(inplanes, planes)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = _resnet_conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride  # NOTE(review): redundant — already assigned above
        self.init_weights()
    def init_weights(self):
        # Zero-init the last BN's weight so the residual branch starts muted.
        init_layer(self.conv1)
        init_bn(self.bn1)
        init_layer(self.conv2)
        init_bn(self.bn2)
        nn.init.constant_(self.bn2.weight, 0)
    def forward(self, x):
        identity = x
        # Spatial downsampling via avg-pool instead of a strided conv.
        if self.stride == 2:
            out = F.avg_pool2d(x, kernel_size=(2, 2))
        else:
            out = x
        out = self.conv1(out)
        out = self.bn1(out)
        out = self.relu(out)
        out = F.dropout(out, p=0.2, training=self.training)
        out = self.conv2(out)
        out = self.bn2(out)
        # Project/downsample the identity path when shapes differ.
        if self.downsample is not None:
            identity = self.downsample(identity)
        out += identity
        out = self.relu(out)
        return out
class _ResNet(nn.Module):
    """Stem-less ResNet trunk (layers 1-4 only) built from the given block.

    Unlike torchvision's ResNet there is no initial 7x7 conv/maxpool and no
    classification head — the caller supplies 64-channel feature maps and
    consumes the 512*expansion-channel output.
    """
    def __init__(self, block, layers, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        # NOTE(review): zero_init_residual is accepted but never read here;
        # the zero-init happens inside _ResnetBasicBlock.init_weights().
        super(_ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack `blocks` instances of `block`; only the first may downsample."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation: keep spatial size, enlarge receptive field.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Identity-path projection; stride-2 uses avg-pool + 1x1 conv.
            if stride == 1:
                downsample = nn.Sequential(
                    _resnet_conv1x1(self.inplanes, planes * block.expansion),
                    norm_layer(planes * block.expansion),
                )
                init_layer(downsample[0])
                init_bn(downsample[1])
            elif stride == 2:
                downsample = nn.Sequential(
                    nn.AvgPool2d(kernel_size=2),
                    _resnet_conv1x1(self.inplanes, planes * block.expansion),
                    norm_layer(planes * block.expansion),
                )
                init_layer(downsample[1])
                init_bn(downsample[2])
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
class ResNet38(nn.Module):
    """ResNet38 audio encoder: raw waveform -> 2048-d clip embedding.

    Pipeline: log-mel spectrogram -> (optional) SpecAugment -> conv stem ->
    _ResNet trunk -> channel expansion to 2048 -> freq/time pooling.
    """
    def __init__(self, config):
        super(ResNet38, self).__init__()
        # Normalizes over the mel-bin axis (input is transposed before/after).
        self.bn0 = nn.BatchNorm2d(64)
        sr = config.wav.sr
        window_size = config.wav.window_size
        hop_length = config.wav.hop_length
        mel_bins = config.wav.mel_bins
        self.dropout = config.training.dropout
        self.spectrogram_extractor = Spectrogram(n_fft=window_size,
                                                 hop_length=hop_length,
                                                 win_length=window_size,
                                                 window='hann',
                                                 center=True,
                                                 pad_mode='reflect',
                                                 freeze_parameters=True)
        self.logmel_extractor = LogmelFilterBank(sr=sr, n_fft=window_size,
                                                 n_mels=mel_bins,
                                                 fmin=50,
                                                 fmax=14000,
                                                 ref=1.0,
                                                 amin=1e-10,
                                                 top_db=None,
                                                 freeze_parameters=True)
        self.is_spec_augment = config.training.spec_augmentation
        if self.is_spec_augment:
            self.spec_augmenter = SpecAugmentation(time_drop_width=64,
                                                   time_stripes_num=2,
                                                   freq_drop_width=8,
                                                   freq_stripes_num=2)
        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        # self.conv_block2 = ConvBlock(in_channels=64, out_channels=64)
        # ResNet34-shaped trunk ([3, 4, 6, 3] basic blocks).
        self.resnet = _ResNet(block=_ResnetBasicBlock, layers=[3, 4, 6, 3], zero_init_residual=True)
        self.conv_block_after1 = ConvBlock(in_channels=512, out_channels=2048)
        self.init_weights()
    def init_weights(self):
        # Sub-modules (conv blocks, resnet) initialize themselves in their ctors.
        init_bn(self.bn0)
    def forward(self, input):
        """
        Input: (batch_size, data_length)"""
        x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
        # BatchNorm over mel bins: move mel axis to channel position and back.
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        if self.training and self.is_spec_augment:
            x = self.spec_augmenter(x)
        x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.dropout, training=self.training, inplace=True)
        x = self.resnet(x)
        x = F.avg_pool2d(x, kernel_size=(2, 2))
        x = F.dropout(x, p=self.dropout, training=self.training, inplace=True)
        x = self.conv_block_after1(x, pool_size=(1, 1), pool_type='avg')
        x = F.dropout(x, p=self.dropout, training=self.training, inplace=True)
        x = torch.mean(x, dim=3) # batch x channel x time
        (x1, _) = torch.max(x, dim=2) # max in time
        x2 = torch.mean(x, dim=2) # average in time
        x = x1 + x2 # batch x channel (2048)
        # x = F.relu_(self.fc1(x))
        # x = F.dropout(x, p=self.dropout, training=self.training)
        return x
| 17,859 | 37.081023 | 100 | py |
PseCo | PseCo-master/tools/test.py | import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint, wrap_fp16_model
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import build_dataloader, build_dataset, replace_ImageToTensor
from mmdet.models import build_detector
from ssod.utils import patch_config
import ipdb
def parse_args():
    """Parse CLI arguments for testing/evaluating an MMDetection model.

    Also exports LOCAL_RANK for distributed launchers and migrates the
    deprecated --options flag to --eval-options.
    """
    parser = argparse.ArgumentParser(description="MMDet test (and eval) a model")
    parser.add_argument("config", help="test config file path")
    parser.add_argument("checkpoint", help="checkpoint file")
    parser.add_argument(
        "--work-dir",
        help="the directory to save the file containing evaluation metrics",
    )
    parser.add_argument("--out", help="output result file in pickle format")
    parser.add_argument(
        "--fuse-conv-bn",
        action="store_true",
        help="Whether to fuse conv and bn, this will slightly increase"
        "the inference speed",
    )
    parser.add_argument(
        "--format-only",
        action="store_true",
        help="Format the output results without perform evaluation. It is"
        "useful when you want to format the result to a specific format and "
        "submit it to the test server",
    )
    parser.add_argument(
        "--eval",
        type=str,
        nargs="+",
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC',
    )
    parser.add_argument("--show", action="store_true", help="show results")
    parser.add_argument(
        "--show-dir", help="directory where painted images will be saved"
    )
    parser.add_argument(
        "--show-score-thr",
        type=float,
        default=0.3,
        help="score threshold (default: 0.3)",
    )
    parser.add_argument(
        "--gpu-collect",
        action="store_true",
        help="whether to use gpu to collect results.",
    )
    parser.add_argument(
        "--tmpdir",
        help="tmp directory used for collecting results from multiple "
        "workers, available when gpu-collect is not specified",
    )
    parser.add_argument(
        "--cfg-options",
        nargs="+",
        action=DictAction,
        help="override some settings in the used config, the key-value pair "
        "in xxx=yyy format will be merged into config file. If the value to "
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        "Note that the quotation marks are necessary and that no white space "
        "is allowed.",
    )
    parser.add_argument(
        "--options",
        nargs="+",
        action=DictAction,
        help="custom options for evaluation, the key-value pair in xxx=yyy "
        "format will be kwargs for dataset.evaluate() function (deprecate), "
        "change to --eval-options instead.",
    )
    parser.add_argument(
        "--eval-options",
        nargs="+",
        action=DictAction,
        help="custom options for evaluation, the key-value pair in xxx=yyy "
        "format will be kwargs for dataset.evaluate() function",
    )
    parser.add_argument(
        "--launcher",
        choices=["none", "pytorch", "slurm", "mpi"],
        default="none",
        help="job launcher",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    args = parser.parse_args()
    # PyTorch distributed launchers read LOCAL_RANK from the environment.
    if "LOCAL_RANK" not in os.environ:
        os.environ["LOCAL_RANK"] = str(args.local_rank)
    if args.options and args.eval_options:
        raise ValueError(
            "--options and --eval-options cannot be both "
            "specified, --options is deprecated in favor of --eval-options"
        )
    # Back-compat: alias the deprecated flag onto the new one.
    if args.options:
        warnings.warn("--options is deprecated in favor of --eval-options")
        args.eval_options = args.options
    return args
def main():
    """Test/evaluate a detector: load config + checkpoint, run (dist or
    single-GPU) inference, then optionally dump/format/evaluate results.
    """
    args = parse_args()
    assert args.out or args.eval or args.format_only or args.show or args.show_dir, (
        "Please specify at least one operation (save/eval/format/show the "
        'results / save the results) with the argument "--out", "--eval"'
        ', "--format-only", "--show" or "--show-dir"'
    )
    if args.eval and args.format_only:
        raise ValueError("--eval and --format_only cannot be both specified")
    if args.out is not None and not args.out.endswith((".pkl", ".pickle")):
        raise ValueError("The output file must be a pkl file.")
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get("custom_imports", None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg["custom_imports"])
    # set cudnn_benchmark
    if cfg.get("cudnn_benchmark", False):
        torch.backends.cudnn.benchmark = True
    # Disable pretrained-weight loading at test time (checkpoint supplies weights).
    cfg.model.pretrained = None
    if cfg.model.get("neck"):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get("rfp_backbone"):
                    if neck_cfg.rfp_backbone.get("pretrained"):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get("rfp_backbone"):
            if cfg.model.neck.rfp_backbone.get("pretrained"):
                cfg.model.neck.rfp_backbone.pretrained = None
    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop("samples_per_gpu", 1)
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop("samples_per_gpu", 1) for ds_cfg in cfg.data.test]
        )
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == "none":
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    rank, _ = get_dist_info()
    # allows not to create
    if args.work_dir is not None and rank == 0:
        cfg.work_dir = args.work_dir
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        # json_file is only defined here; its later use is guarded by the
        # same (work_dir is not None and rank == 0) condition.
        json_file = osp.join(args.work_dir, f"eval_{timestamp}.json")
    elif cfg.get("work_dir", None) is None:
        cfg.work_dir = osp.join(
            "./work_dirs", osp.splitext(osp.basename(args.config))[0]
        )
    cfg = patch_config(cfg)
    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False,
    )
    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get("test_cfg"))
    fp16_cfg = cfg.get("fp16", None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location="cpu")
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if "CLASSES" in checkpoint.get("meta", {}):
        model.CLASSES = checkpoint["meta"]["CLASSES"]
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(
            model, data_loader, args.show, args.show_dir, args.show_score_thr
        )
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
        )
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
    rank, _ = get_dist_info()
    # Only rank 0 writes/evaluates to avoid duplicated output.
    if rank == 0:
        if args.out:
            print(f"\nwriting results to {args.out}")
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get("evaluation", {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                "type",
                "interval",
                "tmpdir",
                "start",
                "gpu_collect",
                "save_best",
                "rule",
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if args.work_dir is not None and rank == 0:
                mmcv.dump(metric_dict, json_file)
if __name__ == "__main__":  # CLI entry point
    main()
| 9,642 | 35.665399 | 85 | py |
PseCo | PseCo-master/tools/train.py | import argparse
import copy
import os
import os.path as osp
import time
import warnings
from logging import log
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.models import build_detector
from mmdet.utils import collect_env
from ssod.apis import get_root_logger, set_random_seed, train_detector
from ssod.datasets import build_dataset
from ssod.utils import patch_config
import ipdb
def parse_args():
    """Parse CLI arguments for training a detector.

    Also exports LOCAL_RANK for distributed launchers and migrates the
    deprecated --options flag to --cfg-options.
    """
    parser = argparse.ArgumentParser(description="Train a detector")
    parser.add_argument("config", help="train config file path")
    parser.add_argument("--work-dir", help="the dir to save logs and models")
    parser.add_argument("--resume-from", help="the checkpoint file to resume from")
    parser.add_argument(
        "--no-validate",
        action="store_true",
        help="whether not to evaluate the checkpoint during training",
    )
    # --gpus and --gpu-ids are mutually exclusive ways to select devices.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        "--gpus",
        type=int,
        help="number of gpus to use " "(only applicable to non-distributed training)",
    )
    group_gpus.add_argument(
        "--gpu-ids",
        type=int,
        nargs="+",
        help="ids of gpus to use " "(only applicable to non-distributed training)",
    )
    parser.add_argument("--seed", type=int, default=None, help="random seed")
    parser.add_argument(
        "--deterministic",
        action="store_true",
        help="whether to set deterministic options for CUDNN backend.",
    )
    parser.add_argument(
        "--options",
        nargs="+",
        action=DictAction,
        help="override some settings in the used config, the key-value pair "
        "in xxx=yyy format will be merged into config file (deprecate), "
        "change to --cfg-options instead.",
    )
    parser.add_argument(
        "--cfg-options",
        nargs="+",
        action=DictAction,
        help="override some settings in the used config, the key-value pair "
        "in xxx=yyy format will be merged into config file. If the value to "
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        "Note that the quotation marks are necessary and that no white space "
        "is allowed.",
    )
    parser.add_argument(
        "--launcher",
        choices=["none", "pytorch", "slurm", "mpi"],
        default="none",
        help="job launcher",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    args = parser.parse_args()
    # PyTorch distributed launchers read LOCAL_RANK from the environment.
    if "LOCAL_RANK" not in os.environ:
        os.environ["LOCAL_RANK"] = str(args.local_rank)
    if args.options and args.cfg_options:
        raise ValueError(
            "--options and --cfg-options cannot be both "
            "specified, --options is deprecated in favor of --cfg-options"
        )
    # Back-compat: alias the deprecated flag onto the new one.
    if args.options:
        warnings.warn("--options is deprecated in favor of --cfg-options")
        args.cfg_options = args.options
    return args
def main():
    """Train a detector: load/patch config, set up logging, seed, build the
    model and datasets, then hand off to train_detector().
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get("custom_imports", None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg["custom_imports"])
    # set cudnn_benchmark
    if cfg.get("cudnn_benchmark", False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get("work_dir", None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join(
            "./work_dirs", osp.splitext(osp.basename(args.config))[0]
        )
    cfg = patch_config(cfg)
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == "none":
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    log_file = osp.join(cfg.work_dir, f"{timestamp}.log")
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()])
    dash_line = "-" * 60 + "\n"
    logger.info(logger.handlers)
    logger.info("Environment info:\n" + dash_line + env_info + "\n" + dash_line)
    meta["env_info"] = env_info
    meta["config"] = cfg.pretty_text
    # log some basic info
    logger.info(f"Distributed training: {distributed}")
    logger.info(f"Config:\n{cfg.pretty_text}")
    # set random seeds
    if args.seed is not None:
        logger.info(
            f"Set random seed to {args.seed}, " f"deterministic: {args.deterministic}"
        )
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta["seed"] = args.seed
    meta["exp_name"] = osp.basename(args.config)
    model = build_detector(
        cfg.model, train_cfg=cfg.get("train_cfg"), test_cfg=cfg.get("test_cfg")
    )
    model.init_weights()
    datasets = [build_dataset(cfg.data.train)]
    # A 2-stage workflow means train + val; validation reuses the train pipeline.
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7], CLASSES=datasets[0].CLASSES
        )
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta,
    )
if __name__ == "__main__":  # CLI entry point
    main()
| 7,046 | 34.41206 | 87 | py |
PseCo | PseCo-master/tools/misc/browse_dataset.py | import argparse
import os
from pathlib import Path
import mmcv
import torch
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from ssod.datasets import build_dataset
from ssod.models.utils import Transform2D
def parse_args():
    """Parse CLI arguments for browsing/visualizing a dataset's GT annotations."""
    parser = argparse.ArgumentParser(description="Browse a dataset")
    parser.add_argument("config", help="train config file path")
    parser.add_argument(
        "--skip-type",
        type=str,
        nargs="+",
        default=["DefaultFormatBundle", "Normalize", "Collect"],
        help="skip some useless pipeline",
    )
    parser.add_argument(
        "--output-dir",
        default=None,
        type=str,
        help="If there is no display interface, you can save it",
    )
    parser.add_argument("--not-show", default=False, action="store_true")
    parser.add_argument(
        "--show-interval", type=float, default=2, help="the interval of show (s)"
    )
    parser.add_argument(
        "--cfg-options",
        nargs="+",
        action=DictAction,
        help="override some settings in the used config, the key-value pair "
        "in xxx=yyy format will be merged into config file. If the value to "
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        "Note that the quotation marks are necessary and that no white space "
        "is allowed.",
    )
    args = parser.parse_args()
    return args
def remove_pipe(pipelines, skip_type):
    """Recursively drop pipeline steps whose ``type`` is listed in ``skip_type``.

    Lists are filtered element-wise (removed steps return None and are
    dropped); a ``MultiBranch`` dict has each of its branch values filtered
    in turn; any other dict is kept as-is unless its type is skipped.

    Raises:
        NotImplementedError: for inputs that are neither list nor dict.
    """
    if isinstance(pipelines, list):
        filtered = (remove_pipe(step, skip_type) for step in pipelines)
        return [step for step in filtered if step is not None]
    if isinstance(pipelines, dict):
        if pipelines["type"] in skip_type:
            return None
        if pipelines["type"] != "MultiBranch":
            return pipelines
        # Filter every branch of a MultiBranch wrapper, keeping its type key.
        return {
            key: (value if key == "type" else remove_pipe(value, skip_type))
            for key, value in pipelines.items()
        }
    raise NotImplementedError()
def retrieve_data_cfg(config_path, skip_type, cfg_options):
    """Load a config file and strip skipped steps from the train pipeline.

    Unwraps dataset wrappers (repeated ``dataset`` nesting) to reach the
    innermost train dataset before filtering its pipeline in place.
    """
    cfg = Config.fromfile(config_path)
    if cfg_options is not None:
        cfg.merge_from_dict(cfg_options)
    # import modules from string list.
    if cfg.get("custom_imports", None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg["custom_imports"])
    train_cfg = cfg.data.train
    while "dataset" in train_cfg:
        train_cfg = train_cfg["dataset"]
    train_cfg["pipeline"] = remove_pipe(train_cfg["pipeline"], skip_type)
    return cfg
def main():
    """Iterate the train dataset and visualize GT boxes/masks per sample.

    When a sample is a 2-item list (MultiBranch-style paired augmentations),
    the second view's boxes are warped into the first view's frame via the
    stored transform matrices and overlaid, as a visual consistency check.
    """
    args = parse_args()
    cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
    dataset = build_dataset(cfg.data.train)
    progress_bar = mmcv.ProgressBar(len(dataset))
    for item in dataset:
        # Normalize to a list so single- and multi-branch samples share a path.
        if not isinstance(item, list):
            item = [item]
        bboxes = []
        labels = []
        tran_mats = []
        out_shapes = []
        for it in item:
            trans_matrix = it["transform_matrix"]
            bbox = it["gt_bboxes"]
            tran_mats.append(trans_matrix)
            bboxes.append(bbox)
            labels.append(it["gt_labels"])
            out_shapes.append(it["img_shape"])
            filename = (
                os.path.join(args.output_dir, Path(it["filename"]).name)
                if args.output_dir is not None
                else None
            )
            gt_masks = it.get("gt_masks", None)
            if gt_masks is not None:
                gt_masks = mask2ndarray(gt_masks)
            imshow_det_bboxes(
                it["img"],
                it["gt_bboxes"],
                it["gt_labels"],
                gt_masks,
                class_names=dataset.CLASSES,
                show=not args.not_show,
                wait_time=args.show_interval,
                out_file=filename,
                bbox_color=(255, 102, 61),
                text_color=(255, 102, 61),
            )
        if len(tran_mats) == 2:
            # check equality between different augmentation
            # Map branch-1 boxes into branch-0's frame: M0 @ inv(M1).
            transed_bboxes = Transform2D.transform_bboxes(
                torch.from_numpy(bboxes[1]).float(),
                torch.from_numpy(tran_mats[0]).float()
                @ torch.from_numpy(tran_mats[1]).float().inverse(),
                out_shapes[0],
            )
            img = imshow_det_bboxes(
                item[0]["img"],
                item[0]["gt_bboxes"],
                item[0]["gt_labels"],
                class_names=dataset.CLASSES,
                show=False,
                wait_time=args.show_interval,
                out_file=None,
                bbox_color=(255, 102, 61),
                text_color=(255, 102, 61),
            )
            # Overlay the warped boxes in blue on top of branch-0's GT.
            imshow_det_bboxes(
                img,
                transed_bboxes.numpy(),
                labels[1],
                class_names=dataset.CLASSES,
                show=True,
                wait_time=args.show_interval,
                out_file=None,
                bbox_color=(0, 0, 255),
                text_color=(0, 0, 255),
                thickness=5,
            )
        progress_bar.update()
if __name__ == "__main__":  # CLI entry point
    main()
| 5,583 | 31.091954 | 83 | py |
PseCo | PseCo-master/ssod/apis/train.py | import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (
HOOKS,
DistSamplerSeedHook,
EpochBasedRunner,
Fp16OptimizerHook,
OptimizerHook,
build_optimizer,
build_runner,
)
from mmcv.runner.hooks import HOOKS
from mmcv.utils import build_from_cfg
from mmdet.core import EvalHook
from mmdet.datasets import build_dataset, replace_ImageToTensor
from ssod.datasets import build_dataloader
from ssod.utils import find_latest_checkpoint, get_root_logger, patch_runner
from ssod.utils.hooks import DistEvalHook
import ipdb
def set_random_seed(seed, deterministic=False):
    """Seed Python, NumPy and PyTorch (CPU + all CUDA devices) RNGs.

    Args:
        seed (int): Seed applied to every RNG.
        deterministic (bool): When True, force deterministic cuDNN kernels
            and disable cuDNN autotuning (reproducible but slower).
            Default: False.
    """
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def train_detector(
    model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None
):
    """Build dataloaders, wrap the model, configure a runner with hooks, and
    launch training.

    Args:
        model: detector to train (wrapped in MM(Distributed)DataParallel here).
        dataset: a dataset or list of datasets, one loader per dataset.
        cfg: full training config (optimizer, runner, hooks, data, ...).
        distributed (bool): use DDP and distributed samplers/eval.
        validate (bool): register an evaluation hook on the val split.
        timestamp (str | None): shared timestamp for log filenames.
        meta (dict | None): run metadata forwarded to the runner.
    """
    logger = get_root_logger(log_level=cfg.log_level)
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    # Back-compat: migrate the deprecated imgs_per_gpu key onto samples_per_gpu.
    if "imgs_per_gpu" in cfg.data:
        logger.warning(
            '"imgs_per_gpu" is deprecated in MMDet V2.0. '
            'Please use "samples_per_gpu" instead'
        )
        if "samples_per_gpu" in cfg.data:
            logger.warning(
                f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
                f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
                f"={cfg.data.imgs_per_gpu} is used in this experiments"
            )
        else:
            logger.warning(
                'Automatically set "samples_per_gpu"="imgs_per_gpu"='
                f"{cfg.data.imgs_per_gpu} in this experiments"
            )
        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed,
            sampler_cfg=cfg.data.get("sampler", {}).get("train", {}),
        )
        for ds in dataset
    ]
    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get("find_unused_parameters", False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters,
        )
    else:
        model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    if "runner" not in cfg:
        cfg.runner = {"type": "EpochBasedRunner", "max_epochs": cfg.total_epochs}
        warnings.warn(
            "config is now expected to have a `runner` section, "
            "please set `runner` in your config.",
            UserWarning,
        )
    else:
        if "total_epochs" in cfg:
            assert cfg.total_epochs == cfg.runner.max_epochs
    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta,
        ),
    )
    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp
    # fp16 setting
    fp16_cfg = cfg.get("fp16", None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=distributed
        )
    elif distributed and "type" not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config
    # register hooks
    runner.register_training_hooks(
        cfg.lr_config,
        optimizer_config,
        cfg.checkpoint_config,
        cfg.log_config,
        cfg.get("momentum_config", None),
    )
    if distributed:
        if isinstance(runner, EpochBasedRunner):
            runner.register_hook(DistSamplerSeedHook())
    # register eval hooks
    if validate:
        # Support batch_size > 1 in validation
        val_samples_per_gpu = cfg.data.val.pop("samples_per_gpu", 1)
        if val_samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=val_samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False,
        )
        eval_cfg = cfg.get("evaluation", {})
        eval_cfg["by_epoch"] = eval_cfg.get(
            "by_epoch", cfg.runner["type"] != "IterBasedRunner"
        )
        if "type" not in eval_cfg:
            eval_hook = DistEvalHook if distributed else EvalHook
            eval_hook = eval_hook(val_dataloader, **eval_cfg)
        else:
            eval_hook = build_from_cfg(
                eval_cfg, HOOKS, default_args=dict(dataloader=val_dataloader)
            )
        runner.register_hook(eval_hook, priority=80)
    # user-defined hooks
    if cfg.get("custom_hooks", None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(
            custom_hooks, list
        ), f"custom_hooks expect list type, but got {type(custom_hooks)}"
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), (
                "Each item in custom_hooks expects dict type, but got "
                f"{type(hook_cfg)}"
            )
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop("priority", "NORMAL")
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)
    runner = patch_runner(runner)
    # Auto-resume from the latest checkpoint in work_dir unless disabled.
    resume_from = None
    if cfg.get("auto_resume", True):
        resume_from = find_latest_checkpoint(cfg.work_dir)
    if resume_from is not None:
        cfg.resume_from = resume_from
    if cfg.resume_from:
        runner.resume(cfg.resume_from, resume_optimizer=False) # resume_optimizer=False
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from, revise_keys=[]) # (r'^', 'student.')
    runner.run(data_loaders, cfg.workflow)
| 7,063 | 32.961538 | 92 | py |
PseCo | PseCo-master/ssod/core/masks/structures.py | """
Designed for pseudo masks.
In a `TrimapMasks`, it allow some part of the mask is ignored when computing loss.
"""
import numpy as np
import torch
from mmcv.ops.roi_align import roi_align
from mmdet.core import BitmapMasks
class TrimapMasks(BitmapMasks):
    """BitmapMasks variant whose resized masks carry an "ignore" label.

    Pixels whose roi_align-averaged value exceeds 1 are marked with
    ``ignore_value`` in the cropped/resized mask so the loss can skip them.
    """
    def __init__(self, masks, height, width, ignore_value=255):
        """
        Args:
            masks: per-instance binary masks (see ``BitmapMasks``).
            height (int): mask height.
            width (int): mask width.
            ignore_value: flag to ignore in loss computation.
        See `mmdet.core.BitmapMasks` for more information
        """
        super().__init__(masks, height, width)
        self.ignore_value = ignore_value
    def crop_and_resize(
        self, bboxes, out_shape, inds, device="cpu", interpolation="bilinear"
    ):
        """See :func:`BaseInstanceMasks.crop_and_resize`.

        Returns a ``BitmapMasks`` of shape ``(len(bboxes), *out_shape)`` where
        each value is 0 (background), 1 (foreground) or ``self.ignore_value``.
        """
        if len(self.masks) == 0:
            empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
            return BitmapMasks(empty_masks, *out_shape)
        # convert bboxes to tensor
        if isinstance(bboxes, np.ndarray):
            bboxes = torch.from_numpy(bboxes).to(device=device)
        if isinstance(inds, np.ndarray):
            inds = torch.from_numpy(inds).to(device=device)
        num_bbox = bboxes.shape[0]
        # Prefix each box with its own batch index so roi_align pairs
        # box i with mask i.
        fake_inds = torch.arange(num_bbox, device=device).to(dtype=bboxes.dtype)[
            :, None
        ]
        rois = torch.cat([fake_inds, bboxes], dim=1)  # Nx5
        rois = rois.to(device=device)
        if num_bbox > 0:
            # Select the GT mask for each box (inds maps box -> mask index).
            gt_masks_th = (
                torch.from_numpy(self.masks)
                .to(device)
                .index_select(0, inds)
                .to(dtype=rois.dtype)
            )
            targets = roi_align(
                gt_masks_th[:, None, :, :], rois, out_shape, 1.0, 0, "avg", True
            ).squeeze(1)
            # for a mask:
            # value<0.5 -> background,
            # 0.5<=value<=1 -> foreground
            # value>1 -> ignored area
            resized_masks = (targets >= 0.5).float()
            resized_masks[targets > 1] = self.ignore_value
            resized_masks = resized_masks.cpu().numpy()
        else:
            resized_masks = []
        return BitmapMasks(resized_masks, *out_shape)
| 2,157 | 34.377049 | 82 | py |
PseCo | PseCo-master/ssod/models/PseCo_frcnn.py | import copy
import os.path as osp
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import mmcv
from mmcv.runner.fp16_utils import force_fp32
from mmcv.cnn import normal_init
from mmcv.ops import batched_nms
from mmdet.core import bbox2roi, multi_apply, merge_aug_proposals, bbox_mapping, bbox_mapping_back, bbox_overlaps, build_assigner
from mmdet.models import BaseDetector, TwoStageDetector, DETECTORS, build_detector
from mmdet.models.builder import build_loss
from ssod.utils.structure_utils import dict_split, weighted_loss
from ssod.datasets.pipelines.rand_aug import visualize_bboxes
from .multi_stream_detector import MultiSteamDetector
from .utils import (Transform2D, filter_invalid, filter_invalid_classwise, concat_all_gather,
filter_invalid_scalewise, resize_image, get_pseudo_label_quality)
import random
import time
import os
import ipdb
@DETECTORS.register_module()
class PseCo_FRCNN(MultiSteamDetector):
    """ PseCo on FR-CNN.
    """
    # Teacher-student semi-supervised detector built on Faster R-CNN: the
    # frozen "teacher" produces pseudo labels from one view, the "student"
    # is trained on them together with the labeled data.
    def __init__(self, model: dict, train_cfg=None, test_cfg=None):
        # Instantiate two copies of the same detector config: teacher + student.
        super(PseCo_FRCNN, self).__init__(
            dict(teacher=build_detector(model), student=build_detector(model)),
            train_cfg=train_cfg,
            test_cfg=test_cfg,
        )
        if train_cfg is not None:
            # teacher receives no gradients (presumably updated via EMA by a
            # hook elsewhere — not visible in this file)
            self.freeze("teacher")
            self.unsup_weight = self.train_cfg.unsup_weight
        # Running EMA estimates of pseudo-label quality; updated in
        # unsup_rcnn_cls_loss and logged through the loss dict.
        self.register_buffer("precision", torch.zeros(1))
        self.register_buffer("recall", torch.zeros(1))
        # initialize assignment to build candidate bags (PLA = Prediction-guided
        # Label Assignment)
        self.PLA_iou_thres = self.train_cfg.get("PLA_iou_thres", 0.4)
        initial_assigner_cfg=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=self.PLA_iou_thres,
            neg_iou_thr=self.PLA_iou_thres,
            match_low_quality=False,
            ignore_iof_thr=-1)
        self.initial_assigner = build_assigner(initial_assigner_cfg)
        self.PLA_candidate_topk = self.train_cfg.PLA_candidate_topk
        self.use_teacher_proposal = self.train_cfg.use_teacher_proposal
        self.use_MSL = self.train_cfg.use_MSL
        # whether the RoI classifier uses sigmoid (class-agnostic bg) or softmax
        if self.student.roi_head.bbox_head.use_sigmoid:
            self.use_sigmoid = True
        else:
            self.use_sigmoid = False
        self.num_classes = self.student.roi_head.bbox_head.num_classes
    def forward_train(self, imgs, img_metas, **kwargs):
        """Dispatch a mixed batch to supervised / unsupervised branches.

        Samples are grouped by their ``tag`` meta key ("sup",
        "unsup_teacher", "unsup_student").
        """
        super().forward_train(imgs, img_metas, **kwargs)
        kwargs.update({"img": imgs})
        kwargs.update({"img_metas": img_metas})
        kwargs.update({"tag": [meta["tag"] for meta in img_metas]})
        data_groups = dict_split(kwargs, "tag")
        for _, v in data_groups.items():
            v.pop("tag")
        loss = {}
        #! Warnings: By splitting losses for supervised data and unsupervised data with different names,
        #! it means that at least one sample for each group should be provided on each gpu.
        #! In some situation, we can only put one image per gpu, we have to return the sum of loss
        #! and log the loss with logger instead. Or it will try to sync tensors don't exist.
        if "sup" in data_groups:
            gt_bboxes = data_groups["sup"]["gt_bboxes"]
            sup_loss = self.forward_sup_train(**data_groups["sup"])
            sup_loss = {"sup_" + k: v for k, v in sup_loss.items()}
            loss.update(**sup_loss)
        if "unsup_student" in data_groups:
            unsup_loss = self.foward_unsup_train(
                data_groups["unsup_teacher"], data_groups["unsup_student"])
            # scale the unlabeled-data loss by the configured weight
            unsup_loss = weighted_loss(
                unsup_loss,
                weight=self.unsup_weight,
            )
            unsup_loss = {"unsup_" + k: v for k, v in unsup_loss.items()}
            loss.update(**unsup_loss)
        return loss
    def extract_feat(self, img, model, start_lvl=0):
        """Directly extract features from the backbone+neck.

        ``start_lvl`` selects which FPN levels are returned:
        0 -> drop the last level, 1 -> drop the first level.
        """
        assert start_lvl in [0, 1], \
            f"start level {start_lvl} is not supported."
        x = model.backbone(img)
        # global feature -- [p2, p3, p4, p5, p6, p7]
        if model.with_neck:
            x = model.neck(x)
        if start_lvl == 0:
            return x[:-1]
        elif start_lvl == 1:
            return x[1:]
    def forward_sup_train(self,
                          img,
                          img_metas,
                          gt_bboxes,
                          gt_labels,
                          gt_bboxes_ignore=None,
                          gt_masks=None,
                          proposals=None,
                          **kwargs):
        """
        forward training process for the labeled data.
        """
        losses = dict()
        # high resolution
        x = self.extract_feat(img, self.student, start_lvl=1)
        # RPN forward and loss
        if self.student.with_rpn:
            proposal_cfg = self.student.train_cfg.get('rpn_proposal',
                                                      self.student.test_cfg.rpn)
            rpn_losses, proposal_list = self.student.rpn_head.forward_train(
                x,
                img_metas,
                gt_bboxes,
                gt_labels=None,
                gt_bboxes_ignore=gt_bboxes_ignore,
                proposal_cfg=proposal_cfg)
            losses.update(rpn_losses)
        else:
            proposal_list = proposals
        # RCNN forward and loss
        roi_losses = self.student.roi_head.forward_train(x, img_metas, proposal_list,
                                                         gt_bboxes, gt_labels,
                                                         gt_bboxes_ignore, gt_masks,
                                                         **kwargs)
        losses.update(roi_losses)
        return losses
    def foward_unsup_train(self, teacher_data, student_data):
        """Unsupervised branch: teacher predicts pseudo labels on its view,
        student is trained on the (geometry-aligned) pseudo labels.

        NOTE(review): the method name has a historical typo ("foward");
        kept because callers reference it by this name.
        """
        teacher_img = teacher_data["img"]
        student_img = student_data["img"]
        img_metas_teacher = teacher_data["img_metas"]
        img_metas_student = student_data["img_metas"]
        # GT annotations of the unlabeled images are only used to monitor
        # pseudo-label quality, never for training.
        gt_bboxes, gt_labels = teacher_data["gt_bboxes"], teacher_data["gt_labels"]
        if len(img_metas_student) > 1:
            # align teacher/student sample order by filename
            tnames = [meta["filename"] for meta in img_metas_teacher]
            snames = [meta["filename"] for meta in img_metas_student]
            tidx = [tnames.index(name) for name in snames]
            teacher_img = teacher_img[torch.Tensor(tidx).to(teacher_img.device).long()]
            img_metas_teacher = [img_metas_teacher[idx] for idx in tidx]
        det_bboxes, det_labels, tea_proposals_tuple = self.extract_teacher_info(
            teacher_img, img_metas_teacher)
        tea_proposals, tea_feats = tea_proposals_tuple
        tea_proposals_copy = copy.deepcopy(tea_proposals)  # proposals before geometry transform
        # map teacher-space boxes into the student's augmented image space
        pseudo_bboxes = self.convert_bbox_space(img_metas_teacher,
                                                img_metas_student, det_bboxes)
        tea_proposals = self.convert_bbox_space(img_metas_teacher,
                                                img_metas_student, tea_proposals)
        gt_bboxes = self.convert_bbox_space(img_metas_teacher,
                                            img_metas_student, gt_bboxes)
        pseudo_labels = det_labels
        loss = {}
        # RPN stage
        feats = self.extract_feat(student_img, self.student, start_lvl=1)
        stu_rpn_outs, rpn_losses = self.unsup_rpn_loss(
            feats, pseudo_bboxes, pseudo_labels, img_metas_student)
        loss.update(rpn_losses)
        if self.use_MSL:
            # construct View 2 to learn feature-level scale invariance
            img_ds = resize_image(student_img)  # downsampled images
            feats_ds = self.extract_feat(img_ds, self.student, start_lvl=0)
            _, rpn_losses_ds = self.unsup_rpn_loss(feats_ds,
                                                   pseudo_bboxes, pseudo_labels,
                                                   img_metas_student)
            for key, value in rpn_losses_ds.items():
                loss[key + "_V2"] = value
        # RCNN stage
        """ obtain proposals """
        if self.use_teacher_proposal:
            proposal_list = tea_proposals
        else :
            proposal_cfg = self.student.train_cfg.get(
                "rpn_proposal", self.student.test_cfg.rpn
            )
            proposal_list = self.student.rpn_head.get_bboxes(
                *stu_rpn_outs, img_metas_student, cfg=proposal_cfg
            )
        """ obtain teacher predictions for all proposals """
        with torch.no_grad():
            rois_ = bbox2roi(tea_proposals_copy)
            tea_bbox_results = self.teacher.roi_head._bbox_forward(
                tea_feats, rois_)
        teacher_infos = {
            "imgs": teacher_img,
            # normalise scores to probabilities regardless of head type
            "cls_score": tea_bbox_results["cls_score"].sigmoid() if self.use_sigmoid \
                else tea_bbox_results["cls_score"][:, :self.num_classes].softmax(dim=-1),
            "bbox_pred": tea_bbox_results["bbox_pred"],
            "feats": tea_feats,
            "img_metas": img_metas_teacher,
            "proposal_list": tea_proposals_copy}
        rcnn_losses = self.unsup_rcnn_cls_loss(
            feats,
            feats_ds if self.use_MSL else None,
            img_metas_student,
            proposal_list,
            pseudo_bboxes,
            pseudo_labels,
            GT_bboxes=gt_bboxes,
            GT_labels=gt_labels,
            teacher_infos=teacher_infos)
        loss.update(rcnn_losses)
        # expose the running pseudo-label quality metrics for logging
        loss["precision"] = self.precision
        loss["recall"] = self.recall
        return loss
    def unsup_rpn_loss(self, stu_feats, pseudo_bboxes, pseudo_labels, img_metas):
        """RPN loss on pseudo boxes filtered by the RPN-specific threshold."""
        stu_rpn_outs = self.student.rpn_head(stu_feats)
        # rpn loss
        gt_bboxes_rpn = []
        for bbox, label in zip(pseudo_bboxes, pseudo_labels):
            bbox, label, _ = filter_invalid(
                bbox[:, :4],
                label=label,
                score=bbox[
                    :, 4
                ],  # TODO: replace with foreground score, here is classification score,
                thr=self.train_cfg.rpn_pseudo_threshold,
                min_size=self.train_cfg.min_pseduo_box_size,
            )
            gt_bboxes_rpn.append(bbox)
        stu_rpn_loss_inputs = stu_rpn_outs + ([bbox.float() for bbox in gt_bboxes_rpn], img_metas)
        rpn_losses = self.student.rpn_head.loss(*stu_rpn_loss_inputs)
        return stu_rpn_outs, rpn_losses
    def unsup_rcnn_cls_loss(self,
                            feat,
                            feat_V2,
                            img_metas,
                            proposal_list,
                            pseudo_bboxes,
                            pseudo_labels,
                            GT_bboxes=None,
                            GT_labels=None,
                            teacher_infos=None):
        """RCNN loss on pseudo labels with PLA assignment and PCV reweighting.

        ``feat_V2`` is the downsampled view used for Multi-view Scale-invariant
        Learning (MSL); pass None to disable the extra loss terms.
        """
        # keep only confident pseudo boxes for the RCNN stage
        gt_bboxes, gt_labels, _ = multi_apply(
            filter_invalid,
            [bbox[:, :4] for bbox in pseudo_bboxes],
            pseudo_labels,
            [bbox[:, 4] for bbox in pseudo_bboxes],
            thr=self.train_cfg.cls_pseudo_threshold)
        # quality of pseudo label (monitoring only; uses GT of unlabeled data)
        precision, recall = get_pseudo_label_quality(
            gt_bboxes, gt_labels, GT_bboxes, GT_labels)
        self.precision = 0.9 * self.precision + 0.1 * precision
        self.recall = 0.9 * self.recall + 0.1 * recall
        # Prediction-guided Label Assignment (PLA)
        sampling_results = self.prediction_guided_label_assign(
            img_metas,
            proposal_list,
            gt_bboxes,
            gt_labels,
            teacher_infos=teacher_infos)
        selected_bboxes = [res.bboxes[:, :4] for res in sampling_results]
        pos_inds_list = [res.pos_inds for res in sampling_results]
        neg_inds_list = [res.neg_inds for res in sampling_results]
        pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
        pos_assigned_gt_inds_list = [res.pos_assigned_gt_inds for res in sampling_results]
        bbox_targets = self.student.roi_head.bbox_head.get_targets(
            sampling_results, gt_bboxes, gt_labels, self.student.train_cfg.rcnn
        )
        labels = bbox_targets[0]
        rois = bbox2roi(selected_bboxes)
        bbox_results = self.student.roi_head._bbox_forward(feat, rois)
        # Positive-proposal Consistency Voting: per-proposal regression weights
        bbox_weights = self.compute_PCV(
            bbox_results["bbox_pred"],
            labels,
            selected_bboxes,
            pos_gt_bboxes_list,
            pos_assigned_gt_inds_list)
        bbox_weights_ = bbox_weights.pow(2.0)
        pos_inds = (labels >= 0) & (labels < self.student.roi_head.bbox_head.num_classes)
        if pos_inds.any():
            # rescale so squaring the weights keeps the overall loss magnitude
            reg_scale_factor = bbox_weights.sum() / bbox_weights_.sum()
        else:
            reg_scale_factor = 0.0
        # Focal loss
        loss = self.student.roi_head.bbox_head.loss(
            bbox_results["cls_score"],
            bbox_results["bbox_pred"],
            rois,
            *(bbox_targets[:3]),
            bbox_weights_,
            reduction_override="none",
        )
        loss["loss_cls"] = loss["loss_cls"].sum() / max(bbox_targets[1].sum(), 1.0)
        loss["loss_bbox"] = reg_scale_factor * loss["loss_bbox"].sum() / max(
            bbox_targets[1].size()[0], 1.0)
        if feat_V2 is not None:
            # MSL: same targets, features from the downsampled view
            bbox_results_V2 = self.student.roi_head._bbox_forward(feat_V2, rois)
            loss_V2 = self.student.roi_head.bbox_head.loss(
                bbox_results_V2["cls_score"],
                bbox_results_V2["bbox_pred"],
                rois,
                *(bbox_targets[:3]),
                bbox_weights_,
                reduction_override="none",
            )
            loss["loss_cls_V2"] = loss_V2["loss_cls"].sum() / max(bbox_targets[1].sum(), 1.0)
            loss["loss_bbox_V2"] = reg_scale_factor * loss_V2["loss_bbox"].sum() / max(
                bbox_targets[1].size()[0], 1.0)
            if "acc" in loss_V2:
                loss["acc_V2"] = loss_V2["acc"]
        # print scores of positive proposals (analysis only)
        tea_cls_score = teacher_infos["cls_score"]
        num_proposal = [proposal.shape[0] for proposal in proposal_list]
        tea_cls_score_list = tea_cls_score.split(num_proposal, dim=0)  # tensor to list
        tea_pos_score = []
        for score, pos in zip(tea_cls_score_list, pos_inds_list):
            tea_pos_score.append(score[pos])
        tea_pos_score = torch.cat(tea_pos_score, dim=0)
        with torch.no_grad():
            if pos_inds.any():
                max_score = tea_pos_score[torch.arange(tea_pos_score.shape[0]), labels[pos_inds]].float()
                pos_score_mean = max_score.mean()
                pos_score_min = max_score.min()
            else:
                # zero tensors on the right device/dtype when no positives exist
                max_score = tea_cls_score.sum().float() * 0
                pos_score_mean = tea_cls_score.sum().float() * 0
                pos_score_min = tea_cls_score.sum().float() * 0
        loss["tea_pos_score_mean"] = pos_score_mean
        loss["tea_pos_score_min"] = pos_score_min
        loss['cls_score_thr'] = torch.tensor(self.train_cfg.cls_pseudo_threshold,
                                             dtype=torch.float,
                                             device=labels.device)
        loss["pos_number"] = pos_inds.sum().float()
        return loss
    def extract_teacher_info(self, img, img_metas):
        """Run the teacher and return (pseudo boxes, labels, (proposals, feats))."""
        feat = self.extract_feat(img, self.teacher, start_lvl=1)
        proposal_cfg = self.teacher.train_cfg.get(
            "rpn_proposal", self.teacher.test_cfg.rpn
        )
        rpn_out = list(self.teacher.rpn_head(feat))
        proposal_list = self.teacher.rpn_head.get_bboxes(
            *rpn_out, img_metas, cfg=proposal_cfg
        )
        # teacher proposals
        proposals = copy.deepcopy(proposal_list)
        proposal_list, proposal_label_list = \
            self.teacher.roi_head.simple_test_bboxes(
                feat, img_metas, proposal_list,
                self.teacher.test_cfg.rcnn,
                rescale=False
            )  # obtain teacher predictions
        proposal_list = [p.to(feat[0].device) for p in proposal_list]
        proposal_list = [
            p if p.shape[0] > 0 else p.new_zeros(0, 5) for p in proposal_list
        ]
        proposal_label_list = [p.to(feat[0].device) for p in proposal_label_list]
        # filter invalid box roughly
        if isinstance(self.train_cfg.pseudo_label_initial_score_thr, float):
            thr = self.train_cfg.pseudo_label_initial_score_thr
        else:
            # TODO: use dynamic threshold
            raise NotImplementedError("Dynamic Threshold is not implemented yet.")
        proposal_list, proposal_label_list, _ = list(
            zip(
                *[
                    filter_invalid(
                        proposal,
                        proposal_label,
                        proposal[:, -1],
                        thr=thr,
                        min_size=self.train_cfg.min_pseduo_box_size,
                    )
                    for proposal, proposal_label in zip(
                        proposal_list, proposal_label_list
                    )
                ]
            )
        )
        det_bboxes = proposal_list
        return det_bboxes, proposal_label_list, \
            (proposals, feat)
    @torch.no_grad()
    def compute_PCV(self,
                    bbox_preds,
                    labels,
                    proposal_list,
                    pos_gt_bboxes_list,
                    pos_assigned_gt_inds_list):
        """ Compute regression weights for each proposal according
                to Positive-proposal Consistency Voting (PCV).
        Args:
            bbox_preds (Tensors): bbox preds for proposals.
            labels (Tensors): assigned class label for each proposal.
                0-79 indicate fg, 80 indicates bg.
            proposal_list tuple[Tensor]: proposals for each image.
            pos_gt_bboxes_list, pos_assigned_gt_inds_list tuple[Tensor]: label assignment results
        Returns:
            bbox_weights (Tensors): Regression weights for proposals.
        """
        nums = [_.shape[0] for _ in proposal_list]
        labels = labels.split(nums, dim=0)
        bbox_preds = bbox_preds.split(nums, dim=0)
        bbox_weights_list = []
        for bbox_pred, label, proposals, pos_gt_bboxes, pos_assigned_gt_inds in zip(
                bbox_preds, labels, proposal_list, pos_gt_bboxes_list, pos_assigned_gt_inds_list):
            pos_inds = ((label >= 0) &
                        (label < self.student.roi_head.bbox_head.num_classes)).nonzero().reshape(-1)
            bbox_weights = proposals.new_zeros(bbox_pred.shape[0], 4)
            pos_proposals = proposals[pos_inds]
            if len(pos_inds):
                pos_bbox_weights = proposals.new_zeros(pos_inds.shape[0], 4)
                # gather the regression branch for each positive's own class
                pos_bbox_pred = bbox_pred.view(
                    bbox_pred.size(0), -1, 4)[
                    pos_inds, label[pos_inds]
                ]
                decoded_bboxes = self.student.roi_head.bbox_head.bbox_coder.decode(
                    pos_proposals, pos_bbox_pred)
                gt_inds_set = torch.unique(pos_assigned_gt_inds)
                IoUs = bbox_overlaps(
                    decoded_bboxes,
                    pos_gt_bboxes,
                    is_aligned=True)
                # all positives of the same GT share their mean decoded IoU
                for gt_ind in gt_inds_set:
                    idx_per_gt = (pos_assigned_gt_inds == gt_ind).nonzero().reshape(-1)
                    if idx_per_gt.shape[0] > 0:
                        pos_bbox_weights[idx_per_gt] = IoUs[idx_per_gt].mean()
                bbox_weights[pos_inds] = pos_bbox_weights
            bbox_weights_list.append(bbox_weights)
        bbox_weights = torch.cat(bbox_weights_list, 0)
        return bbox_weights
    @torch.no_grad()
    def prediction_guided_label_assign(
            self,
            img_metas,
            proposal_list,
            gt_bboxes,
            gt_labels,
            teacher_infos,
            gt_bboxes_ignore=None,
    ):
        """PLA: assign proposals to pseudo GTs using the teacher's refined
        (decoded) boxes for IoU, then refine per-GT candidate bags."""
        num_imgs = len(img_metas)
        if gt_bboxes_ignore is None:
            gt_bboxes_ignore = [None for _ in range(num_imgs)]
        # get teacher predictions (including cls scores and bbox ious)
        tea_proposal_list = teacher_infos["proposal_list"]
        tea_cls_score_concat = teacher_infos["cls_score"]
        tea_bbox_pred_concat = teacher_infos["bbox_pred"]
        num_per_img = [_.shape[0] for _ in tea_proposal_list]
        tea_cls_scores = tea_cls_score_concat.split(num_per_img, dim=0)
        tea_bbox_preds = tea_bbox_pred_concat.split(num_per_img, dim=0)
        decoded_bboxes_list = []
        for bbox_preds, cls_scores, proposals in zip(tea_bbox_preds, tea_cls_scores, tea_proposal_list):
            # decode each proposal with the regression branch of its top class
            pred_labels = cls_scores.max(dim=-1)[1]
            bbox_preds_ = bbox_preds.view(
                bbox_preds.size(0), -1,
                4)[torch.arange(bbox_preds.size(0)), pred_labels]
            decode_bboxes = self.student.roi_head.bbox_head.bbox_coder.decode(
                proposals, bbox_preds_)
            decoded_bboxes_list.append(decode_bboxes)
        # teacher boxes live in teacher space; move them to student space
        decoded_bboxes_list = self.convert_bbox_space(
            teacher_infos['img_metas'],
            img_metas,
            decoded_bboxes_list)
        sampling_results = []
        for i in range(num_imgs):
            assign_result = self.initial_assigner.assign(
                decoded_bboxes_list[i],
                gt_bboxes[i],
                gt_bboxes_ignore[i],
                gt_labels[i])
            gt_inds = assign_result.gt_inds
            pos_inds = torch.nonzero(gt_inds > 0, as_tuple=False).reshape(-1)
            assigned_gt_inds = gt_inds - 1
            pos_assigned_gt_inds = assigned_gt_inds[pos_inds]
            pos_labels = gt_labels[i][pos_assigned_gt_inds]
            tea_pos_cls_score = tea_cls_scores[i][pos_inds]
            tea_pos_bboxes = decoded_bboxes_list[i][pos_inds]
            ious = bbox_overlaps(tea_pos_bboxes, gt_bboxes[i])
            wh = proposal_list[i][:, 2:4] - proposal_list[i][:, :2]
            areas = wh.prod(dim=-1)
            pos_areas = areas[pos_inds]
            refined_gt_inds = self.assignment_refinement(gt_inds,
                                                         pos_inds,
                                                         pos_assigned_gt_inds,
                                                         ious,
                                                         tea_pos_cls_score,
                                                         pos_areas,
                                                         pos_labels)
            assign_result.gt_inds = refined_gt_inds + 1
            sampling_result = self.student.roi_head.bbox_sampler.sample(
                assign_result,
                proposal_list[i],
                gt_bboxes[i],
                gt_labels[i])
            sampling_results.append(sampling_result)
        return sampling_results
    @torch.no_grad()
    def assignment_refinement(self, gt_inds, pos_inds, pos_assigned_gt_inds,
                              ious, cls_score, areas, labels):
        # (PLA) refine assignment results according to teacher predictions
        # on each image: keep only the top "dynamic-k" candidates per GT,
        # ranked by sqrt(IoU * class score); remaining proposals become -1
        # (i.e. background after the +1 shift in the caller).
        refined_gt_inds = gt_inds.new_full((gt_inds.shape[0], ), -1)
        refined_pos_gt_inds = gt_inds.new_full((pos_inds.shape[0],), -1)
        gt_inds_set = torch.unique(pos_assigned_gt_inds)
        for gt_ind in gt_inds_set:
            pos_idx_per_gt = torch.nonzero(pos_assigned_gt_inds == gt_ind).reshape(-1)
            target_labels = labels[pos_idx_per_gt]
            target_scores = cls_score[pos_idx_per_gt, target_labels]
            target_areas = areas[pos_idx_per_gt]
            target_IoUs = ious[pos_idx_per_gt, gt_ind]
            cost = (target_IoUs * target_scores).sqrt()
            _, sort_idx = torch.sort(cost, descending=True)
            candidate_topk = min(pos_idx_per_gt.shape[0], self.PLA_candidate_topk)
            topk_ious, _ = torch.topk(target_IoUs, candidate_topk, dim=0)
            # calculate dynamic k for each gt
            dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
            sort_idx = sort_idx[:dynamic_ks]
            # filter some invalid (area == 0) proposals
            sort_idx = sort_idx[
                target_areas[sort_idx] > 0
            ]
            pos_idx_per_gt = pos_idx_per_gt[sort_idx]
            refined_pos_gt_inds[pos_idx_per_gt] = pos_assigned_gt_inds[pos_idx_per_gt]
        refined_gt_inds[pos_inds] = refined_pos_gt_inds
        return refined_gt_inds
    def forward_test(self, imgs, img_metas, **kwargs):
        """Delegate testing to the base detector implementation."""
        return super(MultiSteamDetector, self).forward_test(imgs, img_metas, **kwargs)
    def aug_test(self, imgs, img_metas, **kwargs):
        """Test with augmentations, using the configured inference submodel."""
        model: TwoStageDetector = getattr(self, 'model')
        return model.aug_test(imgs, img_metas, **kwargs)
    def simple_test(self, img, img_metas, proposals=None, rescale=False, **kwargs):
        """Test without augmentation."""
        model = self.model(**kwargs)
        assert model.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img, model, start_lvl=1)
        if proposals is None:
            proposal_list = model.rpn_head.simple_test_rpn(x, img_metas)
        else:
            proposal_list = proposals
        return model.roi_head.simple_test(
            x, proposal_list, img_metas, rescale=rescale)
    @force_fp32(apply_to=["bboxes", "trans_mat"])
    def _transform_bbox(self, bboxes, trans_mat, max_shape):
        """Warp boxes with per-image 3x3 matrices, clipping to max_shape."""
        bboxes = Transform2D.transform_bboxes(bboxes, trans_mat, max_shape)
        return bboxes
    @force_fp32(apply_to=["a", "b"])
    def _get_trans_mat(self, a, b):
        """Matrices mapping space A to space B: b @ a^-1, per image."""
        return [bt @ at.inverse() for bt, at in zip(b, a)]
    def convert_bbox_space(self, img_metas_A, img_metas_B, bboxes_A):
        """
        function: convert bboxes_A from space A into space B
        Parameters:
            img_metas: list(dict); bboxes_A: list(tensors)
        """
        transMat_A = [torch.from_numpy(meta["transform_matrix"]).float().to(bboxes_A[0].device)
                      for meta in img_metas_A]
        transMat_B = [torch.from_numpy(meta["transform_matrix"]).float().to(bboxes_A[0].device)
                      for meta in img_metas_B]
        M = self._get_trans_mat(transMat_A, transMat_B)
        bboxes_B = self._transform_bbox(
            bboxes_A,
            M,
            [meta["img_shape"] for meta in img_metas_B],
        )
        return bboxes_B
| 27,527 | 40.645991 | 129 | py |
PseCo | PseCo-master/ssod/models/utils/gather.py | import torch
import torch.distributed as dist
@torch.no_grad()
def concat_all_gather(tensor):
    """Gather `tensor` from every rank and concatenate along dim 0.

    Ranks may hold different numbers of rows, so each local tensor is
    padded to the largest leading dimension before the all-gather, then
    trimmed back afterwards.  No gradient flows through `dist.all_gather`.
    """
    world_size = dist.get_world_size()

    # 1) exchange per-rank shapes
    local_shape = torch.tensor(tensor.shape, device='cuda')
    all_shapes = [local_shape.clone() for _ in range(world_size)]
    dist.all_gather(all_shapes, local_shape)

    # 2) pad the local tensor up to the max leading dimension
    if all_shapes[0].numel() > 1:  # 2-D case: (rows, cols)
        max_rows = torch.tensor([s[0] for s in all_shapes]).max()
        padded = torch.zeros((max_rows, local_shape[1]), device='cuda').type_as(tensor)
    else:  # 1-D case
        max_rows = torch.tensor(all_shapes).max()
        padded = torch.zeros(max_rows, device='cuda').type_as(tensor)
    padded[:local_shape[0]] = tensor

    # 3) all-gather the padded tensors, then strip the padding again
    gathered = [torch.zeros_like(padded) for _ in range(world_size)]
    dist.all_gather(gathered, padded)
    pieces = [full[:shape[0]] for full, shape in zip(gathered, all_shapes)]
    return torch.cat(pieces, dim=0)
PseCo | PseCo-master/ssod/models/utils/bbox_utils.py | import warnings
from collections.abc import Sequence
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from mmdet.core.mask.structures import BitmapMasks
from torch.nn import functional as F
from mmcv.runner.fp16_utils import force_fp32
import ipdb
def resize_image(inputs, resize_ratio=0.5):
down_inputs = F.interpolate(inputs,
scale_factor=resize_ratio,
mode='nearest')
return down_inputs
def evaluate_pseudo_label(det_bboxes, det_labels, gt_bboxes,
                          gt_labels, thres=0.5):
    """Mark each pseudo box as a true positive or not.

    A detection is a TP when its best-matching GT box has IoU > `thres`
    and the class labels agree.  Returns a bool tensor of shape (num_det,).
    """
    det_area = (det_bboxes[:, 2:4] - det_bboxes[:, 0:2]).prod(dim=1)
    gt_area = (gt_bboxes[:, 2:4] - gt_bboxes[:, 0:2]).prod(dim=1)

    # pairwise intersection, shape (num_det, num_gt)
    top_left = torch.max(det_bboxes[:, None, :2], gt_bboxes[None, :, :2])
    bot_right = torch.min(det_bboxes[:, None, 2:4], gt_bboxes[None, :, 2:4])
    inter_wh = (bot_right - top_left).clamp(min=0)
    inter = inter_wh[..., 0] * inter_wh[..., 1]

    union = det_area[:, None] + gt_area[None, :] - inter
    ious = inter / (union + 1e-8)

    best_iou, best_gt = ious.max(dim=1)
    return (best_iou > thres) & (det_labels == gt_labels[best_gt])
def get_pseudo_label_quality(det_bboxes, det_labels, gt_bboxes, gt_labels):
    """Batch-level precision / recall of pseudo labels.

    All arguments are per-image lists.  Images where either side is empty
    contribute nothing; both metrics fall back to 0 when no detection
    could be evaluated at all.
    """
    tp_flags = []
    for dets, dlabels, gts, glabels in zip(det_bboxes, det_labels,
                                           gt_bboxes, gt_labels):
        if dets.shape[0] > 0 and gts.shape[0] > 0:
            tp_flags.append(evaluate_pseudo_label(dets, dlabels, gts, glabels))

    have_dets = torch.cat(det_bboxes, dim=0).shape[0] > 0
    if not (have_dets and len(tp_flags) > 0):
        return 0, 0

    flags = torch.cat(tp_flags, dim=0)
    num_tp = flags.sum()
    num_fp = (~flags).sum()
    num_gts = sum(gt.shape[0] for gt in gt_bboxes)
    precision = num_tp / (num_tp + num_fp)
    recall = num_tp / torch.tensor(num_gts, dtype=num_tp.dtype,
                                   device=num_tp.device)
    return precision, recall
def bbox2points(box):
    """Expand n boxes (x1, y1, x2, y2, ...) into their 4 corner points.

    Corners are emitted clockwise from the top-left; output is (n*4, 2).
    Extra columns beyond the first four are ignored.
    """
    x1 = box[:, 0:1]
    y1 = box[:, 1:2]
    x2 = box[:, 2:3]
    y2 = box[:, 3:4]
    corners = torch.cat([x1, y1, x2, y1, x2, y2, x1, y2], dim=1)
    return corners.reshape(-1, 2)
def points2bbox(point, max_w, max_h):
    """Collapse groups of 4 corner points back into axis-aligned boxes.

    Inverse of `bbox2points`; coordinates are clipped to [0, max_w] and
    [0, max_h].  Returns an (n, 4) tensor ((0, 4) when empty).
    """
    pts = point.reshape(-1, 4, 2)
    if pts.size(0) == 0:
        return pts.new_zeros(0, 4)
    lo = pts.min(dim=1).values
    hi = pts.max(dim=1).values
    x1 = lo[:, 0].clamp(min=0, max=max_w)
    y1 = lo[:, 1].clamp(min=0, max=max_h)
    x2 = hi[:, 0].clamp(min=0, max=max_w)
    y2 = hi[:, 1].clamp(min=0, max=max_h)
    return torch.stack([x1, y1, x2, y2], dim=1)
def check_is_tensor(obj):
    """Raise ``TypeError`` unless `obj` is a ``torch.Tensor``."""
    if isinstance(obj, torch.Tensor):
        return
    raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(obj)))
def normal_transform_pixel(
    height: int,
    width: int,
    eps: float = 1e-14,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
    """Matrix mapping pixel coordinates to the normalized [-1, 1] range.

    Returns a (1, 3, 3) homogeneous transform; `eps` replaces the
    denominator when a dimension equals 1, avoiding division by zero.
    """
    denom_w: float = eps if width == 1 else width - 1.0
    denom_h: float = eps if height == 1 else height - 1.0
    mat = torch.tensor(
        [[1.0, 0.0, -1.0], [0.0, 1.0, -1.0], [0.0, 0.0, 1.0]],
        device=device,
        dtype=dtype,
    )  # 3x3
    mat[0, 0] = mat[0, 0] * 2.0 / denom_w
    mat[1, 1] = mat[1, 1] * 2.0 / denom_h
    return mat.unsqueeze(0)  # 1x3x3
def normalize_homography(
    dst_pix_trans_src_pix: torch.Tensor,
    dsize_src: Tuple[int, int],
    dsize_dst: Tuple[int, int],
) -> torch.Tensor:
    """Re-express a pixel-space homography in normalized [-1, 1] coordinates.

    Args:
        dst_pix_trans_src_pix: (B, 3, 3) pixel-space transform.
        dsize_src: (height, width) of the source image.
        dsize_dst: (height, width) of the destination image.
    """
    check_is_tensor(dst_pix_trans_src_pix)
    shape_ok = (
        len(dst_pix_trans_src_pix.shape) == 3
        or dst_pix_trans_src_pix.shape[-2:] == (3, 3)
    )
    if not shape_ok:
        raise ValueError(
            "Input dst_pix_trans_src_pix must be a Bx3x3 tensor. Got {}".format(
                dst_pix_trans_src_pix.shape
            )
        )

    # source and destination sizes
    src_h, src_w = dsize_src
    dst_h, dst_w = dsize_dst

    # pixel <-> normalized transforms for the source and destination frames
    src_norm_trans_src_pix: torch.Tensor = normal_transform_pixel(src_h, src_w).to(
        dst_pix_trans_src_pix
    )
    src_pix_trans_src_norm = torch.inverse(src_norm_trans_src_pix.float()).to(
        src_norm_trans_src_pix.dtype
    )
    dst_norm_trans_dst_pix: torch.Tensor = normal_transform_pixel(dst_h, dst_w).to(
        dst_pix_trans_src_pix
    )
    # chain: normalized-src -> pixel-src -> pixel-dst -> normalized-dst
    return dst_norm_trans_dst_pix @ (dst_pix_trans_src_pix @ src_pix_trans_src_norm)
def warp_affine(
    src: torch.Tensor,
    M: torch.Tensor,
    dsize: Tuple[int, int],
    mode: str = "bilinear",
    padding_mode: str = "zeros",
    align_corners: Optional[bool] = None,
) -> torch.Tensor:
    """Warp a BxCxHxW batch with per-image affine transforms `M`.

    `dsize` is the (height, width) of the output; sampling behaviour
    follows `torch.nn.functional.grid_sample`.
    """
    if not isinstance(src, torch.Tensor):
        raise TypeError(
            "Input src type is not a torch.Tensor. Got {}".format(type(src))
        )
    if not isinstance(M, torch.Tensor):
        raise TypeError("Input M type is not a torch.Tensor. Got {}".format(type(M)))
    if not len(src.shape) == 4:
        raise ValueError("Input src must be a BxCxHxW tensor. Got {}".format(src.shape))
    if not (len(M.shape) == 3 or M.shape[-2:] == (2, 3)):
        raise ValueError("Input M must be a Bx2x3 tensor. Got {}".format(M.shape))
    # TODO: remove the statement below in kornia v0.6
    if align_corners is None:
        message: str = (
            "The align_corners default value has been changed. By default now is set True "
            "in order to match cv2.warpAffine."
        )
        warnings.warn(message)
        # default mirrors cv2.warpAffine behaviour
        align_corners = True

    batch, channels, in_h, in_w = src.size()
    # express the transform in normalized [-1, 1] coordinates for grid_sample
    dst_norm_trans_src_norm: torch.Tensor = normalize_homography(M, (in_h, in_w), dsize)
    src_norm_trans_dst_norm = torch.inverse(dst_norm_trans_src_norm.float())
    grid = F.affine_grid(
        src_norm_trans_dst_norm[:, :2, :],
        [batch, channels, dsize[0], dsize[1]],
        align_corners=align_corners,
    )
    warped = F.grid_sample(
        src.float(),
        grid,
        align_corners=align_corners,
        mode=mode,
        padding_mode=padding_mode,
    )
    return warped.to(src.dtype)
class Transform2D:
    """Apply per-image 2D homographies to boxes, masks and images.

    Every method accepts either a single item with a single 3x3 matrix, or
    parallel lists of items / matrices / output shapes, which are handled
    recursively.
    """

    @staticmethod
    def transform_bboxes(bbox, M, out_shape):
        """Warp (n, 4[+extra]) boxes with `M`; extra columns pass through."""
        if isinstance(bbox, Sequence):
            assert len(bbox) == len(M)
            return [
                Transform2D.transform_bboxes(b, m, o)
                for b, m, o in zip(bbox, M, out_shape)
            ]
        if bbox.shape[0] == 0:
            return bbox
        extra = bbox[:, 4:] if bbox.shape[1] > 4 else None
        corners = bbox2points(bbox[:, :4])
        # homogeneous coordinates, shape (n*4, 3)
        corners = torch.cat(
            [corners, corners.new_ones(corners.shape[0], 1)], dim=1
        )
        corners = torch.matmul(M, corners.t()).t()
        corners = corners[:, :2] / corners[:, 2:3]
        warped = points2bbox(corners, out_shape[1], out_shape[0])
        if extra is not None:
            return torch.cat([warped, extra], dim=1)
        return warped

    @staticmethod
    def transform_masks(
        mask: Union[BitmapMasks, List[BitmapMasks]],
        M: Union[torch.Tensor, List[torch.Tensor]],
        out_shape: Union[list, List[list]],
    ):
        """Warp bitmap masks with `M`, returning new `BitmapMasks`."""
        if isinstance(mask, Sequence):
            assert len(mask) == len(M)
            return [
                Transform2D.transform_masks(b, m, o)
                for b, m, o in zip(mask, M, out_shape)
            ]
        if mask.masks.shape[0] == 0:
            return BitmapMasks(np.zeros((0, *out_shape)), *out_shape)
        mask_tensor = (
            torch.from_numpy(mask.masks[:, None, ...]).to(M.device).to(M.dtype)
        )
        warped = (
            warp_affine(
                mask_tensor,
                M[None, ...].expand(mask.masks.shape[0], -1, -1),
                out_shape,
            )
            .squeeze(1)
            .cpu()
            .numpy()
        )
        return BitmapMasks(warped, out_shape[0], out_shape[1])

    @staticmethod
    def transform_image(img, M, out_shape):
        """Warp an image tensor (2-D, 3-D, or a list thereof) with `M`."""
        if isinstance(img, Sequence):
            assert len(img) == len(M)
            return [
                Transform2D.transform_image(b, m, shape)
                for b, m, shape in zip(img, M, out_shape)
            ]
        if img.dim() == 2:
            img = img[None, None, ...]
        elif img.dim() == 3:
            img = img[None, ...]
        warped = warp_affine(img.float(), M[None, ...], out_shape, mode="nearest")
        return warped.squeeze().to(img.dtype)
def filter_invalid(bbox, label=None, score=None, mask=None, thr=0.0, min_size=0, return_inds=False):
    """Filter boxes (and aligned labels / masks) by score and by box size.

    Args:
        bbox (Tensor): (n, 4+) boxes; columns 0-3 are x1, y1, x2, y2.
        label (Tensor | None): per-box labels, filtered in lockstep.
        score (Tensor | None): per-box scores; boxes with score <= `thr`
            are dropped.  None disables the score filter.
        mask (BitmapMasks | None): per-box masks, filtered in lockstep.
        thr (float): score threshold (strict `>` comparison).
        min_size (int | None): drop boxes whose width or height is not
            strictly greater than this.  None disables the size filter.
        return_inds (bool): additionally return a bool keep-mask over the
            ORIGINAL boxes.

    Returns:
        (bbox, label, mask) or (bbox, label, mask, keep_mask).
    """
    num_in = bbox.shape[0]
    if score is not None:
        valid = score > thr
        bbox = bbox[valid]
        if label is not None:
            label = label[valid]
        if mask is not None:
            mask = BitmapMasks(mask.masks[valid.cpu().numpy()], mask.height, mask.width)
        idx_1 = torch.nonzero(valid).reshape(-1)
    else:
        # Bug fix: the original left idx_1 undefined when score was None,
        # raising NameError in the size-filter / return_inds paths below.
        idx_1 = torch.arange(num_in, device=bbox.device)
    if min_size is not None:
        bw = bbox[:, 2] - bbox[:, 0]
        bh = bbox[:, 3] - bbox[:, 1]
        valid = (bw > min_size) & (bh > min_size)
        bbox = bbox[valid]
        if label is not None:
            label = label[valid]
        if mask is not None:
            mask = BitmapMasks(mask.masks[valid.cpu().numpy()], mask.height, mask.width)
        idx_2 = idx_1[valid]
    else:
        # Bug fix: idx_2 was likewise undefined when min_size was None.
        idx_2 = idx_1
    if not return_inds:
        return bbox, label, mask
    # scatter survivors back onto a mask over the original n boxes
    keep = torch.zeros(num_in, device=idx_2.device).scatter_(
        0, idx_2, torch.ones(idx_2.shape[0], device=idx_2.device)).bool()
    return bbox, label, mask, keep
def filter_invalid_classwise(bbox, label=None, score=None, class_acc=None, thr=0.0, min_size=0):
    """Filter boxes with a class-dependent score threshold.

    Per-class accuracies are normalised to [0, 1] and mapped into the
    factor range [0.625, 1.0]; each box must reach `thr` scaled by its
    class's factor (inclusive `>=`).  Tiny boxes are dropped afterwards.
    """
    peak = class_acc.max()
    if peak > 0:
        class_acc = class_acc / peak
    per_box_thr = thr * (0.375 * class_acc[label] + 0.625)
    keep = score.ge(per_box_thr).bool()
    bbox = bbox[keep]
    label = label[keep]
    if min_size is not None:
        w = bbox[:, 2] - bbox[:, 0]
        h = bbox[:, 3] - bbox[:, 1]
        big_enough = (w > min_size) & (h > min_size)
        bbox = bbox[big_enough]
        if label is not None:
            label = label[big_enough]
    return bbox, label
def filter_invalid_scalewise(bbox, label=None, score=None, scale_acc=None, thr=0.0, min_size=0, return_inds=False):
    """Filter boxes with a scale-dependent score threshold.

    Boxes are binned into 9 area ranges ((0, 32^2), ..., (224^2, 256^2) and
    (256^2, 1e8)); `scale_acc` holds one accuracy per bin, is normalised to
    [0, 1] and mapped into [0.625, 1.0] to scale `thr` per box (inclusive
    `>=`).  Boxes whose width or height is not strictly greater than
    `min_size` are dropped afterwards.

    Args:
        bbox (Tensor): (n, 4+) boxes.
        label (Tensor): per-box labels, filtered in lockstep.
        score (Tensor): per-box scores.
        scale_acc (Tensor): per-scale-bin accuracy (length >= 9).
        thr (float): base score threshold.
        min_size (int | None): minimum box side; None disables the filter.
        return_inds (bool): additionally return a bool keep-mask over the
            original boxes.

    Returns:
        (bbox, label) or (bbox, label, keep_mask).
    """
    device = bbox.device
    num_in = bbox.shape[0]
    bw = bbox[:, 2] - bbox[:, 0]
    bh = bbox[:, 3] - bbox[:, 1]
    area = bw * bh
    # Bug fix: build the helper tensors on bbox's device; the original
    # created them on the CPU, which breaks indexing with CUDA inputs.
    scale_range = torch.pow(torch.linspace(0, 256, steps=9, device=device), 2)
    scale_range = torch.cat([scale_range, scale_range.new_tensor([1e8])])
    # NOTE: boxes sitting exactly on a bin boundary (or with area 0) keep
    # label -1 and pick up scale_acc[-1]; preserved from the original.
    scale_label = -torch.ones(num_in, dtype=torch.long, device=device)
    for idx in range(scale_range.shape[0] - 1):
        inds = (area > scale_range[idx]) & (area < scale_range[idx + 1])
        scale_label[inds] = idx
    # normalize the per-bin accuracies
    if scale_acc.max() > 0:
        scale_acc = scale_acc / scale_acc.max()
    thres = thr * (0.375 * scale_acc[scale_label] + 0.625)
    select = score.ge(thres).bool()
    bbox = bbox[select]
    label = label[select]
    idx_1 = torch.nonzero(select).reshape(-1)
    if min_size is not None:
        bw = bbox[:, 2] - bbox[:, 0]
        bh = bbox[:, 3] - bbox[:, 1]
        valid = (bw > min_size) & (bh > min_size)
        bbox = bbox[valid]
        if label is not None:
            label = label[valid]
        idx_2 = idx_1[valid]
    else:
        # Bug fix: idx_2 was undefined when min_size was None, crashing below.
        idx_2 = idx_1
    if not return_inds:
        return bbox, label
    keep = torch.zeros(num_in, device=idx_2.device).scatter_(
        0, idx_2, torch.ones(idx_2.shape[0], device=idx_2.device)).bool()
    return bbox, label, keep
| 12,472 | 32.084881 | 115 | py |
PseCo | PseCo-master/ssod/datasets/builder.py | from collections.abc import Mapping, Sequence
from functools import partial
import torch
from mmcv.parallel import DataContainer
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from mmdet.datasets.builder import worker_init_fn
from mmdet.datasets.samplers import (
DistributedGroupSampler,
DistributedSampler,
GroupSampler,
)
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
import ipdb
# Local sampler registry. mmdet's stock group/distributed samplers are
# registered here so build_sampler() can assemble the right variant purely
# from a type name decorated with "Group"/"Distributed" prefixes.
SAMPLERS = Registry("sampler")
SAMPLERS.register_module(module=DistributedGroupSampler)
SAMPLERS.register_module(module=DistributedSampler)
SAMPLERS.register_module(module=GroupSampler)
def build_sampler(cfg, dist=False, group=False, default_args=None):
    """Build a sampler, decorating its type name with Group/Distributed.

    The base type comes from ``cfg["type"]`` when present, otherwise from
    ``default_args["type"]``. ``group`` prepends "Group" and ``dist`` then
    prepends "Distributed", selecting the matching registered sampler class.
    Note: when ``cfg`` is a non-empty dict it is updated in place.
    """
    if cfg and ("type" in cfg):
        base_type = cfg.get("type")
    else:
        base_type = default_args.get("type")
    # Order matters: "Distributed" goes outermost ("DistributedGroupSampler").
    prefix = ("Distributed" if dist else "") + ("Group" if group else "")
    sampler_type = prefix + base_type
    if not cfg:
        cfg = dict(type=sampler_type)
    else:
        cfg.update(type=sampler_type)
    return build_from_cfg(cfg, SAMPLERS, default_args)
def build_dataloader(
    dataset,
    samples_per_gpu,
    workers_per_gpu,
    num_gpus=1,
    dist=True,
    shuffle=True,
    seed=None,
    sampler_cfg=None,
    **kwargs,
):
    """Build a DataLoader wired for (optionally distributed) training.

    The sampler is assembled via ``build_sampler``; its type name gets
    "Group"/"Distributed" prefixes according to ``shuffle``/``dist``.
    ``sampler_cfg`` may override the default sampler type/arguments.
    """
    rank, world_size = get_dist_info()
    default_sampler_cfg = dict(type="Sampler", dataset=dataset)
    # Grouped (aspect-ratio) sampling needs the batch size; without
    # shuffling the plain sampler is simply told not to shuffle.
    if shuffle:
        default_sampler_cfg.update(samples_per_gpu=samples_per_gpu)
    else:
        default_sampler_cfg.update(shuffle=False)
    if dist:
        default_sampler_cfg.update(num_replicas=world_size, rank=rank, seed=seed)
        sampler = build_sampler(sampler_cfg, dist, shuffle, default_sampler_cfg)
        # Each process loads its own shard, so batch size is per-GPU.
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = (
            build_sampler(sampler_cfg, dist, shuffle, default_args=default_sampler_cfg)
            if shuffle
            else None
        )
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu
    # Deterministically seed each worker when a seed is given.
    init_fn = (
        partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
        if seed is not None
        else None
    )
    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        # flatten=True merges multi-view samples into one flat batch
        # (see ``collate`` in this module).
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu, flatten=True),
        pin_memory=False,
        worker_init_fn=init_fn,
        persistent_workers=True,
        **kwargs,
    )
    return data_loader
def collate(batch, samples_per_gpu=1, flatten=False):
    """Puts each data field into a tensor/DataContainer with outer dimension
    batch size.
    Extend default_collate to add support for
    :type:`~mmcv.parallel.DataContainer`. There are 3 cases.
    1. cpu_only = True, e.g., meta data
    2. cpu_only = False, stack = True, e.g., images tensors
    3. cpu_only = False, stack = False, e.g., gt bboxes

    With ``flatten=True``, a batch whose elements are themselves sequences
    (e.g. several augmented views per sample) is flattened into one big
    batch before collation.
    """
    if not isinstance(batch, Sequence):
        raise TypeError(f"{batch.dtype} is not supported.")
    if isinstance(batch[0], DataContainer):
        stacked = []
        if batch[0].cpu_only:
            # Case 1: keep per-GPU groups as plain Python lists on CPU.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append(
                    [sample.data for sample in batch[i : i + samples_per_gpu]]
                )
            return DataContainer(
                stacked, batch[0].stack, batch[0].padding_value, cpu_only=True
            )
        elif batch[0].stack:
            # Case 2: pad every sample in a per-GPU group to a common
            # trailing shape, then stack into one tensor.
            for i in range(0, len(batch), samples_per_gpu):
                assert isinstance(batch[i].data, torch.Tensor)
                if batch[i].pad_dims is not None:
                    ndim = batch[i].dim()
                    assert ndim > batch[i].pad_dims
                    # max_shape[d-1] = max size of the d-th-from-last dim.
                    max_shape = [0 for _ in range(batch[i].pad_dims)]
                    for dim in range(1, batch[i].pad_dims + 1):
                        max_shape[dim - 1] = batch[i].size(-dim)
                    for sample in batch[i : i + samples_per_gpu]:
                        # Leading (non-padded) dims must agree across the group.
                        for dim in range(0, ndim - batch[i].pad_dims):
                            assert batch[i].size(dim) == sample.size(dim)
                        for dim in range(1, batch[i].pad_dims + 1):
                            max_shape[dim - 1] = max(
                                max_shape[dim - 1], sample.size(-dim)
                            )
                    padded_samples = []
                    for sample in batch[i : i + samples_per_gpu]:
                        # F.pad takes (left, right) pairs starting from the
                        # last dim; only the trailing side is padded here.
                        pad = [0 for _ in range(batch[i].pad_dims * 2)]
                        for dim in range(1, batch[i].pad_dims + 1):
                            pad[2 * dim - 1] = max_shape[dim - 1] - sample.size(-dim)
                        padded_samples.append(
                            F.pad(sample.data, pad, value=sample.padding_value)
                        )
                    stacked.append(default_collate(padded_samples))
                elif batch[i].pad_dims is None:
                    stacked.append(
                        default_collate(
                            [sample.data for sample in batch[i : i + samples_per_gpu]]
                        )
                    )
                else:
                    raise ValueError("pad_dims should be either None or integers (1-3)")
        else:
            # Case 3: variable-size data stays as lists of tensors.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append(
                    [sample.data for sample in batch[i : i + samples_per_gpu]]
                )
        return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
    elif any([isinstance(b, Sequence) for b in batch]):
        if flatten:
            flattened = []
            for b in batch:
                if isinstance(b, Sequence):
                    flattened.extend(b)
                else:
                    flattened.extend([b])
            return collate(flattened, len(flattened))
        else:
            transposed = zip(*batch)
            return [collate(samples, samples_per_gpu) for samples in transposed]
    elif isinstance(batch[0], Mapping):
        # Collate dicts key-wise, recursing into each field.
        return {
            key: collate([d[key] for d in batch], samples_per_gpu) for key in batch[0]
        }
    else:
        return default_collate(batch)
| 6,383 | 34.664804 | 88 | py |
PseCo | PseCo-master/ssod/datasets/samplers/semi_sampler.py | from __future__ import division
import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import Sampler, WeightedRandomSampler
from ..builder import SAMPLERS
import ipdb
@SAMPLERS.register_module()
class GroupSemiBalanceSampler(Sampler):
    """Sampler mixing several concatenated datasets at a fixed ratio.

    Each batch of ``samples_per_gpu`` indices is filled with a fixed number
    of samples from every sub-dataset (e.g. labeled/unlabeled), given by
    ``sample_ratio``, while respecting mmdet's aspect-ratio groups
    (``dataset.flag``). Batches are finally shuffled at batch granularity so
    each batch keeps its dataset composition.

    Fix: refilling an exhausted per-dataset index pool used
    ``np.concatenate((pool, np.random.shuffle(...)))``; ``np.random.shuffle``
    shuffles in place and returns None, so the refill always crashed. The
    pool is now refilled with an explicitly shuffled copy.

    Args:
        dataset: concatenated dataset exposing ``flag`` and
            ``cumulative_sizes``.
        by_prob (bool): kept for API symmetry with the distributed variant;
            unused here.
        epoch_length (int): number of batches per epoch.
        sample_ratio (int | list[int]): relative sampling frequency of each
            sub-dataset; normalized so the smallest entry becomes 1.
        samples_per_gpu (int): batch size.
    """

    def __init__(
        self,
        dataset,
        by_prob=False,
        epoch_length=7330,
        sample_ratio=None,
        samples_per_gpu=1,
        **kwargs):
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.epoch = 0
        self.by_prob = by_prob
        assert hasattr(self.dataset, "flag")
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        self.total_size = 0
        self.cumulative_sizes = dataset.cumulative_sizes
        # Normalize sample_ratio: broadcast a scalar to every dataset, then
        # rescale so the least-sampled dataset has ratio 1.
        if not isinstance(sample_ratio, list):
            sample_ratio = [sample_ratio] * len(self.cumulative_sizes)
        self.sample_ratio = sample_ratio
        self.sample_ratio = [
            int(sr / min(self.sample_ratio)) for sr in self.sample_ratio
        ]
        # Per-group padded size: large enough that every dataset can
        # contribute at its ratio, rounded up to whole batches.
        self.size_of_dataset = []
        cumulative_sizes = [0] + self.cumulative_sizes
        for i, _ in enumerate(self.group_sizes):
            size_of_dataset = 0
            cur_group_inds = np.where(self.flag == i)[0]
            for j in range(len(self.cumulative_sizes)):
                cur_group_cur_dataset = np.where(
                    np.logical_and(
                        cur_group_inds > cumulative_sizes[j],
                        cur_group_inds < cumulative_sizes[j + 1],
                    )
                )[0]
                size_per_dataset = len(cur_group_cur_dataset)
                size_of_dataset = max(
                    size_of_dataset, np.ceil(size_per_dataset / self.sample_ratio[j])
                )
            self.size_of_dataset.append(
                int(np.ceil(size_of_dataset / self.samples_per_gpu))
                * self.samples_per_gpu
            )
            for j in range(len(self.cumulative_sizes)):
                self.total_size += self.size_of_dataset[-1] * self.sample_ratio[j]
        # Split the requested epoch length across flag groups in proportion
        # to group size; the last group absorbs the rounding remainder.
        group_factor = [g / sum(self.group_sizes) for g in self.group_sizes]
        self.epoch_length = [int(np.round(gf * epoch_length)) for gf in group_factor]
        self.epoch_length[-1] = epoch_length - sum(self.epoch_length[:-1])

    def __iter__(self):
        indices = []
        cumulative_sizes = [0] + self.cumulative_sizes
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                # Partition this group's indices by source dataset.
                indice_per_dataset = []
                for j in range(len(self.cumulative_sizes)):
                    indice_per_dataset.append(
                        indice[
                            np.where(
                                np.logical_and(
                                    indice >= cumulative_sizes[j],
                                    indice < cumulative_sizes[j + 1],
                                )
                            )[0]
                        ]
                    )
                for s in indice_per_dataset:
                    np.random.shuffle(s)
                shuffled_indice_per_dataset = indice_per_dataset.copy()
                # Emit batches until this group's share of the epoch is full.
                total_indice = []
                batch_idx = 0
                while batch_idx < self.epoch_length[i]:
                    ratio = [x / sum(self.sample_ratio) for x in self.sample_ratio]
                    # Per-dataset slot counts within one batch; the last
                    # dataset absorbs the rounding remainder.
                    ratio = [int(r * self.samples_per_gpu) for r in ratio]
                    ratio[-1] = self.samples_per_gpu - sum(ratio[:-1])
                    selected = []
                    for j in range(len(shuffled_indice_per_dataset)):
                        if len(shuffled_indice_per_dataset[j]) < ratio[j]:
                            # Pool exhausted: refill with a freshly shuffled
                            # copy. np.random.shuffle works in place and
                            # returns None, so it must not be fed directly
                            # to np.concatenate (the original bug).
                            refill = indice_per_dataset[j].copy()
                            np.random.shuffle(refill)
                            shuffled_indice_per_dataset[j] = np.concatenate(
                                (shuffled_indice_per_dataset[j], refill)
                            )
                        selected.append(shuffled_indice_per_dataset[j][: ratio[j]])
                        shuffled_indice_per_dataset[j] = shuffled_indice_per_dataset[j][
                            ratio[j] :
                        ]
                    selected = np.concatenate(selected)
                    total_indice.append(selected)
                    batch_idx += 1
                indice = np.concatenate(total_indice)
                indices.append(indice)
        indices = np.concatenate(indices)
        # Shuffle at batch granularity so each emitted batch keeps its
        # intra-batch dataset composition.
        indices = [
            indices[j]
            for i in np.random.permutation(
                range(len(indices) // self.samples_per_gpu)
            )
            for j in range(
                i * self.samples_per_gpu,
                (i + 1) * self.samples_per_gpu,
            )
        ]
        assert len(indices) == len(self)
        return iter(indices)

    def __len__(self):
        return sum(self.epoch_length) * self.samples_per_gpu
@SAMPLERS.register_module()
class DistributedGroupSemiBalanceSampler(Sampler):
    """Distributed sampler mixing concatenated datasets at a fixed ratio.

    Like ``GroupSemiBalanceSampler`` but shards the batch stream across
    ``num_replicas`` processes and can jitter the per-batch dataset mix via
    weighted sampling (``by_prob``). All randomness is driven by a
    ``torch.Generator`` seeded with ``self.epoch`` so every replica draws
    the same global order before taking its own shard.
    """

    def __init__(
        self,
        dataset,
        by_prob=False,
        epoch_length=7330,
        sample_ratio=None,
        samples_per_gpu=1,
        num_replicas=None,
        rank=None,
        **kwargs
    ):
        # A batch of size 1 cannot host a mixed labeled/unlabeled batch.
        assert samples_per_gpu > 1, "samples_per_gpu should be greater than 1."
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.by_prob = by_prob
        assert hasattr(self.dataset, "flag")
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        self.cumulative_sizes = dataset.cumulative_sizes
        # decide the frequency to sample each kind of datasets
        if not isinstance(sample_ratio, list):
            sample_ratio = [sample_ratio] * len(self.cumulative_sizes)
        self.sample_ratio = sample_ratio
        # Normalize so the least-sampled dataset has ratio 1.
        self.sample_ratio = [
            int(sr / min(self.sample_ratio)) for sr in self.sample_ratio
        ]
        self.size_of_dataset = []
        cumulative_sizes = [0] + self.cumulative_sizes
        for i, _ in enumerate(self.group_sizes):
            size_of_dataset = 0
            cur_group_inds = np.where(self.flag == i)[0]
            for j in range(len(self.cumulative_sizes)):
                cur_group_cur_dataset = np.where(
                    np.logical_and(
                        cur_group_inds > cumulative_sizes[j],
                        cur_group_inds < cumulative_sizes[j + 1],
                    )
                )[0]
                size_per_dataset = len(cur_group_cur_dataset)
                size_of_dataset = max(
                    size_of_dataset, np.ceil(size_per_dataset / self.sample_ratio[j])
                )
            # Pad to a whole number of per-replica batches.
            self.size_of_dataset.append(
                int(np.ceil(size_of_dataset / self.samples_per_gpu / self.num_replicas))
                * self.samples_per_gpu
            )
            for j in range(len(self.cumulative_sizes)):
                self.num_samples += self.size_of_dataset[-1] * self.sample_ratio[j]
        self.total_size = self.num_samples * self.num_replicas
        # Split epoch_length across flag groups proportionally to group size;
        # the last group absorbs the rounding remainder.
        group_factor = [g / sum(self.group_sizes) for g in self.group_sizes]
        self.epoch_length = [int(np.round(gf * epoch_length)) for gf in group_factor]
        self.epoch_length[-1] = epoch_length - sum(self.epoch_length[:-1])

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = []
        cumulative_sizes = [0] + self.cumulative_sizes
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                # Partition this group's indices by source dataset.
                indice_per_dataset = []
                for j in range(len(self.cumulative_sizes)):
                    indice_per_dataset.append(
                        indice[
                            np.where(
                                np.logical_and(
                                    indice >= cumulative_sizes[j],
                                    indice < cumulative_sizes[j + 1],
                                )
                            )[0]
                        ]
                    )
                shuffled_indice_per_dataset = [
                    s[list(torch.randperm(int(s.shape[0]), generator=g).numpy())]
                    for s in indice_per_dataset
                ]
                # Emit batches until this group's share (times replicas) is full.
                total_indice = []
                batch_idx = 0
                # pdb.set_trace()
                while batch_idx < self.epoch_length[i] * self.num_replicas:
                    ratio = [x / sum(self.sample_ratio) for x in self.sample_ratio]
                    if self.by_prob:
                        # Draw this batch's dataset mix stochastically, but
                        # guarantee at least one sample from each of the
                        # two sets before renormalizing.
                        indicator = list(
                            WeightedRandomSampler(
                                ratio,
                                self.samples_per_gpu,
                                replacement=True,
                                generator=g,
                            )
                        )
                        unique, counts = np.unique(indicator, return_counts=True)
                        ratio = [0] * len(shuffled_indice_per_dataset)
                        for u, c in zip(unique, counts):
                            ratio[u] = c
                        assert len(ratio) == 2, "Only two set is supported"
                        if ratio[0] == 0:
                            ratio[0] = 1
                            ratio[1] -= 1
                        elif ratio[1] == 0:
                            ratio[1] = 1
                            ratio[0] -= 1
                        ratio = [r / sum(ratio) for r in ratio]
                    # num of each dataset; the last one absorbs the remainder
                    ratio = [int(r * self.samples_per_gpu) for r in ratio]
                    ratio[-1] = self.samples_per_gpu - sum(ratio[:-1])
                    selected = []
                    # print(ratio)
                    for j in range(len(shuffled_indice_per_dataset)):
                        if len(shuffled_indice_per_dataset[j]) < ratio[j]:
                            # Pool exhausted: refill with a reshuffled copy
                            # drawn from the untouched per-dataset indices.
                            shuffled_indice_per_dataset[j] = np.concatenate(
                                (
                                    shuffled_indice_per_dataset[j],
                                    indice_per_dataset[j][
                                        list(
                                            torch.randperm(
                                                int(indice_per_dataset[j].shape[0]),
                                                generator=g,
                                            ).numpy()
                                        )
                                    ],
                                )
                            )
                        selected.append(shuffled_indice_per_dataset[j][: ratio[j]])
                        shuffled_indice_per_dataset[j] = shuffled_indice_per_dataset[j][
                            ratio[j] :
                        ]
                    selected = np.concatenate(selected)
                    total_indice.append(selected)
                    batch_idx += 1
                # print(self.size_of_dataset)
                indice = np.concatenate(total_indice)
                indices.append(indice)
        indices = np.concatenate(indices)  # k
        # Shuffle at batch granularity (keeping each batch's composition),
        # then take this replica's contiguous shard.
        indices = [
            indices[j]
            for i in list(
                torch.randperm(
                    len(indices) // self.samples_per_gpu,
                    generator=g,
                )
            )
            for j in range(
                i * self.samples_per_gpu,
                (i + 1) * self.samples_per_gpu,
            )
        ]
        offset = len(self) * self.rank
        indices = indices[offset : offset + len(self)]
        assert len(indices) == len(self)
        return iter(indices)

    def __len__(self):
        return sum(self.epoch_length) * self.samples_per_gpu

    def set_epoch(self, epoch):
        # Called by the runner each epoch so every replica reseeds identically.
        self.epoch = epoch

    # duplicated, implement it by weight instead of sampling
    # def update_sample_ratio(self):
    #     if self.dynamic_step is not None:
    #         self.sample_ratio = [d(self.epoch) for d in self.dynamic]
| 13,142 | 38.587349 | 88 | py |
PseCo | PseCo-master/ssod/datasets/pipelines/rand_aug.py | """
Modified from https://github.com/google-research/ssl_detection/blob/master/detection/utils/augmentation.py.
"""
import copy
import os
import os.path as osp
import cv2
import mmcv
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
from mmcv.image.colorspace import bgr2rgb, rgb2bgr
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets import PIPELINES
from mmdet.datasets.pipelines import Compose as BaseCompose
from mmdet.datasets.pipelines import transforms
from .geo_utils import GeometricTransformationBase as GTrans
import ipdb
PARAMETER_MAX = 10
def visualize_bboxes(img_metas, imgs_torch, bboxes, tag='student',
                     save_root='/home/SENSETIME/ligang2/Works/SSOD/paper_figures/'):
    """Debug helper: de-normalize a batch of images and save them with boxes.

    Args:
        img_metas (list[dict]): per-image metas; ``img_norm_cfg`` of the
            first image de-normalizes the whole batch, and each image's
            basename names its output file.
        imgs_torch (Tensor): normalized image batch, as accepted by
            ``mmcv.tensor2imgs``.
        bboxes (list): per-image boxes in (x1, y1, x2, y2) pixel coords.
        tag (str | None): suffix appended to each filename before ".jpg";
            None keeps the original names.
        save_root (str): output directory. Previously hard-coded; now a
            parameter, keeping the old path as default for compatibility.
    """
    img_norm_cfg = img_metas[0]['img_norm_cfg']
    filenames = [osp.basename(img_meta['filename']) for img_meta in img_metas]
    if tag is not None:
        filenames = [osp.splitext(filename)[0] + '_' + tag + '.jpg' for filename in filenames]
    imgs_np = mmcv.tensor2imgs(imgs_torch,
            img_norm_cfg['mean'], img_norm_cfg['std'], img_norm_cfg['to_rgb'])
    for img_np, box, filename in zip(imgs_np, bboxes, filenames):
        _visualize_bboxes(img_np, box, filename, save_root)
def _visualize_bboxes(img, gt_bboxes, filename, save_root):
    """Draw ``gt_bboxes`` on ``img`` (in place) and write it to disk.

    Boxes are (x1, y1, x2, y2); they are painted as 2px rectangles in BGR
    color (0, 0, 255). ``gt_bboxes`` may be None to save the bare image.
    """
    out_path = os.path.join(save_root, filename)
    if gt_bboxes is not None:
        for box in gt_bboxes:
            top_left = (int(box[0]), int(box[1]))
            bottom_right = (int(box[2]), int(box[3]))
            cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 2)
    cv2.imwrite(out_path, img)
def int_parameter(level, maxval, max_level=None):
    """Linearly map ``level`` (out of ``max_level``) to an int in [0, maxval]."""
    max_level = PARAMETER_MAX if max_level is None else max_level
    return int(level * maxval / max_level)
def float_parameter(level, maxval, max_level=None):
    """Linearly map ``level`` (out of ``max_level``) to a float in [0, maxval]."""
    max_level = PARAMETER_MAX if max_level is None else max_level
    return float(level) * maxval / max_level
class RandAug(object):
    """Base class for RandAugment-style color ops.

    refer to https://github.com/google-research/ssl_detection/blob/00d52272f
    61b56eade8d5ace18213cba6c74f6d8/detection/utils/augmentation.py#L240.

    Args:
        prob (float): probability of applying the op.
        magnitude (int): op strength on a 1..PARAMETER_MAX scale.
        random_magnitude (bool): if True, sample the effective magnitude
            uniformly from [1, magnitude) per call.
        record (bool): if True, append a deterministic replay config to
            ``results["aug_info"]``.
        magnitude_limit (int): the magnitude scale's maximum, forwarded to
            the parameter-mapping helpers.
    """

    def __init__(
        self,
        prob: float = 1.0,
        magnitude: int = 10,
        random_magnitude: bool = True,
        record: bool = False,
        magnitude_limit: int = 10,
    ):
        assert 0 <= prob <= 1, f"probability should be in (0,1) but get {prob}"
        assert (
            magnitude <= PARAMETER_MAX
        ), f"magnitude should be small than max value {PARAMETER_MAX} but get {magnitude}"
        self.prob = prob
        self.magnitude = magnitude
        self.magnitude_limit = magnitude_limit
        self.random_magnitude = random_magnitude
        self.record = record
        self.buffer = None

    def __call__(self, results):
        if np.random.random() < self.prob:
            magnitude = self.magnitude
            if self.random_magnitude:
                # np.random.randint(low, high) requires low < high and raised
                # ValueError when magnitude == 1; keep 1 as-is in that case.
                if magnitude > 1:
                    magnitude = np.random.randint(1, magnitude)
            if self.record:
                if "aug_info" not in results:
                    results["aug_info"] = []
                results["aug_info"].append(self.get_aug_info(magnitude=magnitude))
            results = self.apply(results, magnitude)
        # clear buffer
        return results

    def apply(self, results, magnitude: int = None):
        """Apply the op at the given magnitude; implemented by subclasses."""
        raise NotImplementedError()

    def __repr__(self):
        return f"{self.__class__.__name__}(prob={self.prob},magnitude={self.magnitude},max_magnitude={self.magnitude_limit},random_magnitude={self.random_magnitude})"

    def get_aug_info(self, **kwargs):
        """Build a deterministic replay config (prob=1, fixed magnitude)."""
        aug_info = dict(type=self.__class__.__name__)
        aug_info.update(
            dict(
                prob=1.0,
                random_magnitude=False,
                record=False,
                magnitude=self.magnitude,
            )
        )
        aug_info.update(kwargs)
        return aug_info

    def enable_record(self, mode: bool = True):
        self.record = mode
@PIPELINES.register_module()
class Identity(RandAug):
    # No-op augmentation: useful as a neutral branch inside OneOf pipelines.
    def apply(self, results, magnitude: int = None):
        # Return results untouched; magnitude is ignored.
        return results
@PIPELINES.register_module()
class AutoContrast(RandAug):
    """Apply PIL autocontrast to every image field; magnitude is ignored."""

    def apply(self, results, magnitude=None):
        for field in results.get("img_fields", ["img"]):
            src = results[field]
            # PIL works in RGB; images are stored BGR, so convert both ways.
            pil = Image.fromarray(bgr2rgb(src))
            enhanced = np.asarray(ImageOps.autocontrast(pil), dtype=src.dtype)
            results[field] = rgb2bgr(enhanced)
        return results
@PIPELINES.register_module()
class RandEqualize(RandAug):
    """Histogram-equalize every image field; magnitude is ignored."""

    def apply(self, results, magnitude=None):
        for field in results.get("img_fields", ["img"]):
            src = results[field]
            # PIL works in RGB; images are stored BGR, so convert both ways.
            pil = Image.fromarray(bgr2rgb(src))
            equalized = np.asarray(ImageOps.equalize(pil), dtype=src.dtype)
            results[field] = rgb2bgr(equalized)
        return results
@PIPELINES.register_module()
class RandSolarize(RandAug):
    """Solarize image fields: invert pixels above a magnitude-derived threshold."""

    def apply(self, results, magnitude=None):
        # Map the magnitude into [0, 256], then cap at 255 (a valid pixel value).
        threshold = min(int_parameter(magnitude, 256, self.magnitude_limit), 255)
        for field in results.get("img_fields", ["img"]):
            results[field] = mmcv.solarize(results[field], threshold)
        return results
def _enhancer_impl(enhancer):
    """Wrap a PIL ImageEnhance class so its factor is driven by a magnitude.

    The magnitude is mapped into [0.1, 1.9]; the 0.1 floor avoids an
    enhancement factor of 0, which would destroy the image entirely.
    """

    def apply_enhance(pil_img, level, max_level=None):
        factor = 0.1 + float_parameter(level, 1.8, max_level)
        return enhancer(pil_img).enhance(factor)

    return apply_enhance
class RandEnhance(RandAug):
    """Base for PIL ImageEnhance-backed ops; subclasses set ``op``."""

    op = None  # a PIL ImageEnhance class, e.g. ImageEnhance.Color

    def apply(self, results, magnitude=None):
        enhance = _enhancer_impl(self.op)
        for field in results.get("img_fields", ["img"]):
            # PIL works in RGB; images are stored BGR, so convert both ways.
            rgb = bgr2rgb(results[field])
            out = enhance(Image.fromarray(rgb), magnitude, self.magnitude_limit)
            results[field] = rgb2bgr(np.asarray(out, dtype=rgb.dtype))
        return results
@PIPELINES.register_module()
class RandColor(RandEnhance):
    # Color-balance jitter via PIL ImageEnhance.Color.
    op = ImageEnhance.Color
@PIPELINES.register_module()
class RandContrast(RandEnhance):
    # Contrast jitter via PIL ImageEnhance.Contrast.
    op = ImageEnhance.Contrast
@PIPELINES.register_module()
class RandBrightness(RandEnhance):
    # Brightness jitter via PIL ImageEnhance.Brightness.
    op = ImageEnhance.Brightness
@PIPELINES.register_module()
class RandSharpness(RandEnhance):
    # Sharpness jitter via PIL ImageEnhance.Sharpness.
    op = ImageEnhance.Sharpness
@PIPELINES.register_module()
class RandPosterize(RandAug):
    """Posterize image fields: reduce the number of bits per channel.

    The magnitude maps to 0..4 dropped bits (higher magnitude = fewer bits
    kept, i.e. a stronger effect).

    Fix: the magnitude-to-bits mapping was computed *inside* the field loop
    and rebound ``magnitude``, so when ``img_fields`` held more than one
    image, later fields were posterized with a double-mapped (much weaker)
    value. The mapping is now hoisted so every field gets the same strength.
    """

    def apply(self, results, magnitude=None):
        # Map the magnitude once; int_parameter(m, 4, limit) yields 0..4.
        dropped_bits = int_parameter(magnitude, 4, self.magnitude_limit)
        for key in results.get("img_fields", ["img"]):
            img = bgr2rgb(results[key])
            results[key] = rgb2bgr(
                np.asarray(
                    ImageOps.posterize(Image.fromarray(img), 4 - dropped_bits),
                    dtype=img.dtype,
                )
            )
        return results
@PIPELINES.register_module()
class Sequential(BaseCompose):
    """A Compose whose children can toggle aug-info recording as a group."""

    def __init__(self, transforms, record: bool = False):
        super().__init__(transforms)
        self.record = record
        self.enable_record(record)

    def enable_record(self, mode: bool = True):
        """Propagate the recording flag to this pipeline and all children."""
        self.record = mode
        for child in self.transforms:
            child.enable_record(mode)
@PIPELINES.register_module()
class OneOf(Sequential):
    """Pick one branch uniformly at random per call and apply it.

    Each entry of ``transforms`` may be a single transform config (dict) or
    a list of configs; either way it is wrapped into a ``Sequential`` branch.
    """

    def __init__(self, transforms, record: bool = False):
        # Deliberately skips Sequential.__init__: branches are built here.
        branches = []
        for trans in transforms:
            if isinstance(trans, list):
                branches.append(Sequential(trans))
            else:
                assert isinstance(trans, dict)
                branches.append(Sequential([trans]))
        self.transforms = branches
        self.enable_record(record)

    def __call__(self, results):
        # Draw one branch with equal probability and run it.
        return np.random.choice(self.transforms)(results)
@PIPELINES.register_module()
class ShuffledSequential(Sequential):
    """Apply every child transform once, in a fresh random order per call."""

    def __call__(self, data):
        for idx in np.random.permutation(len(self.transforms)):
            data = self.transforms[idx](data)
            if data is None:
                # A child signalled failure/skip; abort the pipeline.
                return None
        return data
"""
Geometric Augmentation. Modified from thirdparty/mmdetection/mmdet/datasets/pipelines/auto_augment.py
"""
def bbox2fields():
    """Map bbox result keys to their companion label/mask/seg keys.

    Returns:
        tuple(dict, dict, dict): (bbox->label, bbox->mask, bbox->seg) key
        maps, used when filtering/transforming annotations in lockstep.
    """
    bbox2label = {
        "gt_bboxes": "gt_labels",
        "gt_bboxes_ignore": "gt_labels_ignore",
    }
    bbox2mask = {
        "gt_bboxes": "gt_masks",
        "gt_bboxes_ignore": "gt_masks_ignore",
    }
    bbox2seg = {"gt_bboxes": "gt_semantic_seg"}
    return bbox2label, bbox2mask, bbox2seg
class GeometricAugmentation(object):
    """Base class for geometric augmentations (translate/rotate/shear).

    Subclasses implement :meth:`get_magnitude` (sample the transform
    parameters) and :meth:`apply` (apply them to images, boxes, masks and
    segmentation). This base class handles the fire probability, optional
    replay recording via ``results["aug_info"]``, and filtering of boxes
    that become too small after the transform.

    Fix: ``__repr__`` previously interpolated the nonexistent attribute
    ``self.magnitude`` and raised AttributeError; it now prints
    ``self.min_size`` as the label always claimed.

    Args:
        img_fill_val (int | float | tuple): fill value for exposed image
            regions; a scalar is broadcast to all 3 channels.
        seg_ignore_label (int): fill/ignore label for segmentation maps.
        min_size (int | None): minimum kept box side after the transform;
            None disables filtering.
        prob (float): probability of applying the augmentation.
        random_magnitude (bool): sample the magnitude instead of using a
            fixed one.
        record (bool): if True, append a deterministic replay config to
            ``results["aug_info"]``.
    """

    def __init__(
        self,
        img_fill_val=125,
        seg_ignore_label=255,
        min_size=0,
        prob: float = 1.0,
        random_magnitude: bool = True,
        record: bool = False,
    ):
        if isinstance(img_fill_val, (float, int)):
            img_fill_val = tuple([float(img_fill_val)] * 3)
        elif isinstance(img_fill_val, tuple):
            assert len(img_fill_val) == 3, "img_fill_val as tuple must have 3 elements."
            img_fill_val = tuple([float(val) for val in img_fill_val])
        assert np.all(
            [0 <= val <= 255 for val in img_fill_val]
        ), "all elements of img_fill_val should between range [0,255]."
        self.img_fill_val = img_fill_val
        self.seg_ignore_label = seg_ignore_label
        self.min_size = min_size
        self.prob = prob
        self.random_magnitude = random_magnitude
        self.record = record

    def __call__(self, results):
        if np.random.random() < self.prob:
            magnitude: dict = self.get_magnitude(results)
            if self.record:
                if "aug_info" not in results:
                    results["aug_info"] = []
                results["aug_info"].append(self.get_aug_info(**magnitude))
            results = self.apply(results, **magnitude)
            self._filter_invalid(results, min_size=self.min_size)
        return results

    def get_magnitude(self, results) -> dict:
        """Sample/return transform parameters; implemented by subclasses."""
        raise NotImplementedError()

    def apply(self, results, **kwargs):
        """Apply the transform to ``results``; implemented by subclasses."""
        raise NotImplementedError()

    def enable_record(self, mode: bool = True):
        self.record = mode

    def get_aug_info(self, **kwargs):
        """Build a deterministic replay config (prob=1, fixed magnitude)."""
        aug_info = dict(type=self.__class__.__name__)
        aug_info.update(
            dict(
                # make op deterministic
                prob=1.0,
                random_magnitude=False,
                record=False,
                img_fill_val=self.img_fill_val,
                seg_ignore_label=self.seg_ignore_label,
                min_size=self.min_size,
            )
        )
        aug_info.update(kwargs)
        return aug_info

    def _filter_invalid(self, results, min_size=0):
        """Filter bboxes and masks too small or translated out of image."""
        if min_size is None:
            return results
        bbox2label, bbox2mask, _ = bbox2fields()
        for key in results.get("bbox_fields", []):
            bbox_w = results[key][:, 2] - results[key][:, 0]
            bbox_h = results[key][:, 3] - results[key][:, 1]
            valid_inds = (bbox_w > min_size) & (bbox_h > min_size)
            valid_inds = np.nonzero(valid_inds)[0]
            results[key] = results[key][valid_inds]
            # label fields. e.g. gt_labels and gt_labels_ignore
            label_key = bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
            # mask fields, e.g. gt_masks and gt_masks_ignore
            mask_key = bbox2mask.get(key)
            if mask_key in results:
                results[mask_key] = results[mask_key][valid_inds]
        return results

    def __repr__(self):
        # Bug fix: used to read the nonexistent ``self.magnitude`` here.
        return f"""{self.__class__.__name__}(
        img_fill_val={self.img_fill_val},
        seg_ignore_label={self.seg_ignore_label},
        min_size={self.min_size},
        prob: float = {self.prob},
        random_magnitude: bool = {self.random_magnitude},
        )"""
@PIPELINES.register_module()
class RandTranslate(GeometricAugmentation):
    """Random translation of image, boxes, masks and segmentation maps.

    ``x``/``y`` are offsets expressed as a fraction of the image
    width/height. A (low, high) pair is sampled uniformly per call when
    ``random_magnitude`` is True; a scalar gives a fixed offset.
    """

    def __init__(self, x=None, y=None, **kwargs):
        super().__init__(**kwargs)
        self.x = x
        self.y = y
        # With neither axis configured this op can never fire.
        if self.x is None and self.y is None:
            self.prob = 0.0

    def get_magnitude(self, results):
        magnitude = {}
        if self.random_magnitude:
            if isinstance(self.x, (list, tuple)):
                assert len(self.x) == 2
                x = np.random.random() * (self.x[1] - self.x[0]) + self.x[0]
                magnitude["x"] = x
            if isinstance(self.y, (list, tuple)):
                assert len(self.y) == 2
                y = np.random.random() * (self.y[1] - self.y[0]) + self.y[0]
                magnitude["y"] = y
        else:
            if self.x is not None:
                assert isinstance(self.x, (int, float))
                magnitude["x"] = self.x
            if self.y is not None:
                assert isinstance(self.y, (int, float))
                magnitude["y"] = self.y
        return magnitude

    def apply(self, results, x=None, y=None):
        # ratio to pixel
        h, w, c = results["img_shape"]
        if x is not None:
            x = w * x
        if y is not None:
            y = h * y
        if x is not None:
            # translate horizontally
            self._translate(results, x)
        if y is not None:
            # translate vertically
            self._translate(results, y, direction="vertical")
        return results

    def _translate(self, results, offset, direction="horizontal"):
        if self.record:
            # Record the equivalent affine transform for replay/inversion.
            GTrans.apply(
                results,
                "shift",
                dx=offset if direction == "horizontal" else 0,
                dy=offset if direction == "vertical" else 0,
            )
        self._translate_img(results, offset, direction=direction)
        self._translate_bboxes(results, offset, direction=direction)
        # fill_val defaultly 0 for BitmapMasks and None for PolygonMasks.
        self._translate_masks(results, offset, direction=direction)
        self._translate_seg(
            results, offset, fill_val=self.seg_ignore_label, direction=direction
        )

    def _translate_img(self, results, offset, direction="horizontal"):
        for key in results.get("img_fields", ["img"]):
            img = results[key].copy()
            results[key] = mmcv.imtranslate(
                img, offset, direction, self.img_fill_val
            ).astype(img.dtype)

    def _translate_bboxes(self, results, offset, direction="horizontal"):
        """Shift bboxes horizontally or vertically, according to offset."""
        h, w, c = results["img_shape"]
        for key in results.get("bbox_fields", []):
            min_x, min_y, max_x, max_y = np.split(
                results[key], results[key].shape[-1], axis=-1
            )
            if direction == "horizontal":
                min_x = np.maximum(0, min_x + offset)
                max_x = np.minimum(w, max_x + offset)
            elif direction == "vertical":
                min_y = np.maximum(0, min_y + offset)
                max_y = np.minimum(h, max_y + offset)
            # the boxes translated outside of image will be filtered along with
            # the corresponding masks, by invoking ``_filter_invalid``.
            results[key] = np.concatenate([min_x, min_y, max_x, max_y], axis=-1)

    def _translate_masks(self, results, offset, direction="horizontal", fill_val=0):
        """Translate masks horizontally or vertically."""
        h, w, c = results["img_shape"]
        for key in results.get("mask_fields", []):
            masks = results[key]
            results[key] = masks.translate((h, w), offset, direction, fill_val)

    def _translate_seg(self, results, offset, direction="horizontal", fill_val=255):
        """Translate segmentation maps horizontally or vertically."""
        for key in results.get("seg_fields", []):
            seg = results[key].copy()
            results[key] = mmcv.imtranslate(seg, offset, direction, fill_val).astype(
                seg.dtype
            )

    def __repr__(self):
        # Splice x/y into the base repr, just before its closing line.
        repr_str = super().__repr__()
        return ("\n").join(
            repr_str.split("\n")[:-1]
            + [f"x={self.x}", f"y={self.y}"]
            + repr_str.split("\n")[-1:]
        )
@PIPELINES.register_module()
class RandRotate(GeometricAugmentation):
    """Random rotation of image, boxes, masks and segmentation maps.

    ``angle`` is in degrees (scalar for a fixed value, (low, high) range to
    sample from); ``center`` defaults to the image center; ``scale`` is an
    isotropic scale factor applied together with the rotation.
    """

    def __init__(self, angle=None, center=None, scale=1, **kwargs):
        super().__init__(**kwargs)
        self.angle = angle
        self.center = center
        self.scale = scale
        # Without an angle this op can never fire.
        if self.angle is None:
            self.prob = 0.0

    def get_magnitude(self, results):
        magnitude = {}
        if self.random_magnitude:
            if isinstance(self.angle, (list, tuple)):
                assert len(self.angle) == 2
                angle = (
                    np.random.random() * (self.angle[1] - self.angle[0]) + self.angle[0]
                )
                magnitude["angle"] = angle
        else:
            if self.angle is not None:
                assert isinstance(self.angle, (int, float))
                magnitude["angle"] = self.angle
        return magnitude

    def apply(self, results, angle: float = None):
        h, w = results["img"].shape[:2]
        center = self.center
        if center is None:
            center = ((w - 1) * 0.5, (h - 1) * 0.5)
        self._rotate_img(results, angle, center, self.scale)
        rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale)
        if self.record:
            # Record the affine matrix for replay/inversion.
            GTrans.apply(results, "rotate", cv2_rotation_matrix=rotate_matrix)
        self._rotate_bboxes(results, rotate_matrix)
        self._rotate_masks(results, angle, center, self.scale, fill_val=0)
        self._rotate_seg(
            results, angle, center, self.scale, fill_val=self.seg_ignore_label
        )
        return results

    def _rotate_img(self, results, angle, center=None, scale=1.0):
        """Rotate the image.
        Args:
            results (dict): Result dict from loading pipeline.
            angle (float): Rotation angle in degrees, positive values
                mean clockwise rotation. Same in ``mmcv.imrotate``.
            center (tuple[float], optional): Center point (w, h) of the
                rotation. Same in ``mmcv.imrotate``.
            scale (int | float): Isotropic scale factor. Same in
                ``mmcv.imrotate``.
        """
        for key in results.get("img_fields", ["img"]):
            img = results[key].copy()
            img_rotated = mmcv.imrotate(
                img, angle, center, scale, border_value=self.img_fill_val
            )
            results[key] = img_rotated.astype(img.dtype)

    def _rotate_bboxes(self, results, rotate_matrix):
        """Rotate the bboxes."""
        h, w, c = results["img_shape"]
        for key in results.get("bbox_fields", []):
            min_x, min_y, max_x, max_y = np.split(
                results[key], results[key].shape[-1], axis=-1
            )
            coordinates = np.stack(
                [[min_x, min_y], [max_x, min_y], [min_x, max_y], [max_x, max_y]]
            )  # [4, 2, nb_bbox, 1]
            # pad 1 to convert from format [x, y] to homogeneous
            # coordinates format [x, y, 1]
            coordinates = np.concatenate(
                (
                    coordinates,
                    np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype),
                ),
                axis=1,
            )  # [4, 3, nb_bbox, 1]
            coordinates = coordinates.transpose((2, 0, 1, 3))  # [nb_bbox, 4, 3, 1]
            rotated_coords = np.matmul(rotate_matrix, coordinates)  # [nb_bbox, 4, 2, 1]
            rotated_coords = rotated_coords[..., 0]  # [nb_bbox, 4, 2]
            # New axis-aligned box = bounding box of the 4 rotated corners,
            # clipped to the image.
            min_x, min_y = (
                np.min(rotated_coords[:, :, 0], axis=1),
                np.min(rotated_coords[:, :, 1], axis=1),
            )
            max_x, max_y = (
                np.max(rotated_coords[:, :, 0], axis=1),
                np.max(rotated_coords[:, :, 1], axis=1),
            )
            min_x, min_y = (
                np.clip(min_x, a_min=0, a_max=w),
                np.clip(min_y, a_min=0, a_max=h),
            )
            max_x, max_y = (
                np.clip(max_x, a_min=min_x, a_max=w),
                np.clip(max_y, a_min=min_y, a_max=h),
            )
            results[key] = np.stack([min_x, min_y, max_x, max_y], axis=-1).astype(
                results[key].dtype
            )

    def _rotate_masks(self, results, angle, center=None, scale=1.0, fill_val=0):
        """Rotate the masks."""
        h, w, c = results["img_shape"]
        for key in results.get("mask_fields", []):
            masks = results[key]
            results[key] = masks.rotate((h, w), angle, center, scale, fill_val)

    def _rotate_seg(self, results, angle, center=None, scale=1.0, fill_val=255):
        """Rotate the segmentation map."""
        for key in results.get("seg_fields", []):
            seg = results[key].copy()
            results[key] = mmcv.imrotate(
                seg, angle, center, scale, border_value=fill_val
            ).astype(seg.dtype)

    def __repr__(self):
        # Splice angle/center/scale into the base repr, before its last line.
        repr_str = super().__repr__()
        return ("\n").join(
            repr_str.split("\n")[:-1]
            + [f"angle={self.angle}", f"center={self.center}", f"scale={self.scale}"]
            + repr_str.split("\n")[-1:]
        )
@PIPELINES.register_module()
class RandShear(GeometricAugmentation):
    def __init__(self, x=None, y=None, interpolation="bilinear", **kwargs):
        # x / y: shear magnitudes in degrees — a scalar for a fixed value or
        # a (low, high) range to sample from; None disables that axis.
        super().__init__(**kwargs)
        self.x = x
        self.y = y
        self.interpolation = interpolation
        # With neither axis configured this op can never fire.
        if self.x is None and self.y is None:
            self.prob = 0.0
    def get_magnitude(self, results):
        """Return a dict with optional "x"/"y" shear magnitudes.

        Values are drawn uniformly from a (low, high) range when
        ``random_magnitude`` is set, otherwise the configured scalars.
        """
        magnitude = {}
        if self.random_magnitude:
            if isinstance(self.x, (list, tuple)):
                assert len(self.x) == 2
                x = np.random.random() * (self.x[1] - self.x[0]) + self.x[0]
                magnitude["x"] = x
            if isinstance(self.y, (list, tuple)):
                assert len(self.y) == 2
                y = np.random.random() * (self.y[1] - self.y[0]) + self.y[0]
                magnitude["y"] = y
        else:
            if self.x is not None:
                assert isinstance(self.x, (int, float))
                magnitude["x"] = self.x
            if self.y is not None:
                assert isinstance(self.y, (int, float))
                magnitude["y"] = self.y
        return magnitude
    def apply(self, results, x=None, y=None):
        # NOTE(review): the degree value is converted to a shear factor with
        # tanh (and x is negated) rather than the conventional tan —
        # presumably to bound the factor; confirm against the reference
        # augmentation implementation.
        if x is not None:
            # shear horizontally
            self._shear(results, np.tanh(-x * np.pi / 180))
        if y is not None:
            # shear vertically
            self._shear(results, np.tanh(y * np.pi / 180), direction="vertical")
        return results
    def _shear(self, results, magnitude, direction="horizontal"):
        # Apply one axis of shear to image, boxes, masks and seg in lockstep.
        if self.record:
            # Record the transform for replay/inversion.
            GTrans.apply(results, "shear", magnitude=magnitude, direction=direction)
        self._shear_img(results, magnitude, direction, interpolation=self.interpolation)
        self._shear_bboxes(results, magnitude, direction=direction)
        # fill_val defaultly 0 for BitmapMasks and None for PolygonMasks.
        self._shear_masks(
            results, magnitude, direction=direction, interpolation=self.interpolation
        )
        self._shear_seg(
            results,
            magnitude,
            direction=direction,
            interpolation=self.interpolation,
            fill_val=self.seg_ignore_label,
        )
def _shear_img(
self, results, magnitude, direction="horizontal", interpolation="bilinear"
):
"""Shear the image.
Args:
results (dict): Result dict from loading pipeline.
magnitude (int | float): The magnitude used for shear.
direction (str): The direction for shear, either "horizontal"
or "vertical".
interpolation (str): Same as in :func:`mmcv.imshear`.
"""
for key in results.get("img_fields", ["img"]):
img = results[key]
img_sheared = mmcv.imshear(
img,
magnitude,
direction,
border_value=self.img_fill_val,
interpolation=interpolation,
)
results[key] = img_sheared.astype(img.dtype)
def _shear_bboxes(self, results, magnitude, direction="horizontal"):
"""Shear the bboxes."""
h, w, c = results["img_shape"]
if direction == "horizontal":
shear_matrix = np.stack([[1, magnitude], [0, 1]]).astype(
np.float32
) # [2, 2]
else:
shear_matrix = np.stack([[1, 0], [magnitude, 1]]).astype(np.float32)
for key in results.get("bbox_fields", []):
min_x, min_y, max_x, max_y = np.split(
results[key], results[key].shape[-1], axis=-1
)
coordinates = np.stack(
[[min_x, min_y], [max_x, min_y], [min_x, max_y], [max_x, max_y]]
) # [4, 2, nb_box, 1]
coordinates = (
coordinates[..., 0].transpose((2, 1, 0)).astype(np.float32)
) # [nb_box, 2, 4]
new_coords = np.matmul(
shear_matrix[None, :, :], coordinates
) # [nb_box, 2, 4]
min_x = np.min(new_coords[:, 0, :], axis=-1)
min_y = np.min(new_coords[:, 1, :], axis=-1)
max_x = np.max(new_coords[:, 0, :], axis=-1)
max_y = np.max(new_coords[:, 1, :], axis=-1)
min_x = np.clip(min_x, a_min=0, a_max=w)
min_y = np.clip(min_y, a_min=0, a_max=h)
max_x = np.clip(max_x, a_min=min_x, a_max=w)
max_y = np.clip(max_y, a_min=min_y, a_max=h)
results[key] = np.stack([min_x, min_y, max_x, max_y], axis=-1).astype(
results[key].dtype
)
def _shear_masks(
self,
results,
magnitude,
direction="horizontal",
fill_val=0,
interpolation="bilinear",
):
"""Shear the masks."""
h, w, c = results["img_shape"]
for key in results.get("mask_fields", []):
masks = results[key]
results[key] = masks.shear(
(h, w),
magnitude,
direction,
border_value=fill_val,
interpolation=interpolation,
)
def _shear_seg(
self,
results,
magnitude,
direction="horizontal",
fill_val=255,
interpolation="bilinear",
):
"""Shear the segmentation maps."""
for key in results.get("seg_fields", []):
seg = results[key]
results[key] = mmcv.imshear(
seg,
magnitude,
direction,
border_value=fill_val,
interpolation=interpolation,
).astype(seg.dtype)
def __repr__(self):
repr_str = super().__repr__()
return ("\n").join(
repr_str.split("\n")[:-1]
+ [f"x_magnitude={self.x}", f"y_magnitude={self.y}"]
+ repr_str.split("\n")[-1:]
)
@PIPELINES.register_module()
class RandErase(GeometricAugmentation):
    """Randomly erase rectangular patches from image / masks / seg maps.

    Args:
        n_iterations (int | tuple | None): number of patches to erase, or a
            (min, max) range sampled from when ``random_magnitude`` is on.
        size (float | tuple | None): patch size as a ratio of the image
            side, or a (min, max) ratio range (each ratio must be < 1).
        squared (bool): if True, sampled patches are square.
        patches (list | None): fixed patches ``[x1, y1, x2, y2]`` used when
            ``random_magnitude`` is off.
    """
    def __init__(
        self,
        n_iterations=None,
        size=None,
        squared: bool = True,
        patches=None,
        **kwargs,
    ):
        kwargs.update(min_size=None)
        super().__init__(**kwargs)
        self.n_iterations = n_iterations
        self.size = size
        self.squared = squared
        self.patches = patches
    def get_magnitude(self, results):
        """Return ``{"patches": [[x1, y1, x2, y2], ...]}`` to erase."""
        magnitude = {}
        if self.random_magnitude:
            n_iterations = self._get_erase_cycle()
            patches = []
            h, w, c = results["img_shape"]
            for i in range(n_iterations):
                # random sample patch size in the image
                ph, pw = self._get_patch_size(h, w)
                # random sample patch left top in the image
                px, py = np.random.randint(0, w - pw), np.random.randint(0, h - ph)
                patches.append([px, py, px + pw, py + ph])
            magnitude["patches"] = patches
        else:
            assert self.patches is not None
            magnitude["patches"] = self.patches
        return magnitude
    def _get_erase_cycle(self):
        # Fixed count, or sampled from the [min, max) range.
        if isinstance(self.n_iterations, int):
            n_iterations = self.n_iterations
        else:
            assert (
                isinstance(self.n_iterations, (tuple, list))
                and len(self.n_iterations) == 2
            )
            n_iterations = np.random.randint(*self.n_iterations)
        return n_iterations
    def _get_patch_size(self, h, w):
        """Return the (height, width) of one erase patch in pixels."""
        if isinstance(self.size, float):
            assert 0 < self.size < 1
            return int(self.size * h), int(self.size * w)
        else:
            assert isinstance(self.size, (tuple, list))
            assert len(self.size) == 2
            assert 0 <= self.size[0] < 1 and 0 <= self.size[1] < 1
            w_ratio = np.random.random() * (self.size[1] - self.size[0]) + self.size[0]
            h_ratio = w_ratio
            # Non-squared patches sample height and width independently.
            if not self.squared:
                h_ratio = (
                    np.random.random() * (self.size[1] - self.size[0]) + self.size[0]
                )
            return int(h_ratio * h), int(w_ratio * w)
    def apply(self, results, patches: list):
        for patch in patches:
            self._erase_image(results, patch, fill_val=self.img_fill_val)
            self._erase_mask(results, patch)
            self._erase_seg(results, patch, fill_val=self.seg_ignore_label)
        return results
    def _erase_image(self, results, patch, fill_val=128):
        for key in results.get("img_fields", ["img"]):
            tmp = results[key].copy()
            x1, y1, x2, y2 = patch
            tmp[y1:y2, x1:x2, :] = fill_val
            results[key] = tmp
    def _erase_mask(self, results, patch, fill_val=0):
        for key in results.get("mask_fields", []):
            masks = results[key]
            if isinstance(masks, PolygonMasks):
                # convert mask to bitmask
                masks = masks.to_bitmap()
            x1, y1, x2, y2 = patch
            tmp = masks.masks.copy()
            tmp[:, y1:y2, x1:x2] = fill_val
            masks = BitmapMasks(tmp, masks.height, masks.width)
            results[key] = masks
    def _erase_seg(self, results, patch, fill_val=0):
        for key in results.get("seg_fields", []):
            seg = results[key].copy()
            x1, y1, x2, y2 = patch
            seg[y1:y2, x1:x2] = fill_val
            results[key] = seg
@PIPELINES.register_module()
class RecomputeBox(object):
    """Pipeline step that recomputes bounding boxes from the current masks.

    Useful after geometric augmentations: each bbox field is replaced by the
    tight axis-aligned box of its corresponding bitmap mask.

    Args:
        record (bool): when True, append a ``RecomputeBox`` entry to
            ``results["aug_info"]`` so the step can be replayed.
    """
    def __init__(self, record=False):
        self.record = record
    def __call__(self, results):
        if self.record:
            if "aug_info" not in results:
                results["aug_info"] = []
            results["aug_info"].append(dict(type="RecomputeBox"))
        _, bbox2mask, _ = bbox2fields()
        for key in results.get("bbox_fields", []):
            mask_key = bbox2mask.get(key)
            if mask_key in results:
                masks = results[mask_key]
                results[key] = self._recompute_bbox(masks)
        return results
    def enable_record(self, mode: bool = True):
        self.record = mode
    def _recompute_bbox(self, masks):
        """Return an (N, 4) float32 array of tight [x1, y1, x2, y2] boxes.

        Empty masks yield all-zero rows.
        """
        # FIX: np.zeros takes the shape as a single tuple; the previous call
        # ``np.zeros(n, 4, dtype=...)`` passed 4 into the dtype slot and
        # raised a TypeError at runtime.
        boxes = np.zeros((masks.masks.shape[0], 4), dtype=np.float32)
        x_any = np.any(masks.masks, axis=1)  # any-per-column -> x extents
        y_any = np.any(masks.masks, axis=2)  # any-per-row -> y extents
        for idx in range(masks.masks.shape[0]):
            x = np.where(x_any[idx, :])[0]
            y = np.where(y_any[idx, :])[0]
            if len(x) > 0 and len(y) > 0:
                boxes[idx, :] = np.array(
                    [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32
                )
        return boxes
# TODO: Implement Augmentation Inside Box
@PIPELINES.register_module()
class RandResize(transforms.Resize):
    """mmdet ``Resize`` that can record the applied scale for later replay.

    When ``record`` is True, the realized scale factor is pushed into the
    sample's geometric transform (GTrans) and a deterministic ``aug_info``
    entry is appended so the exact same resize can be reproduced.
    """
    def __init__(self, record=False, **kwargs):
        super().__init__(**kwargs)
        self.record = record
    def __call__(self, results):
        results = super().__call__(results)
        if self.record:
            scale_factor = results["scale_factor"]
            GTrans.apply(results, "scale", sx=scale_factor[0], sy=scale_factor[1])
            if "aug_info" not in results:
                results["aug_info"] = []
            new_h, new_w = results["img"].shape[:2]
            # Record the *realized* output size with randomness disabled so a
            # replay resizes to exactly the same shape.
            results["aug_info"].append(
                dict(
                    type=self.__class__.__name__,
                    record=False,
                    img_scale=(new_w, new_h),
                    keep_ratio=False,
                    bbox_clip_border=self.bbox_clip_border,
                    backend=self.backend,
                )
            )
        return results
    def enable_record(self, mode: bool = True):
        self.record = mode
@PIPELINES.register_module()
class RandFlip(transforms.RandomFlip):
    """mmdet ``RandomFlip`` that can record the flip decision for replay.

    When ``record`` is True, the realized flip (or absence of one) is stored
    deterministically in ``results["aug_info"]`` (flip_ratio forced to 1.0 or
    0.0), and an actual flip is also pushed into GTrans.
    """
    def __init__(self, record=False, **kwargs):
        super().__init__(**kwargs)
        self.record = record
    def __call__(self, results):
        results = super().__call__(results)
        if self.record:
            if "aug_info" not in results:
                results["aug_info"] = []
            if results["flip"]:
                GTrans.apply(
                    results,
                    "flip",
                    direction=results["flip_direction"],
                    shape=results["img_shape"][:2],
                )
                results["aug_info"].append(
                    dict(
                        type=self.__class__.__name__,
                        record=False,
                        flip_ratio=1.0,
                        direction=results["flip_direction"],
                    )
                )
            else:
                # No flip happened: record a guaranteed no-op for replay.
                results["aug_info"].append(
                    dict(
                        type=self.__class__.__name__,
                        record=False,
                        flip_ratio=0.0,
                        direction="vertical",
                    )
                )
        return results
    def enable_record(self, mode: bool = True):
        self.record = mode
@PIPELINES.register_module()
class MultiBranch(object):
    """Run several named transform pipelines on deep copies of one sample.

    Returns a list with one transformed result per branch, or None if any
    branch drops the sample.
    """
    def __init__(self, **transform_group):
        # One composed pipeline per named branch.
        self.transform_group = {k: BaseCompose(v) for k, v in transform_group.items()}
    def __call__(self, results):
        branch_outputs = []
        for branch_name, pipeline in self.transform_group.items():
            branch_result = pipeline(copy.deepcopy(results))
            if branch_result is None:
                return None
            branch_outputs.append(branch_result)
        return branch_outputs
| 35,191 | 34.475806 | 166 | py |
PseCo | PseCo-master/ssod/utils/logger.py | import logging
import os
import sys
from collections import Counter
from typing import Tuple
import mmcv
import numpy as np
import torch
from mmcv.runner.dist_utils import get_dist_info
from mmcv.utils import get_logger
from mmdet.core.visualization import imshow_det_bboxes
try:
import wandb
except:
wandb = None
_log_counter = Counter()
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the project-wide "mmdet.ssod" logger.

    Args:
        log_file (str, optional): If given, log records are also written to
            this file. Defaults to None.
        log_level (int, optional): Logging level. Defaults to logging.INFO.

    Returns:
        :obj:`logging.Logger`: The configured logger.
    """
    root = get_logger(name="mmdet.ssod", log_file=log_file, log_level=log_level)
    # Stop records from bubbling up to ancestor loggers (avoids duplicates).
    root.propagate = False
    return root
def _find_caller():
    """Walk up the call stack to the first frame outside ``utils/logger.*``.

    Returns:
        tuple: ``(module_name, (filename, lineno, function_name))`` of the
        external caller; used as a per-call-site key for rate-limited logging.
        ``__main__`` is reported as ``"ssod"``. Returns None implicitly if no
        such frame exists.
    """
    # Start two frames up (skip this helper and its direct caller).
    frame = sys._getframe(2)
    while frame:
        code = frame.f_code
        # Skip frames that belong to this logging helper module itself.
        if os.path.join("utils", "logger.") not in code.co_filename:
            mod_name = frame.f_globals["__name__"]
            if mod_name == "__main__":
                mod_name = r"ssod"
            return mod_name, (code.co_filename, frame.f_lineno, code.co_name)
        frame = frame.f_back
def convert_box(tag, boxes, box_labels, class_labels, std, scores=None):
    """Package detection boxes into the wandb BoundingBoxes2D format.

    Args:
        tag (str): name of the box overlay group in wandb.
        boxes (torch.Tensor): (N, 4) boxes in [x1, y1, x2, y2] pixel coords.
        box_labels (torch.Tensor): (N,) integer class ids.
        class_labels (Sequence[str]): id -> class-name lookup.
        std (int | Sequence): divisor used to normalize boxes to [0, 1];
            typically ``image.shape[:2]`` (h, w).
        scores (torch.Tensor, optional): (N,) per-box scores.
    """
    if isinstance(std, int):
        std = [std, std]
    if len(std) != 4:
        # (h, w) -> (w, h, w, h) so it lines up with (x1, y1, x2, y2).
        std = std[::-1] * 2
    std = boxes.new_tensor(std).reshape(1, 4)
    wandb_box = {}
    boxes = boxes / std
    boxes = boxes.detach().cpu().numpy().tolist()
    box_labels = box_labels.detach().cpu().numpy().tolist()
    class_labels = {k: class_labels[k] for k in range(len(class_labels))}
    wandb_box["class_labels"] = class_labels
    assert len(boxes) == len(box_labels)
    if scores is not None:
        scores = scores.detach().cpu().numpy().tolist()
        box_data = [
            dict(
                position=dict(minX=box[0], minY=box[1], maxX=box[2], maxY=box[3]),
                class_id=label,
                scores=dict(cls=scores[i]),
            )
            for i, (box, label) in enumerate(zip(boxes, box_labels))
        ]
    else:
        box_data = [
            dict(
                position=dict(minX=box[0], minY=box[1], maxX=box[2], maxY=box[3]),
                class_id=label,
            )
            for i, (box, label) in enumerate(zip(boxes, box_labels))
        ]
    wandb_box["box_data"] = box_data
    return {tag: wandb.data_types.BoundingBoxes2D(wandb_box, tag)}
def color_transform(img_tensor, mean, std, to_rgb=False):
    """Undo normalization on a CHW image tensor; return an HWC float32 array."""
    chw = img_tensor.detach().cpu().numpy()
    hwc = chw.transpose((1, 2, 0)).astype(np.float32)
    return mmcv.imdenormalize(hwc, mean, std, to_bgr=not to_rgb)
def log_image_with_boxes(
    tag: str,
    image: torch.Tensor,
    bboxes: torch.Tensor,
    bbox_tag: str = None,
    labels: torch.Tensor = None,
    scores: torch.Tensor = None,
    class_names: Tuple[str] = None,
    filename: str = None,
    img_norm_cfg: dict = None,
    backend: str = "auto",
    interval: int = 50,
):
    """Rate-limited visualization of detection boxes on an image.

    Only rank 0 logs, and only every ``interval``-th call per call site.
    ``backend="wandb"`` uploads to wandb; ``backend="file"`` writes an image
    under ``$WORK_DIR/tag/bbox_tag/filename``; ``"auto"`` picks wandb when
    available, else file. ``img_norm_cfg`` (mean/std/to_rgb) is applied to
    undo input normalization before drawing.
    """
    rank, _ = get_dist_info()
    if rank != 0:
        return
    _, key = _find_caller()
    _log_counter[key] += 1
    if not (interval == 1 or _log_counter[key] % interval == 1):
        return
    if backend == "auto":
        if wandb is None:
            backend = "file"
        else:
            backend = "wandb"
    if backend == "wandb":
        if wandb is None:
            raise ImportError("wandb is not installed")
        assert (
            wandb.run is not None
        ), "wandb has not been initialized, call `wandb.init` first`"
    elif backend != "file":
        raise TypeError("backend must be file or wandb")
    if filename is None:
        filename = f"{_log_counter[key]}.jpg"
    # NOTE(review): any caller-supplied bbox_tag is overwritten with "vis";
    # looks intentional (single overlay group) but worth confirming.
    if bbox_tag is not None:
        bbox_tag = "vis"
    if img_norm_cfg is not None:
        image = color_transform(image, **img_norm_cfg)
    if labels is None:
        # No labels: treat every box as generic foreground.
        labels = bboxes.new_zeros(bboxes.shape[0]).long()
        class_names = ["foreground"]
    if backend == "wandb":
        im = {}
        im["data_or_path"] = image
        im["boxes"] = convert_box(
            bbox_tag, bboxes, labels, class_names, scores=scores, std=image.shape[:2]
        )
        wandb.log({tag: wandb.Image(**im)}, commit=False)
    elif backend == "file":
        root_dir = os.environ.get("WORK_DIR", ".")
        imshow_det_bboxes(
            image,
            bboxes.cpu().detach().numpy(),
            labels.cpu().detach().numpy(),
            class_names=class_names,
            show=False,
            out_file=os.path.join(root_dir, tag, bbox_tag, filename),
        )
    else:
        raise TypeError("backend must be file or wandb")
def log_every_n(msg: str, n: int = 50, level: int = logging.DEBUG, backend="auto"):
    """Log ``msg`` only every ``n``-th call per call site.

    Args:
        msg (str | dict): message; dicts are sent to wandb (when initialized)
            instead of the text logger.
        n (int): log every n-th call (1 logs every call).
        level (int): logging level used for the text logger path.
        backend (str): currently unused; selection is implicit (wandb for
            dicts when available, root logger otherwise).
    """
    caller_module, key = _find_caller()
    _log_counter[key] += 1
    if n == 1 or _log_counter[key] % n == 1:
        if isinstance(msg, dict) and (wandb is not None) and (wandb.run is not None):
            wandb.log(msg, commit=False)
        else:
            get_root_logger().log(level, msg)
| 5,179 | 28.942197 | 86 | py |
PseCo | PseCo-master/ssod/utils/structure_utils.py | import warnings
from collections import Counter, Mapping, Sequence
from numbers import Number
from typing import Dict, List
import numpy as np
import torch
from mmdet.core.mask.structures import BitmapMasks
from torch.nn import functional as F
import ipdb
_step_counter = Counter()
def list_concat(data_list: List[list]):
    """Concatenate a list of tensors (torch.cat) or a list of sequences
    (flattened into one new list)."""
    first = data_list[0]
    if isinstance(first, torch.Tensor):
        return torch.cat(data_list)
    merged = list(first)
    for chunk in data_list[1:]:
        merged.extend(chunk)
    return merged
def sequence_concat(a, b):
    """Return ``a + b`` when both arguments are sequences, otherwise None."""
    both_sequences = isinstance(a, Sequence) and isinstance(b, Sequence)
    return a + b if both_sequences else None
def dict_concat(dicts: List[Dict[str, list]]):
    """Merge dicts sharing the same keys by concatenating their values."""
    keys = dicts[0].keys()
    return {key: list_concat([each[key] for each in dicts]) for key in keys}
def dict_fuse(obj_list, reference_obj):
    """Stack ``obj_list`` into one tensor when the reference element is a
    tensor; otherwise return the list unchanged."""
    if not isinstance(reference_obj, torch.Tensor):
        return obj_list
    return torch.stack(obj_list)
def dict_select(dict1: Dict[str, list], key: str, value: str):
    """Filter every field of ``dict1`` down to the positions where
    ``dict1[key] == value`` (tensor fields are re-stacked)."""
    keep = [entry == value for entry in dict1[key]]
    return {
        field: dict_fuse([item for item, flag in zip(vals, keep) if flag], vals)
        for field, vals in dict1.items()
    }
def dict_split(dict1, key):
    """Partition ``dict1`` into one sub-dict per unique value of ``dict1[key]``."""
    group_names = set(dict1[key])
    return {name: dict_select(dict1, key, name) for name in group_names}
def dict_sum(a, b):
    """Recursively add two parallel structures of dicts/lists/numbers."""
    if isinstance(a, dict):
        assert isinstance(b, dict)
        return {key: dict_sum(val, b[key]) for key, val in a.items()}
    if isinstance(a, list):
        assert len(a) == len(b)
        summed = []
        for left, right in zip(a, b):
            summed.append(dict_sum(left, right))
        return summed
    return a + b
def zero_like(tensor_pack, prefix=""):
    """Build a zero-filled structure mirroring ``tensor_pack``.

    Tensors/ndarrays become zeros of the same shape; mappings and sequences
    recurse (``prefix`` is prepended to keys at this level only). Anything
    else warns and becomes 0.
    """
    if isinstance(tensor_pack, torch.Tensor):
        return tensor_pack.new_zeros(tensor_pack.shape)
    if isinstance(tensor_pack, np.ndarray):
        return np.zeros_like(tensor_pack)
    if isinstance(tensor_pack, Mapping):
        return {prefix + key: zero_like(value) for key, value in tensor_pack.items()}
    if isinstance(tensor_pack, Sequence):
        return [zero_like(item) for item in tensor_pack]
    warnings.warn("Unexpected data type {}".format(type(tensor_pack)))
    return 0
def pad_stack(tensors, shape, pad_value=255):
    """Right/bottom-pad each (H, W) tensor to ``shape`` with ``pad_value``
    and stack the results along a new leading dimension."""
    padded = []
    for t in tensors:
        pad_right = shape[1] - t.shape[1]
        pad_bottom = shape[0] - t.shape[0]
        padded.append(F.pad(t, pad=[0, pad_right, 0, pad_bottom], value=pad_value))
    return torch.stack(padded)
def result2bbox(result):
    """Flatten a per-class detection list into ``(bbox, label)`` arrays.

    ``result[i]`` holds the (n_i, 5) detections of class ``i``; the output
    is their concatenation plus a matching (N,) class-id array.
    """
    bbox = np.concatenate(result)
    if bbox.shape[0] == 0:
        return bbox, np.zeros(0, dtype=np.uint8)
    per_class = [
        [cls_id] * len(dets) for cls_id, dets in enumerate(result) if len(dets) > 0
    ]
    label = np.concatenate(per_class).reshape((-1,))
    return bbox, label
def result2mask(result):
    """Flatten per-class mask lists into one BitmapMasks (plus a None
    placeholder to mirror result2bbox's return arity)."""
    non_empty = [np.stack(masks) for masks in result if len(masks) > 0]
    if non_empty:
        mask = np.concatenate(non_empty)
    else:
        mask = np.zeros((0, 1, 1))
    return BitmapMasks(mask, mask.shape[1], mask.shape[2]), None
def sequence_mul(obj, multiplier):
    """Scale ``obj`` by ``multiplier`` — element-wise for sequences."""
    if not isinstance(obj, Sequence):
        return obj * multiplier
    return [item * multiplier for item in obj]
def is_match(word, word_list):
    """Return True when any keyword in ``word_list`` is a substring of ``word``."""
    return any(keyword in word for keyword in word_list)
def weighted_loss(loss: dict, weight, ignore_keys=[], warmup=0):
    """Scale the "loss" entries of a loss dict by ``weight``.

    A module-level call counter drives a linear warmup: during the first
    ``warmup`` calls the effective weight ramps from 0 to its full value.

    Args:
        loss (dict): name -> loss value (scalar or sequence of scalars).
        weight (Mapping | Number): per-substring multipliers, or one
            multiplier for every "loss" entry.
        ignore_keys (list): with a Number weight, entries matching any of
            these substrings are zeroed instead of scaled.
            NOTE(review): mutable default argument — harmless here since it
            is never mutated, but worth cleaning up.
        warmup (int): number of calls over which to ramp the weight.
    """
    _step_counter["weight"] += 1
    # Linear ramp 0 -> 1 over the first `warmup` calls, identity afterwards.
    lambda_weight = (
        lambda x: x * (_step_counter["weight"] - 1) / warmup
        if _step_counter["weight"] <= warmup
        else x
    )
    if isinstance(weight, Mapping):
        for k, v in weight.items():
            for name, loss_item in loss.items():
                if (k in name) and ("loss" in name):
                    loss[name] = sequence_mul(loss[name], lambda_weight(v))
    elif isinstance(weight, Number):
        for name, loss_item in loss.items():
            if "loss" in name:
                if not is_match(name, ignore_keys):
                    loss[name] = sequence_mul(loss[name], lambda_weight(weight))
                else:
                    # Ignored entries are zeroed, not dropped, so the dict
                    # keeps a stable set of keys across steps.
                    loss[name] = sequence_mul(loss[name], 0.0)
    else:
        raise NotImplementedError()
    return loss
| 4,372 | 27.212903 | 88 | py |
PseCo | PseCo-master/ssod/utils/exts/optimizer_constructor.py | import warnings
import torch
from torch.nn import GroupNorm, LayerNorm
from mmcv.utils import _BatchNorm, _InstanceNorm, build_from_cfg
from mmcv.utils.ext_loader import check_ops_exist
from mmcv.runner.optimizer.builder import OPTIMIZER_BUILDERS, OPTIMIZERS
from mmcv.runner.optimizer import DefaultOptimizerConstructor
@OPTIMIZER_BUILDERS.register_module()
class NamedOptimizerConstructor(DefaultOptimizerConstructor):
    """Main difference to default constructor:
    1) Add name to parame groups

    Each emitted param group carries a ``"name"`` key (the parameter's dotted
    path), which lets other utilities look up per-parameter lr/decay later.
    """
    def add_params(self, params, module, prefix="", is_dcn_module=None):
        """Add all parameters of module to the params list.
        The parameters of the given module will be added to the list of param
        groups, with specific rules defined by paramwise_cfg.
        Args:
            params (list[dict]): A list of param groups, it will be modified
                in place.
            module (nn.Module): The module to be added.
            prefix (str): The prefix of the module
            is_dcn_module (int|float|None): If the current module is a
                submodule of DCN, `is_dcn_module` will be passed to
                control conv_offset layer's learning rate. Defaults to None.
        """
        # get param-wise options
        custom_keys = self.paramwise_cfg.get("custom_keys", {})
        # first sort with alphabet order and then sort with reversed len of str
        # (longest keys first, so the most specific custom key wins).
        sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)
        bias_lr_mult = self.paramwise_cfg.get("bias_lr_mult", 1.0)
        bias_decay_mult = self.paramwise_cfg.get("bias_decay_mult", 1.0)
        norm_decay_mult = self.paramwise_cfg.get("norm_decay_mult", 1.0)
        dwconv_decay_mult = self.paramwise_cfg.get("dwconv_decay_mult", 1.0)
        bypass_duplicate = self.paramwise_cfg.get("bypass_duplicate", False)
        dcn_offset_lr_mult = self.paramwise_cfg.get("dcn_offset_lr_mult", 1.0)
        # special rules for norm layers and depth-wise conv layers
        is_norm = isinstance(module, (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))
        is_dwconv = (
            isinstance(module, torch.nn.Conv2d) and module.in_channels == module.groups
        )
        for name, param in module.named_parameters(recurse=False):
            # Difference to the mmcv default: every group is tagged with the
            # parameter's fully-qualified name.
            param_group = {"params": [param], "name": f"{prefix}.{name}"}
            if not param.requires_grad:
                params.append(param_group)
                continue
            if bypass_duplicate and self._is_in(param_group, params):
                warnings.warn(
                    f"{prefix} is duplicate. It is skipped since "
                    f"bypass_duplicate={bypass_duplicate}"
                )
                continue
            # if the parameter match one of the custom keys, ignore other rules
            is_custom = False
            for key in sorted_keys:
                if key in f"{prefix}.{name}":
                    is_custom = True
                    lr_mult = custom_keys[key].get("lr_mult", 1.0)
                    param_group["lr"] = self.base_lr * lr_mult
                    if self.base_wd is not None:
                        decay_mult = custom_keys[key].get("decay_mult", 1.0)
                        param_group["weight_decay"] = self.base_wd * decay_mult
                    break
            if not is_custom:
                # bias_lr_mult affects all bias parameters
                # except for norm.bias dcn.conv_offset.bias
                if name == "bias" and not (is_norm or is_dcn_module):
                    param_group["lr"] = self.base_lr * bias_lr_mult
                if (
                    prefix.find("conv_offset") != -1
                    and is_dcn_module
                    and isinstance(module, torch.nn.Conv2d)
                ):
                    # deal with both dcn_offset's bias & weight
                    param_group["lr"] = self.base_lr * dcn_offset_lr_mult
                # apply weight decay policies
                if self.base_wd is not None:
                    # norm decay
                    if is_norm:
                        param_group["weight_decay"] = self.base_wd * norm_decay_mult
                    # depth-wise conv
                    elif is_dwconv:
                        param_group["weight_decay"] = self.base_wd * dwconv_decay_mult
                    # bias lr and decay
                    elif name == "bias" and not is_dcn_module:
                        # TODO: current bias_decay_mult will have affect on DCN
                        param_group["weight_decay"] = self.base_wd * bias_decay_mult
            params.append(param_group)
        if check_ops_exist():
            from mmcv.ops import DeformConv2d, ModulatedDeformConv2d
            is_dcn_module = isinstance(module, (DeformConv2d, ModulatedDeformConv2d))
        else:
            is_dcn_module = False
        # Recurse into children so every parameter in the tree gets a group.
        for child_name, child_mod in module.named_children():
            child_prefix = f"{prefix}.{child_name}" if prefix else child_name
            self.add_params(
                params, child_mod, prefix=child_prefix, is_dcn_module=is_dcn_module
            )
| 5,163 | 44.298246 | 87 | py |
PseCo | PseCo-master/ssod/utils/hooks/weights_summary.py | import os.path as osp
import torch.distributed as dist
from mmcv.parallel import is_module_wrapper
from mmcv.runner.hooks import HOOKS, Hook
from ..logger import get_root_logger
from prettytable import PrettyTable
def bool2str(input):
    """Render a truthy value as "Y" and a falsy one as "N"."""
    return "Y" if input else "N"
def unknown():
    """Placeholder cell shown when a value cannot be determined."""
    return "-"
def shape_str(size):
    """Format a shape like (2, 3, 4) as "2X3X4"."""
    return "X".join(str(dim) for dim in size)
def min_max_str(input):
    """Render the min/max of an array-like (needs .min()/.max()) with 3 decimals."""
    lo, hi = input.min(), input.max()
    return "Min:{:.3f} Max:{:.3f}".format(lo, hi)
def construct_params_dict(input):
    """Index optimizer param groups by their "name"; unnamed groups are skipped."""
    assert isinstance(input, list)
    return {group["name"]: group for group in input if "name" in group}
def max_match_sub_str(strs, sub_str):
    # find most related str for sub_str
    """Return the longest string in ``strs`` that is a prefix of ``sub_str``.

    An exact match is returned immediately; otherwise the longest proper
    prefix wins; None when nothing matches. Used to map a parameter name
    like "backbone.layer1.conv.weight" to its optimizer param group.
    """
    matched = None
    for child in strs:
        if len(child) <= len(sub_str):
            if child == sub_str:
                return child
            # FIX: this prefix check used to sit on an `elif` attached to the
            # outer length test, where its condition could never hold (a
            # longer `child` can't equal a truncated `sub_str`), so `matched`
            # was dead code and only exact matches were ever returned.
            if sub_str[: len(child)] == child:
                if matched is None or len(matched) < len(child):
                    matched = child
    return matched
def get_optim(optimizer, params_dict, name, key):
    """Look up option ``key`` (e.g. "lr") for parameter ``name``.

    Prefers the best-matching named param group; falls back to the
    optimizer defaults; returns None when neither has the key.
    """
    group_name = max_match_sub_str(list(params_dict.keys()), name)
    if group_name is not None:
        return params_dict[group_name][key]
    if key in optimizer.defaults:
        return optimizer.defaults[key]
@HOOKS.register_module()
class WeightSummary(Hook):
    """Runner hook that logs a table of all model parameters before training.

    For each parameter the table shows: name, whether it requires grad,
    shape, value range, and (when an optimizer is available) its learning
    rate and weight decay. Only rank 0 emits the table.
    """
    def before_run(self, runner):
        if runner.rank != 0:
            return
        # Unwrap DDP/DP so parameter names match the raw model.
        if is_module_wrapper(runner.model):
            model = runner.model.module
        else:
            model = runner.model
        weight_summaries = self.collect_model_info(model, optimizer=runner.optimizer)
        logger = get_root_logger()
        logger.info(weight_summaries)
    @staticmethod
    def collect_model_info(model, optimizer=None, rich_text=False):
        """Build the parameter summary table as a string.

        Returns None when ``rich_text=True`` (not implemented).
        """
        param_groups = None
        if optimizer is not None:
            param_groups = construct_params_dict(optimizer.param_groups)
        if not rich_text:
            table = PrettyTable(
                ["Name", "Optimized", "Shape", "Value Scale [Min,Max]", "Lr", "Wd"]
            )
            for name, param in model.named_parameters():
                table.add_row(
                    [
                        name,
                        bool2str(param.requires_grad),
                        shape_str(param.size()),
                        min_max_str(param),
                        unknown()
                        if param_groups is None
                        else get_optim(optimizer, param_groups, name, "lr"),
                        unknown()
                        if param_groups is None
                        else get_optim(optimizer, param_groups, name, "weight_decay"),
                    ]
                )
            return "\n" + table.get_string(title="Model Information")
        else:
            # Rich-text rendering is not implemented; falls through to None.
            pass
| 2,954 | 27.970588 | 86 | py |
PseCo | PseCo-master/ssod/utils/hooks/submodules_evaluation.py | import os.path as osp
import torch.distributed as dist
from mmcv.parallel import is_module_wrapper
from mmcv.runner.hooks import HOOKS, LoggerHook, WandbLoggerHook
from mmdet.core import DistEvalHook
from torch.nn.modules.batchnorm import _BatchNorm
@HOOKS.register_module()
class SubModulesDistEvalHook(DistEvalHook):
    """Distributed eval hook that evaluates several submodules of one model.

    The wrapped model must expose ``submodules`` (candidate names) and
    ``inference_on`` (which submodule forward passes dispatch to); each
    submodule is evaluated in turn and its metrics are logged with the
    submodule name as a prefix.

    Args:
        evaluated_modules (list[str] | None): subset of submodules to
            evaluate; defaults to ``model.submodules``.
    """
    def __init__(self, *args, evaluated_modules=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.evaluated_modules = evaluated_modules
    def before_run(self, runner):
        # Fail fast if the model does not support submodule switching.
        if is_module_wrapper(runner.model):
            model = runner.model.module
        else:
            model = runner.model
        assert hasattr(model, "submodules")
        assert hasattr(model, "inference_on")
    def after_train_iter(self, runner):
        """Called after every training iter to evaluate the results."""
        if not self.by_epoch and self._should_evaluate(runner):
            # Flush logger hooks first (without committing the wandb step)
            # so training logs are not cleared below unrecorded.
            for hook in runner._hooks:
                if isinstance(hook, WandbLoggerHook):
                    _commit_state = hook.commit
                    hook.commit = False
                if isinstance(hook, LoggerHook):
                    hook.after_train_iter(runner)
                if isinstance(hook, WandbLoggerHook):
                    hook.commit = _commit_state
            runner.log_buffer.clear()
            self._do_evaluate(runner)
    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffer (running_mean
        # and running_var) is not supported in the DDP of pytorch,
        # which may cause the inconsistent performance of models in
        # different ranks, so we broadcast BatchNorm's buffers
        # of rank 0 to other ranks to avoid this.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module, _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)
        if not self._should_evaluate(runner):
            return
        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, ".eval_hook")
        if is_module_wrapper(runner.model):
            model_ref = runner.model.module
        else:
            model_ref = runner.model
        if not self.evaluated_modules:
            submodules = model_ref.submodules
        else:
            submodules = self.evaluated_modules
        key_scores = []
        from mmdet.apis import multi_gpu_test
        for submodule in submodules:
            # change inference on
            model_ref.inference_on = submodule
            results = multi_gpu_test(
                runner.model,
                self.dataloader,
                tmpdir=tmpdir,
                gpu_collect=self.gpu_collect,
            )
            if runner.rank == 0:
                key_score = self.evaluate(runner, results, prefix=submodule)
                if key_score is not None:
                    key_scores.append(key_score)
        if runner.rank == 0:
            runner.log_buffer.ready = True
            if len(key_scores) == 0:
                key_scores = [None]
            # Keep the best score across submodules for checkpointing.
            best_score = key_scores[0]
            for key_score in key_scores:
                if hasattr(self, "compare_func") and self.compare_func(
                    key_score, best_score
                ):
                    best_score = key_score
            print("\n")
            # runner.log_buffer.output["eval_iter_num"] = len(self.dataloader)
            if self.save_best:
                self._save_ckpt(runner, best_score)
    def evaluate(self, runner, results, prefix=""):
        """Evaluate the results.
        Args:
            runner (:obj:`mmcv.Runner`): The underlined training runner.
            results (list): Output results.
            prefix (str): Submodule name prepended to every metric key.
        """
        eval_res = self.dataloader.dataset.evaluate(
            results, logger=runner.logger, **self.eval_kwargs
        )
        for name, val in eval_res.items():
            runner.log_buffer.output[(".").join([prefix, name])] = val
        if self.save_best is not None:
            if self.key_indicator == "auto":
                # infer from eval_results
                self._init_rule(self.rule, list(eval_res.keys())[0])
            return eval_res[self.key_indicator]
        return None
| 4,468 | 35.631148 | 81 | py |
PseCo | PseCo-master/ssod/utils/hooks/evaluation.py | import os.path as osp
import torch.distributed as dist
from mmcv.runner.hooks import LoggerHook, WandbLoggerHook
from mmdet.core import DistEvalHook as BaseDistEvalHook
from torch.nn.modules.batchnorm import _BatchNorm
class DistEvalHook(BaseDistEvalHook):
    """mmdet DistEvalHook variant that flushes logger hooks before evaluating.

    Ensures the current training-iteration logs are emitted (without
    committing the wandb step) before the log buffer is cleared for
    evaluation output.
    """
    def after_train_iter(self, runner):
        """Called after every training iter to evaluate the results."""
        if not self.by_epoch and self._should_evaluate(runner):
            for hook in runner._hooks:
                if isinstance(hook, WandbLoggerHook):
                    _commit_state = hook.commit
                    hook.commit = False
                if isinstance(hook, LoggerHook):
                    hook.after_train_iter(runner)
                if isinstance(hook, WandbLoggerHook):
                    hook.commit = _commit_state
            runner.log_buffer.clear()
            self._do_evaluate(runner)
    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffer (running_mean
        # and running_var) is not supported in the DDP of pytorch,
        # which may cause the inconsistent performance of models in
        # different ranks, so we broadcast BatchNorm's buffers
        # of rank 0 to other ranks to avoid this.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module, _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)
        if not self._should_evaluate(runner):
            return
        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, ".eval_hook")
        from mmdet.apis import multi_gpu_test
        results = multi_gpu_test(
            runner.model, self.dataloader, tmpdir=tmpdir, gpu_collect=self.gpu_collect
        )
        if runner.rank == 0:
            print("\n")
            # runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            if self.save_best:
                self._save_ckpt(runner, key_score)
| 2,247 | 37.758621 | 86 | py |
PseCo | PseCo-master/thirdparty/mmdetection/setup.py | #!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import shutil
import sys
import warnings
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
    """Read and return the package long description from README.md."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
version_file = 'mmdet/version.py'
def get_version():
    """Execute ``mmdet/version.py`` and return its ``__version__`` string.

    NOTE(review): relies on exec() writing into this function's locals(),
    then reading it back via locals() — this works in CPython but is
    implementation-specific behavior.
    """
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=[]):
    """Describe a torch C++/CUDA extension for setup().

    Args:
        name (str): extension name inside ``module``.
        module (str): dotted package path the extension lives in.
        sources (list[str]): C++ sources, relative to the module directory.
        sources_cuda (list[str]): extra CUDA sources, compiled only when CUDA
            is available or FORCE_CUDA=1.

    Returns:
        CUDAExtension | CppExtension: the configured extension object.
    """
    define_macros = []
    extra_compile_args = {'cxx': []}
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [('WITH_CUDA', None)]
        extension = CUDAExtension
        # Disable CUDA half-precision operator overloads that clash with
        # torch's own definitions during extension builds.
        extra_compile_args['nvcc'] = [
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
        sources += sources_cuda
    else:
        print(f'Compiling {name} without CUDA')
        extension = CppExtension
    return extension(
        name=f'{module}.{name}',
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        define_macros=define_macros,
        extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.
    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs
    Returns:
        List[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    import re
    require_fpath = fname
    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                # Direct git URL requirement: keep the whole line as package.
                info['package'] = line
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info
    def parse_require_file(fpath):
        # Yield parsed infos for every non-empty, non-comment line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info
    def gen_packages_items():
        # Reassemble each parsed info into a requirement string.
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item
    packages = list(gen_packages_items())
    return packages
def add_mim_extension():
    """Add extra files that are required to support MIM into the package.
    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """
    # parse installment mode
    if 'develop' in sys.argv:
        # installed by `pip install -e .`
        mode = 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        # installed by `pip install .`
        # or create source distribution by `python setup.py sdist`
        mode = 'copy'
    else:
        # Not an install/dist invocation: nothing to do.
        return
    filenames = ['tools', 'configs', 'demo', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmdet', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale link/file/directory before recreating it.
            if osp.isfile(tar_path) or osp.islink(tar_path):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if mode == 'symlink':
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                os.symlink(src_relpath, tar_path)
            elif mode == 'copy':
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
    # Mirror MIM support files into the package before building/installing.
    add_mim_extension()
    setup(
        name='mmdet',
        version=get_version(),
        description='OpenMMLab Detection Toolbox and Benchmark',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='MMDetection Contributors',
        author_email='openmmlab@gmail.com',
        keywords='computer vision, object detection',
        url='https://github.com/open-mmlab/mmdetection',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        include_package_data=True,
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
        ],
        license='Apache License 2.0',
        # Dependency lists are parsed from the requirements/ text files.
        setup_requires=parse_requirements('requirements/build.txt'),
        tests_require=parse_requirements('requirements/tests.txt'),
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'optional': parse_requirements('requirements/optional.txt'),
        },
        ext_modules=[],
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
| 7,838 | 34.958716 | 125 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
    """Build and parse the command-line arguments for testing/evaluation.

    Returns:
        argparse.Namespace: parsed arguments, with ``LOCAL_RANK`` mirrored
        into the environment and the deprecated ``--options`` folded into
        ``--eval-options``.
    """
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecate), '
        'change to --eval-options instead.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # torch.distributed.launch passes the rank via --local_rank; mirror it
    # into the environment for downstream consumers.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options is the deprecated alias of --eval-options; they are exclusive.
    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both '
            'specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def main():
    """Entry point: run a detector over the test set, then save, format,
    evaluate and/or visualize the results depending on the CLI flags."""
    args = parse_args()
    # At least one output action must be requested, otherwise the run would
    # do all the inference work and discard the results.
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')
    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Pretrained weights are irrelevant at test time: the checkpoint is
    # loaded explicitly below (this also covers RFP backbones in the neck).
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None
    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    rank, _ = get_dist_info()
    # allows not to create
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)
    rank, _ = get_dist_info()
    # Only rank 0 writes/evaluates; other workers have already contributed
    # their results through the collection step above.
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if args.work_dir is not None and rank == 0:
                mmcv.dump(metric_dict, json_file)
if __name__ == '__main__':
    main()
| 9,363 | 38.179916 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/train.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
    """Build and parse the command-line arguments for training.

    Returns:
        argparse.Namespace: parsed arguments, with ``LOCAL_RANK`` mirrored
        into the environment and the deprecated ``--options`` folded into
        ``--cfg-options``.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # --gpus and --gpu-ids are two ways of expressing the same thing, so
    # only one of them may be supplied.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # torch.distributed.launch passes the rank via --local_rank; mirror it
    # into the environment for downstream consumers.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options is the deprecated alias of --cfg-options; they are exclusive.
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
def main():
    """Entry point: set up config/logging/seeds, build model and datasets,
    then hand off to ``train_detector``."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')
    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    datasets = [build_dataset(cfg.data.train)]
    # A two-stage workflow (train + val) also needs the val dataset, built
    # with the *training* pipeline so losses can be computed on it.
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7],
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
if __name__ == '__main__':
    main()
| 6,962 | 35.647368 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/deployment/test_torchserver.py | from argparse import ArgumentParser
import numpy as np
import requests
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.core import bbox2result
def parse_args():
    """Build and parse CLI arguments for comparing local vs. served results."""
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('model_name', help='The model name in the server')
    parser.add_argument(
        '--inference-addr',
        default='127.0.0.1:8080',
        help='Address and port of the inference server')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.5, help='bbox score threshold')
    args = parser.parse_args()
    return args
def parse_result(input, model_class):
    """Convert TorchServe JSON detections into mmdet's per-class list format.

    Each entry of *input* is a dict with 'bbox', 'class_name' and 'score';
    *model_class* is the ordered sequence of class names used for lookup.
    """
    boxes = [det['bbox'] for det in input]
    scores = [[det['score']] for det in input]
    class_ids = [model_class.index(det['class_name']) for det in input]
    # Stack the box coordinates with the score column -> (N, 5) array.
    bboxes = np.append(boxes, scores, axis=1)
    labels = np.array(class_ids)
    return bbox2result(bboxes, labels, len(model_class))
def main(args):
    """Run inference locally and through a TorchServe endpoint, then verify
    both result sets agree class by class."""
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # test a single image
    model_result = inference_detector(model, args.img)
    # NOTE(review): 0.5 appears to mirror the server-side handler threshold
    # rather than args.score_thr — confirm against MMdetHandler.threshold.
    for i, anchor_set in enumerate(model_result):
        anchor_set = anchor_set[anchor_set[:, 4] >= 0.5]
        model_result[i] = anchor_set
    # show the results
    show_result_pyplot(
        model,
        args.img,
        model_result,
        score_thr=args.score_thr,
        title='pytorch_result')
    # POST the raw image bytes to the TorchServe prediction endpoint.
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as image:
        response = requests.post(url, image)
    server_result = parse_result(response.json(), model.CLASSES)
    show_result_pyplot(
        model,
        args.img,
        server_result,
        score_thr=args.score_thr,
        title='server_result')
    # Local and served detections must match numerically for every class.
    for i in range(len(model.CLASSES)):
        assert np.allclose(model_result[i], server_result[i])
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 2,357 | 30.44 | 77 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/deployment/mmdet2torchserve.py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
    config_file: str,
    checkpoint_file: str,
    output_folder: str,
    model_name: str,
    model_version: str = '1.0',
    force: bool = False,
):
    """Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
    Args:
        config_file:
            In MMDetection config format.
            The contents vary for each task repository.
        checkpoint_file:
            In MMDetection checkpoint format.
            The contents vary for each task repository.
        output_folder:
            Folder where `{model_name}.mar` will be created.
            The file created will be in TorchServe archive format.
        model_name:
            If not None, used for naming the `{model_name}.mar` file
            that will be created under `output_folder`.
            If None, `{Path(checkpoint_file).stem}` will be used.
        model_version:
            Model's version.
        force:
            If True, if there is an existing `{model_name}.mar`
            file under `output_folder` it will be overwritten.
    """
    mmcv.mkdir_or_exist(output_folder)
    config = mmcv.Config.fromfile(config_file)
    with TemporaryDirectory() as tmpdir:
        # `torch-model-archiver` needs the (possibly merged) config as a
        # concrete file on disk, so dump it into a temporary directory.
        config.dump(f'{tmpdir}/config.py')
        # Mirror the CLI arguments that `torch-model-archiver` expects.
        args = Namespace(
            **{
                'model_file': f'{tmpdir}/config.py',
                'serialized_file': checkpoint_file,
                'handler': f'{Path(__file__).parent}/mmdet_handler.py',
                'model_name': model_name or Path(checkpoint_file).stem,
                'version': model_version,
                'export_path': output_folder,
                'force': force,
                'requirements_file': None,
                'extra_files': None,
                'runtime': 'python',
                'archive_format': 'default'
            })
        manifest = ModelExportUtils.generate_manifest_json(args)
        package_model(args, manifest)
def parse_args():
    """Build and parse CLI arguments for the MMDetection -> TorchServe tool."""
    parser = ArgumentParser(
        description='Convert MMDetection models to TorchServe `.mar` format.')
    parser.add_argument('config', type=str, help='config file path')
    parser.add_argument('checkpoint', type=str, help='checkpoint file path')
    parser.add_argument(
        '--output-folder',
        type=str,
        required=True,
        help='Folder where `{model_name}.mar` will be created.')
    parser.add_argument(
        '--model-name',
        type=str,
        default=None,
        help='If not None, used for naming the `{model_name}.mar`'
        'file that will be created under `output_folder`.'
        'If None, `{Path(checkpoint_file).stem}` will be used.')
    parser.add_argument(
        '--model-version',
        type=str,
        default='1.0',
        help='Number used for versioning.')
    parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='overwrite the existing `{model_name}.mar`')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = parse_args()
    # package_model is None when `torch-model-archiver` failed to import
    # at module load time (see the try/except at the top of the file).
    if package_model is None:
        raise ImportError('`torch-model-archiver` is required.'
                          'Try: pip install torch-model-archiver')
    mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
                     args.model_name, args.model_version, args.force)
| 3,693 | 32.279279 | 78 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/deployment/onnx2tensorrt.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import warnings
import numpy as np
import onnx
import torch
from mmcv import Config
from mmcv.tensorrt import is_tensorrt_plugin_loaded, onnx2trt, save_trt_engine
from mmdet.core.export import preprocess_example_input
from mmdet.core.export.model_wrappers import (ONNXRuntimeDetector,
TensorRTDetector)
from mmdet.datasets import DATASETS
def get_GiB(x: int):
    """Return *x* GiB expressed in bytes (1 GiB == 2**30 bytes)."""
    return x * 2**30
def onnx2tensorrt(onnx_file,
                  trt_file,
                  input_config,
                  verify=False,
                  show=False,
                  workspace_size=1,
                  verbose=False):
    """Build a TensorRT engine from an ONNX model and optionally verify that
    ONNXRuntime and TensorRT produce numerically matching detections.

    Args:
        onnx_file (str): path of the input ONNX model.
        trt_file (str): path where the serialized engine is written.
        input_config (dict): holds 'min_shape'/'opt_shape'/'max_shape' for the
            engine's optimization profile, plus preprocessing info.
        verify (bool): if True, run both backends on one example image and
            compare outputs.
        show (bool): if True, display the visualizations instead of saving.
        workspace_size (int): max TensorRT workspace, in GiB.
        verbose (bool): enable verbose TensorRT logging.
    """
    import tensorrt as trt
    onnx_model = onnx.load(onnx_file)
    max_shape = input_config['max_shape']
    min_shape = input_config['min_shape']
    opt_shape = input_config['opt_shape']
    fp16_mode = False
    # create trt engine and wraper
    opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]}
    max_workspace_size = get_GiB(workspace_size)
    trt_engine = onnx2trt(
        onnx_model,
        opt_shape_dict,
        log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
        fp16_mode=fp16_mode,
        max_workspace_size=max_workspace_size)
    save_dir, _ = osp.split(trt_file)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    save_trt_engine(trt_engine, trt_file)
    print(f'Successfully created TensorRT engine: {trt_file}')
    if verify:
        # prepare input
        one_img, one_meta = preprocess_example_input(input_config)
        img_list, img_meta_list = [one_img], [[one_meta]]
        img_list = [_.cuda().contiguous() for _ in img_list]
        # wrap ONNX and TensorRT model
        # NOTE(review): CLASSES is a module-level global assigned in the
        # __main__ block of this script — verify it is set before calling
        # this function with verify=True from elsewhere.
        onnx_model = ONNXRuntimeDetector(onnx_file, CLASSES, device_id=0)
        trt_model = TensorRTDetector(trt_file, CLASSES, device_id=0)
        # inference with wrapped model
        with torch.no_grad():
            onnx_results = onnx_model(
                img_list, img_metas=img_meta_list, return_loss=False)[0]
            trt_results = trt_model(
                img_list, img_metas=img_meta_list, return_loss=False)[0]
        if show:
            out_file_ort, out_file_trt = None, None
        else:
            out_file_ort, out_file_trt = 'show-ort.png', 'show-trt.png'
        show_img = one_meta['show_img']
        score_thr = 0.3
        onnx_model.show_result(
            show_img,
            onnx_results,
            score_thr=score_thr,
            show=True,
            win_name='ONNXRuntime',
            out_file=out_file_ort)
        trt_model.show_result(
            show_img,
            trt_results,
            score_thr=score_thr,
            show=True,
            win_name='TensorRT',
            out_file=out_file_trt)
        with_mask = trt_model.with_masks
        # compare a part of result
        if with_mask:
            compare_pairs = list(zip(onnx_results, trt_results))
        else:
            compare_pairs = [(onnx_results, trt_results)]
        err_msg = 'The numerical values are different between Pytorch' + \
                  ' and ONNX, but it does not necessarily mean the' + \
                  ' exported ONNX model is problematic.'
        # check the numerical value
        for onnx_res, pytorch_res in compare_pairs:
            for o_res, p_res in zip(onnx_res, pytorch_res):
                np.testing.assert_allclose(
                    o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
        print('The numerical values are the same between Pytorch and ONNX')
def parse_normalize_cfg(test_pipeline):
    """Extract the single ``Normalize`` transform config from a test pipeline.

    Scans the pipeline for the first step carrying a ``transforms`` list and
    returns its unique entry whose ``type`` is ``'Normalize'``.
    """
    # Locate the first pipeline step that carries a `transforms` list.
    transforms = next(
        (step['transforms'] for step in test_pipeline if 'transforms' in step),
        None)
    assert transforms is not None, 'Failed to find `transforms`'
    norm_configs = [cfg for cfg in transforms if cfg['type'] == 'Normalize']
    assert len(norm_configs) == 1, '`norm_config` should only have one'
    return norm_configs[0]
def parse_args():
    """Build and parse CLI arguments for the ONNX -> TensorRT conversion."""
    parser = argparse.ArgumentParser(
        description='Convert MMDetection models from ONNX to TensorRT')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('model', help='Filename of input ONNX model')
    parser.add_argument(
        '--trt-file',
        type=str,
        default='tmp.trt',
        help='Filename of output TensorRT engine')
    parser.add_argument(
        '--input-img', type=str, default='', help='Image for test')
    parser.add_argument(
        '--show', action='store_true', help='Whether to show output results')
    parser.add_argument(
        '--dataset',
        type=str,
        default='coco',
        help='Dataset name. This argument is deprecated and will be \
        removed in future releases.')
    parser.add_argument(
        '--verify',
        action='store_true',
        help='Verify the outputs of ONNXRuntime and TensorRT')
    parser.add_argument(
        '--verbose',
        action='store_true',
        help='Whether to verbose logging messages while creating \
        TensorRT engine. Defaults to False.')
    parser.add_argument(
        '--to-rgb',
        action='store_false',
        help='Feed model with RGB or BGR image. Default is RGB. This \
        argument is deprecated and will be removed in future releases.')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[400, 600],
        help='Input size of the model')
    parser.add_argument(
        '--mean',
        type=float,
        nargs='+',
        default=[123.675, 116.28, 103.53],
        help='Mean value used for preprocess input data. This argument \
        is deprecated and will be removed in future releases.')
    parser.add_argument(
        '--std',
        type=float,
        nargs='+',
        default=[58.395, 57.12, 57.375],
        help='Variance value used for preprocess input data. \
        This argument is deprecated and will be removed in future releases.')
    parser.add_argument(
        '--min-shape',
        type=int,
        nargs='+',
        default=None,
        help='Minimum input size of the model in TensorRT')
    parser.add_argument(
        '--max-shape',
        type=int,
        nargs='+',
        default=None,
        help='Maximum input size of the model in TensorRT')
    parser.add_argument(
        '--workspace-size',
        type=int,
        default=1,
        help='Max workspace size in GiB')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
    args = parse_args()
    warnings.warn(
        'Arguments like `--to-rgb`, `--mean`, `--std`, `--dataset` would be \
        parsed directly from config file and are deprecated and will be \
        removed in future releases.')
    if not args.input_img:
        args.input_img = osp.join(osp.dirname(__file__), '../demo/demo.jpg')
    cfg = Config.fromfile(args.config)

    def parse_shape(shape):
        """Normalize a CLI shape spec ([size] or [height, width]) to NCHW."""
        if len(shape) == 1:
            shape = (1, 3, shape[0], shape[0])
        elif len(shape) == 2:
            # BUGFIX: this branch previously tested `len(args.shape)` instead
            # of `len(shape)`, so --min-shape/--max-shape were mis-parsed (or
            # wrongly rejected) whenever their length differed from --shape.
            shape = (1, 3) + tuple(shape)
        else:
            raise ValueError('invalid input shape')
        return shape
    if args.shape:
        input_shape = parse_shape(args.shape)
    else:
        # Fall back to the test pipeline's img_scale (w, h) from the config.
        img_scale = cfg.test_pipeline[1]['img_scale']
        input_shape = (1, 3, img_scale[1], img_scale[0])
    # min/max shapes default to the optimal input shape when not given.
    if not args.max_shape:
        max_shape = input_shape
    else:
        max_shape = parse_shape(args.max_shape)
    if not args.min_shape:
        min_shape = input_shape
    else:
        min_shape = parse_shape(args.min_shape)
    dataset = DATASETS.get(cfg.data.test['type'])
    assert (dataset is not None)
    CLASSES = dataset.CLASSES
    normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
    input_config = {
        'min_shape': min_shape,
        'opt_shape': input_shape,
        'max_shape': max_shape,
        'input_shape': input_shape,
        'input_path': args.input_img,
        'normalize_cfg': normalize_cfg
    }
    # Create TensorRT engine
    onnx2tensorrt(
        args.model,
        args.trt_file,
        input_config,
        verify=args.verify,
        show=args.show,
        workspace_size=args.workspace_size,
        verbose=args.verbose)
| 8,515 | 32.396078 | 78 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/deployment/mmdet_handler.py | # Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
    """TorchServe inference handler for MMDetection detectors.

    The model archive is expected to contain the checkpoint (referenced as
    ``serializedFile`` in the manifest) and an MMDetection config saved as
    ``config.py``.
    """
    # Detections scoring below this value are dropped from the response.
    threshold = 0.5
    def initialize(self, context):
        """Build the detector from the model archive described by *context*."""
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        # Pin to the worker's assigned GPU when CUDA is available, otherwise
        # fall back to the plain CPU device string.
        self.device = torch.device(self.map_location + ':' +
                                   str(properties.get('gpu_id')) if torch.cuda.
                                   is_available() else self.map_location)
        self.manifest = context.manifest
        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        self.config_file = os.path.join(model_dir, 'config.py')
        self.model = init_detector(self.config_file, checkpoint, self.device)
        self.initialized = True
    def preprocess(self, data):
        """Decode request payloads (raw bytes or base64 strings) to images."""
        images = []
        for row in data:
            image = row.get('data') or row.get('body')
            if isinstance(image, str):
                # The request body may carry the image as a base64 string.
                image = base64.b64decode(image)
            image = mmcv.imfrombytes(image)
            images.append(image)
        return images
    def inference(self, data, *args, **kwargs):
        """Run the detector on the list of decoded images."""
        results = inference_detector(self.model, data)
        return results
    def postprocess(self, data):
        """Convert raw detector output into per-image lists of detection
        dicts (class_name / bbox / score), thresholded by ``self.threshold``."""
        # Format output following the example ObjectDetectionHandler format
        output = []
        for image_index, image_result in enumerate(data):
            output.append([])
            # Mask models return (bbox_result, segm_result) tuples; only the
            # bbox part is used for the JSON response.
            if isinstance(image_result, tuple):
                bbox_result, segm_result = image_result
                if isinstance(segm_result, tuple):
                    segm_result = segm_result[0]  # ms rcnn
            else:
                bbox_result, segm_result = image_result, None
            for class_index, class_result in enumerate(bbox_result):
                class_name = self.model.CLASSES[class_index]
                for bbox in class_result:
                    # Each row is [x1, y1, x2, y2, score].
                    bbox_coords = bbox[:-1].tolist()
                    score = float(bbox[-1])
                    if score >= self.threshold:
                        output[image_index].append({
                            'class_name': class_name,
                            'bbox': bbox_coords,
                            'score': score
                        })
        return output
| 2,560 | 34.569444 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/deployment/pytorch2onnx.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import warnings
from functools import partial
import numpy as np
import onnx
import torch
from mmcv import Config, DictAction
from mmdet.core.export import build_model_from_cfg, preprocess_example_input
from mmdet.core.export.model_wrappers import ONNXRuntimeDetector
def pytorch2onnx(model,
                 input_img,
                 input_shape,
                 normalize_cfg,
                 opset_version=11,
                 show=False,
                 output_file='tmp.onnx',
                 verify=False,
                 test_img=None,
                 do_simplify=False,
                 dynamic_export=None,
                 skip_postprocess=False):
    """Export an MMDetection model to ONNX and optionally verify the export.

    Args:
        model (nn.Module): Detector to export; its ``forward`` is patched
            temporarily during export.
        input_img (str): Path of the image used to trace the model.
        input_shape (tuple): ``(1, 3, H, W)`` shape the image is resized to.
        normalize_cfg (dict): ``mean``/``std`` config used for preprocessing.
        opset_version (int): ONNX opset to export with. Default: 11.
        show (bool): Show export verbosity and interactive result windows.
        output_file (str): Destination ONNX file. Default: 'tmp.onnx'.
        verify (bool): Re-run the model in ONNXRuntime and compare outputs
            numerically against PyTorch.
        test_img (str | None): Optional different image to verify with; falls
            back to ``input_img`` when None.
        do_simplify (bool): Run onnx-simplifier on the exported graph.
        dynamic_export (bool | None): Export with dynamic batch/height/width
            axes.
        skip_postprocess (bool): Export ``forward_dummy`` only, without the
            detection post-processing.
    """
    input_config = {
        'input_shape': input_shape,
        'input_path': input_img,
        'normalize_cfg': normalize_cfg
    }
    # prepare input
    one_img, one_meta = preprocess_example_input(input_config)
    img_list, img_meta_list = [one_img], [[one_meta]]
    if skip_postprocess:
        warnings.warn('Not all models support export onnx without post '
                      'process, especially two stage detectors!')
        model.forward = model.forward_dummy
        torch.onnx.export(
            model,
            one_img,
            output_file,
            input_names=['input'],
            export_params=True,
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=show,
            opset_version=opset_version)
        print(f'Successfully exported ONNX model without '
              f'post process: {output_file}')
        return
    # replace original forward function so tracing sees a tensor-only API
    origin_forward = model.forward
    model.forward = partial(
        model.forward,
        img_metas=img_meta_list,
        return_loss=False,
        rescale=False)
    output_names = ['dets', 'labels']
    if model.with_mask:
        output_names.append('masks')
    input_name = 'input'
    dynamic_axes = None
    if dynamic_export:
        dynamic_axes = {
            input_name: {
                0: 'batch',
                2: 'height',
                3: 'width'
            },
            'dets': {
                0: 'batch',
                1: 'num_dets',
            },
            'labels': {
                0: 'batch',
                1: 'num_dets',
            },
        }
        if model.with_mask:
            dynamic_axes['masks'] = {0: 'batch', 1: 'num_dets'}
    torch.onnx.export(
        model,
        img_list,
        output_file,
        input_names=[input_name],
        output_names=output_names,
        export_params=True,
        keep_initializers_as_inputs=True,
        do_constant_folding=True,
        verbose=show,
        opset_version=opset_version,
        dynamic_axes=dynamic_axes)
    model.forward = origin_forward
    # get the custom op path
    ort_custom_op_path = ''
    try:
        from mmcv.ops import get_onnxruntime_op_path
        ort_custom_op_path = get_onnxruntime_op_path()
    except (ImportError, ModuleNotFoundError):
        warnings.warn('If input model has custom op from mmcv, \
            you may have to build mmcv with ONNXRuntime from source.')
    if do_simplify:
        import onnxsim
        from mmdet import digit_version
        min_required_version = '0.3.0'
        assert digit_version(onnxsim.__version__) >= digit_version(
            min_required_version
        ), f'Requires to install onnx-simplify>={min_required_version}'
        input_dic = {'input': img_list[0].detach().cpu().numpy()}
        onnxsim.simplify(
            output_file, input_data=input_dic, custom_lib=ort_custom_op_path)
    print(f'Successfully exported ONNX model: {output_file}')
    if verify:
        # check by onnx
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)
        # wrap onnx model
        onnx_model = ONNXRuntimeDetector(output_file, model.CLASSES, 0)
        if dynamic_export:
            # scale up to test dynamic shape
            h, w = [int((_ * 1.5) // 32 * 32) for _ in input_shape[2:]]
            h, w = min(1344, h), min(1344, w)
            input_config['input_shape'] = (1, 3, h, w)
        if test_img is None:
            input_config['input_path'] = input_img
        else:
            # fix: `test_img` was previously never written into the config,
            # so verification always re-used `input_img`
            input_config['input_path'] = test_img
        # prepare input once again
        one_img, one_meta = preprocess_example_input(input_config)
        img_list, img_meta_list = [one_img], [[one_meta]]
        # get pytorch output
        with torch.no_grad():
            pytorch_results = model(
                img_list,
                img_metas=img_meta_list,
                return_loss=False,
                rescale=True)[0]
        img_list = [_.cuda().contiguous() for _ in img_list]
        if dynamic_export:
            img_list = img_list + [_.flip(-1).contiguous() for _ in img_list]
            img_meta_list = img_meta_list * 2
        # get onnx output
        onnx_results = onnx_model(
            img_list, img_metas=img_meta_list, return_loss=False)[0]
        # visualize predictions: save to files unless showing interactively
        score_thr = 0.3
        if show:
            out_file_ort, out_file_pt = None, None
        else:
            out_file_ort, out_file_pt = 'show-ort.png', 'show-pt.png'
        show_img = one_meta['show_img']
        model.show_result(
            show_img,
            pytorch_results,
            score_thr=score_thr,
            show=True,
            win_name='PyTorch',
            out_file=out_file_pt)
        onnx_model.show_result(
            show_img,
            onnx_results,
            score_thr=score_thr,
            show=True,
            win_name='ONNXRuntime',
            out_file=out_file_ort)
        # compare a part of result
        if model.with_mask:
            compare_pairs = list(zip(onnx_results, pytorch_results))
        else:
            compare_pairs = [(onnx_results, pytorch_results)]
        err_msg = 'The numerical values are different between Pytorch' + \
                  ' and ONNX, but it does not necessarily mean the' + \
                  ' exported ONNX model is problematic.'
        # check the numerical value
        for onnx_res, pytorch_res in compare_pairs:
            for o_res, p_res in zip(onnx_res, pytorch_res):
                np.testing.assert_allclose(
                    o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
        print('The numerical values are the same between Pytorch and ONNX')
def parse_normalize_cfg(test_pipeline):
    """Return the single ``Normalize`` transform config of a test pipeline."""
    transforms = None
    for stage in test_pipeline:
        if 'transforms' in stage:
            transforms = stage['transforms']
            break
    assert transforms is not None, 'Failed to find `transforms`'
    norm_config_li = [t for t in transforms if t['type'] == 'Normalize']
    assert len(norm_config_li) == 1, '`norm_config` should only have one'
    return norm_config_li[0]
def parse_args():
    """Build and parse the pytorch2onnx CLI arguments.

    Returns:
        argparse.Namespace: Parsed options. ``--dataset``, ``--mean`` and
        ``--std`` are kept for backward compatibility but deprecated (the
        values are read from the config file instead).
    """
    parser = argparse.ArgumentParser(
        description='Convert MMDetection models to ONNX')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--input-img', type=str, help='Images for input')
    parser.add_argument(
        '--show',
        action='store_true',
        help='Show onnx graph and detection outputs')
    parser.add_argument('--output-file', type=str, default='tmp.onnx')
    parser.add_argument('--opset-version', type=int, default=11)
    parser.add_argument(
        '--test-img', type=str, default=None, help='Images for test')
    parser.add_argument(
        '--dataset',
        type=str,
        default='coco',
        help='Dataset name. This argument is deprecated and will be removed \
        in future releases.')
    parser.add_argument(
        '--verify',
        action='store_true',
        help='verify the onnx model output against pytorch output')
    parser.add_argument(
        '--simplify',
        action='store_true',
        help='Whether to simplify onnx model.')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[800, 1216],
        help='input image size')
    parser.add_argument(
        '--mean',
        type=float,
        nargs='+',
        default=[123.675, 116.28, 103.53],
        help='mean value used for preprocess input data.This argument \
        is deprecated and will be removed in future releases.')
    parser.add_argument(
        '--std',
        type=float,
        nargs='+',
        default=[58.395, 57.12, 57.375],
        help='variance value used for preprocess input data. '
        'This argument is deprecated and will be removed in future releases.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='Override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--dynamic-export',
        action='store_true',
        help='Whether to export onnx with dynamic axis.')
    parser.add_argument(
        '--skip-postprocess',
        action='store_true',
        help='Whether to export model without post process. Experimental '
        'option. We do not guarantee the correctness of the exported '
        'model.')
    args = parser.parse_args()
    return args
# Script entry point: parse CLI args, build the detector and export it.
if __name__ == '__main__':
    args = parse_args()
    warnings.warn('Arguments like `--mean`, `--std`, `--dataset` would be \
        parsed directly from config file and are deprecated and \
        will be removed in future releases.')
    assert args.opset_version == 11, 'MMDet only support opset 11 now'
    try:
        from mmcv.onnx.symbolic import register_extra_symbolics
    except ModuleNotFoundError:
        raise NotImplementedError('please update mmcv to version>=v1.0.4')
    register_extra_symbolics(args.opset_version)
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # derive the (N, C, H, W) input shape from --shape (or the test pipeline)
    if args.shape is None:
        img_scale = cfg.test_pipeline[1]['img_scale']
        input_shape = (1, 3, img_scale[1], img_scale[0])
    elif len(args.shape) == 1:
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (1, 3) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    # build the model and load checkpoint
    model = build_model_from_cfg(args.config, args.checkpoint,
                                 args.cfg_options)
    if not args.input_img:
        args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg')
    normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
    # convert model to onnx file
    pytorch2onnx(
        model,
        args.input_img,
        input_shape,
        normalize_cfg,
        opset_version=args.opset_version,
        show=args.show,
        output_file=args.output_file,
        verify=args.verify,
        test_img=args.test_img,
        do_simplify=args.simplify,
        dynamic_export=args.dynamic_export,
        skip_postprocess=args.skip_postprocess)
| 11,474 | 32.949704 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/model_converters/selfsup2mmdet.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def moco_convert(src, dst):
    """Convert keys in pycls pretrained moco models to mmdet style.

    Only the query-encoder weights (``module.encoder_q.*``) are kept; the
    prefix is stripped so the keys match a plain mmdet backbone.
    """
    query_prefix = 'module.encoder_q.'
    # load caffe model
    blobs = torch.load(src)['state_dict']
    # convert to pytorch style
    state_dict = OrderedDict()
    for old_name, tensor in blobs.items():
        if not old_name.startswith(query_prefix):
            continue
        new_name = old_name.replace(query_prefix, '')
        state_dict[new_name] = tensor
        print(old_name, '->', new_name)
    # save checkpoint
    torch.save({'state_dict': state_dict}, dst)
def main():
    """CLI entry point: dispatch checkpoint conversion on the chosen method."""
    cli = argparse.ArgumentParser(description='Convert model keys')
    cli.add_argument('src', help='src detectron model path')
    cli.add_argument('dst', help='save path')
    cli.add_argument(
        '--selfsup', type=str, choices=['moco', 'swav'], help='save path')
    opts = cli.parse_args()
    # SWAV checkpoints already use plain backbone keys; only MoCo needs work
    if opts.selfsup == 'moco':
        moco_convert(opts.src, opts.dst)
    elif opts.selfsup == 'swav':
        print('SWAV does not need to convert the keys')
if __name__ == '__main__':
    main()
| 1,243 | 27.930233 | 74 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/model_converters/publish_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
def parse_args():
    """Parse the input/output checkpoint paths from the command line."""
    arg_parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    arg_parser.add_argument('in_file', help='input checkpoint filename')
    arg_parser.add_argument('out_file', help='output checkpoint filename')
    return arg_parser.parse_args()
def process_checkpoint(in_file, out_file):
    """Strip training-only state and publish a sha-stamped checkpoint.

    The optimizer state is dropped to shrink the file, the result is written
    to ``out_file`` and then renamed to ``<stem>-<sha256[:8]>.pth``.

    Args:
        in_file (str): Path of the checkpoint to publish.
        out_file (str): Path the published checkpoint is written to; the
            final file additionally carries an 8-char sha256 suffix.
    """
    import hashlib
    import os
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    checkpoint.pop('optimizer', None)
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    # NOTE: compare parsed version numbers, not strings — '1.10' < '1.6'
    # lexicographically, which made the original string check wrong for
    # torch >= 1.10.
    major, minor = (int(v) for v in torch.__version__.split('.')[:2])
    if (major, minor) >= (1, 6):
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    # hash in-process instead of shelling out to `sha256sum`, and rename
    # synchronously instead of the original fire-and-forget `mv` subprocess
    # (which could still be running when the script exits).
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    out_file_name = out_file[:-4] if out_file.endswith('.pth') else out_file
    final_file = out_file_name + f'-{sha[:8]}.pth'
    os.rename(out_file, final_file)
def main():
    """Read the CLI arguments and publish the checkpoint."""
    options = parse_args()
    process_checkpoint(options.in_file, options.out_file)
if __name__ == '__main__':
    main()
| 1,301 | 28.590909 | 78 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/model_converters/regnet2mmdet.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
    """Map a pycls stem parameter onto the torchvision-style conv1/bn1 names."""
    torch_key = model_key.replace('stem.conv', 'conv1').replace('stem.bn', 'bn1')
    state_dict[torch_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {torch_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
    """Map the pycls classification-head fc parameter onto the plain fc name."""
    torch_key = model_key.replace('head.fc', 'fc')
    state_dict[torch_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {torch_key}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
    """Map one pycls res-stage parameter onto torchvision-style ResNet naming.

    pycls keys look like ``s<stage>.b<block>.<module>[.<branch>].<param>`` and
    are rewritten to ``layer<stage>.<block - 1>.<...>.<param>``.

    Args:
        model_key (str): Source parameter name.
        model_weight (Tensor): Parameter value to store.
        state_dict (dict): Destination state dict, updated in place.
        converted_names (set): Names already handled, updated in place.

    Raises:
        ValueError: If the key does not match any known pycls pattern.
            (The original code crashed with an unhelpful ``NameError`` on
            unknown branch tags under the ``f`` module.)
    """
    # branch tag inside the bottleneck ('f') module -> torch submodule name
    _branch_map = {
        'a': 'conv1',
        'b': 'conv2',
        'c': 'conv3',
        'a_bn': 'bn1',
        'b_bn': 'bn2',
        'c_bn': 'bn3',
    }
    split_keys = model_key.split('.')
    layer, block, module = split_keys[:3]
    block_id = int(block[1:])
    layer_name = f'layer{int(layer[1:])}'
    block_name = f'{block_id - 1}'
    if block_id == 1 and module == 'bn':
        # only the first block of a stage carries the downsample path
        new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
    elif block_id == 1 and module == 'proj':
        new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
    elif module == 'f':
        if split_keys[3] not in _branch_map:
            raise ValueError(f'Unsupported conversion of key {model_key}')
        module_name = _branch_map[split_keys[3]]
        new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
    else:
        raise ValueError(f'Unsupported conversion of key {model_key}')
    print(f'Convert {model_key} to {new_key}')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
def convert(src, dst):
    """Convert keys in pycls pretrained RegNet models to mmdet style."""
    # pycls stores everything under 'model_state'
    pycls_model = torch.load(src)
    blobs = pycls_model['model_state']
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    for name, weight in blobs.items():
        if 'stem' in name:
            convert_stem(name, weight, state_dict, converted_names)
        elif 'head' in name:
            convert_head(name, weight, state_dict, converted_names)
        elif name.startswith('s'):
            convert_reslayer(name, weight, state_dict, converted_names)
    # report anything that did not match the patterns above
    for name in blobs:
        if name not in converted_names:
            print(f'not converted: {name}')
    # save checkpoint
    torch.save({'state_dict': state_dict}, dst)
def main():
    """CLI wrapper around :func:`convert`."""
    cli = argparse.ArgumentParser(description='Convert model keys')
    cli.add_argument('src', help='src detectron model path')
    cli.add_argument('dst', help='save path')
    opts = cli.parse_args()
    convert(opts.src, opts.dst)
if __name__ == '__main__':
    main()
| 3,063 | 32.67033 | 77 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/model_converters/upgrade_model_version.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import re
import tempfile
from collections import OrderedDict
import torch
from mmcv import Config
def is_head(key):
    """Whether ``key`` names a parameter of one of the detector heads."""
    head_prefixes = ('bbox_head', 'mask_head', 'semantic_head', 'grid_head',
                     'mask_iou_head')
    return key.startswith(head_prefixes)
def parse_config(config_strings):
    """Parse model-structure flags out of an embedded config string.

    Args:
        config_strings (str): Contents of the training config stored in the
            checkpoint's ``meta`` field.

    Returns:
        tuple: ``(is_two_stage, is_ssd, is_retina, reg_cls_agnostic)``.
    """
    import os
    temp_file = tempfile.NamedTemporaryFile()
    config_path = f'{temp_file.name}.py'
    with open(config_path, 'w') as f:
        f.write(config_strings)
    try:
        config = Config.fromfile(config_path)
    finally:
        # the sibling `.py` file is created manually, so it must be removed
        # manually too (the original code leaked it and never closed the
        # tempfile on exceptions)
        temp_file.close()
        os.remove(config_path)
    is_two_stage = True
    is_ssd = False
    is_retina = False
    reg_cls_agnostic = False
    if 'rpn_head' not in config.model:
        is_two_stage = False
        # check whether it is SSD
        if config.model.bbox_head.type == 'SSDHead':
            is_ssd = True
        elif config.model.bbox_head.type == 'RetinaHead':
            is_retina = True
    elif isinstance(config.model['bbox_head'], list):
        reg_cls_agnostic = True
    elif 'reg_class_agnostic' in config.model.bbox_head:
        reg_cls_agnostic = config.model.bbox_head \
            .reg_class_agnostic
    return is_two_stage, is_ssd, is_retina, reg_cls_agnostic
def reorder_cls_channel(val, num_classes=81):
    """Move the class-0 (old background) channel behind the foreground ones.

    Handles 1-D bias vectors and weight tensors whose first axis is either a
    class dimension or an (anchor x class) dimension; anything else is
    returned untouched.
    """
    if val.dim() == 1:
        # bias
        return torch.cat((val[1:], val[:1]), dim=0)
    out_channels, in_channels = val.shape[:2]
    if out_channels != num_classes and out_channels % num_classes == 0:
        # conv_cls for softmax output: rotate within each per-anchor group
        grouped = val.reshape(-1, num_classes, in_channels, *val.shape[2:])
        rotated = torch.cat((grouped[:, 1:], grouped[:, :1]), dim=1)
        return rotated.reshape(val.size())
    if out_channels == num_classes:
        # fc_cls
        return torch.cat((val[1:], val[:1]), dim=0)
    # agnostic | retina_cls | rpn_cls
    return val
def truncate_cls_channel(val, num_classes=81):
    """Drop one class channel (mask logits) when the class dim matches."""
    if val.dim() == 1:
        # bias
        return val[:num_classes - 1] if val.size(0) % num_classes == 0 else val
    out_channels, in_channels = val.shape[:2]
    if out_channels % num_classes == 0:
        # conv_logits: drop the first (old background) class
        kept = val.reshape(num_classes, in_channels, *val.shape[2:])[1:]
        return kept.reshape(-1, *val.shape[1:])
    # agnostic
    return val
def truncate_reg_channel(val, num_classes=81):
    """Truncate per-class regression channels down to ``num_classes - 1``."""
    if val.dim() == 1:
        # fc_reg | rpn_reg bias
        if val.size(0) % num_classes == 0:
            return val.reshape(num_classes, -1)[:num_classes - 1].reshape(-1)
        # agnostic
        return val
    out_channels, in_channels = val.shape[:2]
    if out_channels % num_classes == 0:
        # fc_reg | rpn_reg weight: drop the first class group
        kept = val.reshape(num_classes, -1, in_channels, *val.shape[2:])[1:]
        return kept.reshape(-1, *val.shape[1:])
    # agnostic
    return val
def convert(in_file, out_file, num_classes):
    """Convert keys in checkpoints.
    There can be some breaking changes during the development of mmdetection,
    and this tool is used for upgrading checkpoints trained with old versions
    to the latest one.

    Args:
        in_file (str): Path of the legacy checkpoint.
        out_file (str): Path the upgraded checkpoint is written to.
        num_classes (int): Number of classes (including the legacy
            background class) of the original model.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    meta_info = checkpoint['meta']
    # the training config is embedded in the checkpoint meta; '#' comments
    # out its first line before re-parsing it as a config file
    is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config(
        '#' + meta_info['config'])
    # NOTE(review): string comparison of version numbers — assumes legacy
    # single-digit components ('0.10.x' would compare wrongly); confirm the
    # range of mmdet versions this tool must support.
    if meta_info['mmdet_version'] <= '0.5.3' and is_retina:
        upgrade_retina = True
    else:
        upgrade_retina = False
    # MMDetection v2.5.0 unifies the class order in RPN
    # if the model is trained in version<v2.5.0
    # The RPN model should be upgraded to be used in version>=2.5.0
    if meta_info['mmdet_version'] < '2.5.0':
        upgrade_rpn = True
    else:
        upgrade_rpn = False
    for key, val in in_state_dict.items():
        new_key = key
        new_val = val
        # two-stage heads moved under the roi_head module in v2
        if is_two_stage and is_head(key):
            new_key = 'roi_head.{}'.format(key)
        # classification
        if upgrade_rpn:
            m = re.search(
                r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|'
                r'fovea_cls).(weight|bias)', new_key)
        else:
            m = re.search(
                r'(conv_cls|retina_cls|fc_cls|fcos_cls|'
                r'fovea_cls).(weight|bias)', new_key)
        if m is not None:
            print(f'reorder cls channels of {new_key}')
            new_val = reorder_cls_channel(val, num_classes)
        # regression
        if upgrade_rpn:
            m = re.search(r'(fc_reg).(weight|bias)', new_key)
        else:
            m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key)
        if m is not None and not reg_cls_agnostic:
            print(f'truncate regression channels of {new_key}')
            new_val = truncate_reg_channel(val, num_classes)
        # mask head
        m = re.search(r'(conv_logits).(weight|bias)', new_key)
        if m is not None:
            print(f'truncate mask prediction channels of {new_key}')
            new_val = truncate_cls_channel(val, num_classes)
        m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
        # Legacy issues in RetinaNet since V1.x
        # Use ConvModule instead of nn.Conv2d in RetinaNet
        # cls_convs.0.weight -> cls_convs.0.conv.weight
        if m is not None and upgrade_retina:
            param = m.groups()[1]
            new_key = key.replace(param, f'conv.{param}')
            out_state_dict[new_key] = val
            print(f'rename the name of {key} to {new_key}')
            continue
        m = re.search(r'(cls_convs).\d.(weight|bias)', key)
        if m is not None and is_ssd:
            print(f'reorder cls channels of {new_key}')
            new_val = reorder_cls_channel(val, num_classes)
        out_state_dict[new_key] = new_val
    checkpoint['state_dict'] = out_state_dict
    torch.save(checkpoint, out_file)
def main():
    """CLI wrapper around :func:`convert`."""
    cli = argparse.ArgumentParser(description='Upgrade model version')
    cli.add_argument('in_file', help='input checkpoint file')
    cli.add_argument('out_file', help='output checkpoint file')
    cli.add_argument(
        '--num-classes',
        type=int,
        default=81,
        help='number of classes of the original model')
    opts = cli.parse_args()
    convert(opts.in_file, opts.out_file, opts.num_classes)
if __name__ == '__main__':
    main()
| 6,848 | 31.459716 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/model_converters/upgrade_ssd_version.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmcv import Config
def parse_config(config_strings):
    """Validate that the embedded config describes an SSD model.

    Args:
        config_strings (str): Contents of the training config stored in the
            checkpoint's ``meta`` field.

    Raises:
        AssertionError: If the config's bbox head is not ``SSDHead``.
    """
    import os
    temp_file = tempfile.NamedTemporaryFile()
    config_path = f'{temp_file.name}.py'
    with open(config_path, 'w') as f:
        f.write(config_strings)
    try:
        config = Config.fromfile(config_path)
    finally:
        # close the anchor tempfile and drop the manually created `.py`
        # sibling, both of which the original code leaked
        temp_file.close()
        os.remove(config_path)
    # check whether it is SSD
    if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not a SSD model.')
def convert(in_file, out_file):
    """Upgrade an old SSD checkpoint's keys to the refactored layout.

    Extra convs move under ``neck.extra_layers``, the L2-norm weight moves
    under ``neck.l2_norm`` and head conv keys get a ``.0`` segment inserted.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    meta_info = checkpoint['meta']
    # raises if the stored config is not an SSD model
    parse_config('#' + meta_info['config'])
    for key, value in in_state_dict.items():
        if 'extra' in key:
            layer_idx = int(key.split('.')[2])
            new_key = 'neck.extra_layers.{}.{}.conv.'.format(
                layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
        elif 'l2_norm' in key:
            new_key = 'neck.l2_norm.weight'
        elif 'bbox_head' in key:
            # NOTE(review): splices '.0' at a fixed character offset (21) —
            # assumes the exact legacy key prefix length; verify against the
            # old SSD head naming before reusing on other checkpoints.
            new_key = key[:21] + '.0' + key[21:]
        else:
            new_key = key
        out_state_dict[new_key] = value
    checkpoint['state_dict'] = out_state_dict
    # NOTE(review): lexicographic version compare — '1.10' < '1.6' as strings
    if torch.__version__ >= '1.6':
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
def main():
    """CLI wrapper around :func:`convert`."""
    cli = argparse.ArgumentParser(description='Upgrade SSD version')
    cli.add_argument('in_file', help='input checkpoint file')
    cli.add_argument('out_file', help='output checkpoint file')
    opts = cli.parse_args()
    convert(opts.in_file, opts.out_file)
if __name__ == '__main__':
    main()
| 1,789 | 29.338983 | 78 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/model_converters/detectron2pytorch.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import mmcv
import torch
# ResNet depth -> number of residual blocks per stage (layer1..layer4)
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
    """Translate a detectron affine-channel layer into BN parameters."""
    # detectron replaces BN with a scale/shift pair, so the torch BN gets the
    # affine params as weight/bias and identity running statistics
    shift = torch.from_numpy(blobs[caffe_name + '_b'])
    scale = torch.from_numpy(blobs[caffe_name + '_s'])
    state_dict[torch_name + '.bias'] = shift
    state_dict[torch_name + '.weight'] = scale
    bn_size = scale.size()
    state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size)
    state_dict[torch_name + '.running_var'] = torch.ones(bn_size)
    converted_names.update((caffe_name + '_b', caffe_name + '_s'))
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
                    converted_names):
    """Copy a detectron conv/fc weight (and optional bias) into state_dict."""
    state_dict[torch_name + '.weight'] = torch.from_numpy(
        blobs[caffe_name + '_w'])
    converted_names.add(caffe_name + '_w')
    bias_key = caffe_name + '_b'
    if bias_key in blobs:
        state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[bias_key])
        converted_names.add(bias_key)
def convert(src, dst, depth):
    """Convert keys in detectron pretrained ResNet models to pytorch style."""
    # load arch_settings
    if depth not in arch_settings:
        raise ValueError('Only support ResNet-50 and ResNet-101 currently')
    block_nums = arch_settings[depth]
    # load caffe model
    caffe_model = mmcv.load(src, encoding='latin1')
    blobs = caffe_model.get('blobs', caffe_model)
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
    for stage, num_blocks in enumerate(block_nums, start=1):
        for block in range(num_blocks):
            src_prefix = f'res{stage + 1}_{block}'
            dst_prefix = f'layer{stage}.{block}'
            if block == 0:
                # the first block of each stage owns the downsample path
                convert_conv_fc(blobs, state_dict, f'{src_prefix}_branch1',
                                f'{dst_prefix}.downsample.0', converted_names)
                convert_bn(blobs, state_dict, f'{src_prefix}_branch1_bn',
                           f'{dst_prefix}.downsample.1', converted_names)
            for conv_idx, letter in enumerate(('a', 'b', 'c'), start=1):
                convert_conv_fc(blobs, state_dict,
                                f'{src_prefix}_branch2{letter}',
                                f'{dst_prefix}.conv{conv_idx}',
                                converted_names)
                convert_bn(blobs, state_dict,
                           f'{src_prefix}_branch2{letter}_bn',
                           f'{dst_prefix}.bn{conv_idx}', converted_names)
    # report anything the mapping did not cover
    for key in blobs:
        if key not in converted_names:
            print(f'Not Convert: {key}')
    # save checkpoint
    torch.save({'state_dict': state_dict}, dst)
def main():
    """CLI wrapper around :func:`convert`."""
    cli = argparse.ArgumentParser(description='Convert model keys')
    cli.add_argument('src', help='src detectron model path')
    cli.add_argument('dst', help='save path')
    cli.add_argument('depth', type=int, help='ResNet model depth')
    opts = cli.parse_args()
    convert(opts.src, opts.dst, opts.depth)
if __name__ == '__main__':
    main()
| 3,578 | 41.607143 | 78 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/analysis_tools/benchmark.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import time
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import init_dist, load_checkpoint, wrap_fp16_model
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
    """Build and parse the benchmark CLI arguments.

    Returns:
        argparse.Namespace: Parsed options. Also mirrors ``--local_rank``
        into the ``LOCAL_RANK`` env var for torch.distributed launchers.
    """
    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--max-iter', type=int, default=2000, help='num of max iter')
    parser.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def measure_inferense_speed(cfg, checkpoint, max_iter, log_interval,
                            is_fuse_conv_bn):
    """Measure pure inference FPS of a detector over its test dataloader.

    NOTE(review): the typo in the function name ('inferense') is kept
    because the call site in ``main`` depends on it.

    Args:
        cfg (mmcv.Config): Full test config.
        checkpoint (str): Checkpoint path to load.
        max_iter (int): Number of iterations to benchmark over.
        log_interval (int): Iterations between progress prints.
        is_fuse_conv_bn (bool): Fuse conv+bn layers before timing if True.

    Returns:
        float: The last computed frames-per-second value (0 if fewer than
        ``num_warmup + 1`` batches were processed).
    """
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' to 'DefaultFormatBundle'
        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=True,
        shuffle=False)
    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, checkpoint, map_location='cpu')
    if is_fuse_conv_bn:
        model = fuse_conv_bn(model)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False)
    model.eval()
    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0
    fps = 0
    # benchmark with 2000 image and take the average
    for i, data in enumerate(data_loader):
        # synchronize around the forward pass so GPU time is fully counted
        torch.cuda.synchronize()
        start_time = time.perf_counter()
        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time
        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(
                    f'Done image [{i + 1:<3}/ {max_iter}], '
                    f'fps: {fps:.1f} img / s, '
                    f'times per image: {1000 / fps:.1f} ms / img',
                    flush=True)
        if (i + 1) == max_iter:
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(
                f'Overall fps: {fps:.1f} img / s, '
                f'times per image: {1000 / fps:.1f} ms / img',
                flush=True)
            break
    return fps
def main():
    """Entry point: requires a distributed launcher, then runs the benchmark."""
    opts = parse_args()
    cfg = Config.fromfile(opts.config)
    if opts.cfg_options is not None:
        cfg.merge_from_dict(opts.cfg_options)
    # guard clause: this tool only runs under a distributed launcher
    if opts.launcher == 'none':
        raise NotImplementedError('Only supports distributed mode')
    init_dist(opts.launcher, **cfg.dist_params)
    measure_inferense_speed(cfg, opts.checkpoint, opts.max_iter,
                            opts.log_interval, opts.fuse_conv_bn)
if __name__ == '__main__':
    main()
| 4,843 | 32.638889 | 78 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/analysis_tools/optimize_anchors.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Optimize anchor settings on a specific dataset.
This script provides two method to optimize YOLO anchors including k-means
anchor cluster and differential evolution. You can use ``--algorithm k-means``
and ``--algorithm differential_evolution`` to switch two method.
Example:
Use k-means anchor cluster::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
Use differential evolution to optimize anchors::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm differential_evolution \
--input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
"""
import argparse
import os.path as osp
import mmcv
import numpy as np
import torch
from mmcv import Config
from scipy.optimize import differential_evolution
from mmdet.core import bbox_cxcywh_to_xyxy, bbox_overlaps, bbox_xyxy_to_cxcywh
from mmdet.datasets import build_dataset
from mmdet.utils import get_root_logger
def parse_args():
    """Build and parse the anchor-optimization CLI arguments."""
    parser = argparse.ArgumentParser(description='Optimize anchor parameters.')
    parser.add_argument('config', help='Train config file path.')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for calculating.')
    parser.add_argument(
        '--input-shape',
        type=int,
        nargs='+',
        default=[608, 608],
        help='input image size')
    parser.add_argument(
        '--algorithm',
        default='differential_evolution',
        help='Algorithm used for anchor optimizing.'
        'Support k-means and differential_evolution for YOLO.')
    parser.add_argument(
        '--iters',
        default=1000,
        type=int,
        help='Maximum iterations for optimizer.')
    parser.add_argument(
        '--output-dir',
        default=None,
        type=str,
        help='Path to save anchor optimize result.')
    args = parser.parse_args()
    return args
class BaseAnchorOptimizer:
    """Base class for anchor optimizer.

    Collects all ground-truth box sizes from the dataset annotations and
    rescales them to the model input size; subclasses implement
    :meth:`optimize` on top of that data.

    Args:
        dataset (obj:`Dataset`): Dataset object.
        input_shape (list[int]): Input image shape of the model.
            Format in [width, height].
        logger (obj:`logging.Logger`): The logger for logging.
        device (str, optional): Device used for calculating.
            Default: 'cuda:0'
        out_dir (str, optional): Path to save anchor optimize result.
            Default: None
    """
    def __init__(self,
                 dataset,
                 input_shape,
                 logger,
                 device='cuda:0',
                 out_dir=None):
        self.dataset = dataset
        self.input_shape = input_shape
        self.logger = logger
        self.device = device
        self.out_dir = out_dir
        bbox_whs, img_shapes = self.get_whs_and_shapes()
        # per-image scale from the longer image side to each input dimension;
        # broadcasting divides every bbox wh by its own image's ratio
        ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape])
        # resize to input shape
        self.bbox_whs = bbox_whs / ratios
    def get_whs_and_shapes(self):
        """Get widths and heights of bboxes and shapes of images.
        Returns:
            tuple[np.ndarray]: Array of bbox shapes and array of image
                shapes with shape (num_bboxes, 2) in [width, height] format.
        """
        self.logger.info('Collecting bboxes from annotation...')
        bbox_whs = []
        img_shapes = []
        prog_bar = mmcv.ProgressBar(len(self.dataset))
        for idx in range(len(self.dataset)):
            ann = self.dataset.get_ann_info(idx)
            data_info = self.dataset.data_infos[idx]
            img_shape = np.array([data_info['width'], data_info['height']])
            gt_bboxes = ann['bboxes']
            for bbox in gt_bboxes:
                # (x2, y2) - (x1, y1) -> (w, h); the image shape is repeated
                # per bbox so both arrays stay aligned row-for-row
                wh = bbox[2:4] - bbox[0:2]
                img_shapes.append(img_shape)
                bbox_whs.append(wh)
            prog_bar.update()
        print('\n')
        bbox_whs = np.array(bbox_whs)
        img_shapes = np.array(img_shapes)
        self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.')
        return bbox_whs, img_shapes
    def get_zero_center_bbox_tensor(self):
        """Get a tensor of bboxes centered at (0, 0).
        Returns:
            Tensor: Tensor of bboxes with shape (num_bboxes, 4)
                in [xmin, ymin, xmax, ymax] format.
        """
        whs = torch.from_numpy(self.bbox_whs).to(
            self.device, dtype=torch.float32)
        bboxes = bbox_cxcywh_to_xyxy(
            torch.cat([torch.zeros_like(whs), whs], dim=1))
        return bboxes
    def optimize(self):
        """Run the anchor optimization; implemented by subclasses."""
        raise NotImplementedError
    def save_result(self, anchors, path=None):
        """Log the rounded anchors and optionally dump them as JSON."""
        anchor_results = []
        for w, h in anchors:
            anchor_results.append([round(w), round(h)])
        self.logger.info(f'Anchor optimize result:{anchor_results}')
        if path:
            json_path = osp.join(path, 'anchor_optimize_result.json')
            mmcv.dump(anchor_results, json_path)
            self.logger.info(f'Result saved in {json_path}')
class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer):
    r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet.
    <https://github.com/AlexeyAB/darknet/blob/master/src/detector.c>`_.
    Args:
        num_anchors (int) : Number of anchors.
        iters (int): Maximum iterations for k-means.
    """
    def __init__(self, num_anchors, iters, **kwargs):
        super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs)
        self.num_anchors = num_anchors
        self.iters = iters
    def optimize(self):
        """Cluster anchors with k-means and log/save the result."""
        anchors = self.kmeans_anchors()
        self.save_result(anchors, self.out_dir)
    def kmeans_anchors(self):
        """Run EM-style k-means over zero-centered boxes under IoU distance.

        Returns:
            list[np.ndarray]: ``num_anchors`` (w, h) pairs sorted by area.
        """
        self.logger.info(
            f'Start cluster {self.num_anchors} YOLO anchors with K-means...')
        bboxes = self.get_zero_center_bbox_tensor()
        # random boxes serve as the initial cluster centers
        cluster_center_idx = torch.randint(
            0, bboxes.shape[0], (self.num_anchors, )).to(self.device)
        assignments = torch.zeros((bboxes.shape[0], )).to(self.device)
        cluster_centers = bboxes[cluster_center_idx]
        if self.num_anchors == 1:
            # a single anchor is just the mean box; no iteration needed
            cluster_centers = self.kmeans_maximization(bboxes, assignments,
                                                       cluster_centers)
            anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
            anchors = sorted(anchors, key=lambda x: x[0] * x[1])
            return anchors
        prog_bar = mmcv.ProgressBar(self.iters)
        for i in range(self.iters):
            converged, assignments = self.kmeans_expectation(
                bboxes, assignments, cluster_centers)
            if converged:
                self.logger.info(f'K-means process has converged at iter {i}.')
                break
            cluster_centers = self.kmeans_maximization(bboxes, assignments,
                                                       cluster_centers)
            prog_bar.update()
        print('\n')
        avg_iou = bbox_overlaps(bboxes,
                                cluster_centers).max(1)[0].mean().item()
        anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
        anchors = sorted(anchors, key=lambda x: x[0] * x[1])
        self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}')
        return anchors
    def kmeans_maximization(self, bboxes, assignments, centers):
        """Maximization part of EM algorithm(Expectation-Maximization)"""
        new_centers = torch.zeros_like(centers)
        for i in range(centers.shape[0]):
            mask = (assignments == i)
            if mask.sum():
                new_centers[i, :] = bboxes[mask].mean(0)
        return new_centers
    def kmeans_expectation(self, bboxes, assignments, centers):
        """Expectation part of EM algorithm(Expectation-Maximization)"""
        ious = bbox_overlaps(bboxes, centers)
        closest = ious.argmax(1)
        converged = (closest == assignments).all()
        return converged, closest
class YOLODEAnchorOptimizer(BaseAnchorOptimizer):
    """YOLO anchor optimizer using differential evolution algorithm.

    Args:
        num_anchors (int) : Number of anchors.
        iters (int): Maximum iterations of the differential evolution.
        strategy (str): The differential evolution strategy to use.
            Should be one of:
            - 'best1bin'
            - 'best1exp'
            - 'rand1exp'
            - 'randtobest1exp'
            - 'currenttobest1exp'
            - 'best2exp'
            - 'rand2exp'
            - 'randtobest1bin'
            - 'currenttobest1bin'
            - 'best2bin'
            - 'rand2bin'
            - 'rand1bin'
            Default: 'best1bin'.
        population_size (int): Total population size of evolution algorithm.
            Default: 15.
        convergence_thr (float): Tolerance for convergence, the
            optimizing stops when ``np.std(pop) <= abs(convergence_thr)
            + convergence_thr * np.abs(np.mean(population_energies))``,
            respectively. Default: 0.0001.
        mutation (tuple[float]): Range of dithering randomly changes the
            mutation constant. Default: (0.5, 1).
        recombination (float): Recombination constant of crossover probability.
            Default: 0.7.
    """
    def __init__(self,
                 num_anchors,
                 iters,
                 strategy='best1bin',
                 population_size=15,
                 convergence_thr=0.0001,
                 mutation=(0.5, 1),
                 recombination=0.7,
                 **kwargs):
        # Remaining kwargs are consumed by BaseAnchorOptimizer.
        super(YOLODEAnchorOptimizer, self).__init__(**kwargs)
        self.num_anchors = num_anchors
        self.iters = iters
        self.strategy = strategy
        self.population_size = population_size
        self.convergence_thr = convergence_thr
        self.mutation = mutation
        self.recombination = recombination
    def optimize(self):
        """Optimize anchors and persist them via ``save_result``."""
        anchors = self.differential_evolution()
        self.save_result(anchors, self.out_dir)
    def differential_evolution(self):
        """Search anchors that maximize the mean best-match IoU.

        Returns:
            list[tuple]: ``num_anchors`` (w, h) pairs sorted by area.
        """
        bboxes = self.get_zero_center_bbox_tensor()
        bounds = []
        # One (w, h) pair of search bounds per anchor, limited by the
        # input image shape.
        for i in range(self.num_anchors):
            bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])])
        result = differential_evolution(
            func=self.avg_iou_cost,
            bounds=bounds,
            args=(bboxes, ),
            strategy=self.strategy,
            maxiter=self.iters,
            popsize=self.population_size,
            tol=self.convergence_thr,
            mutation=self.mutation,
            recombination=self.recombination,
            updating='immediate',
            disp=True)
        # avg_iou_cost returns 1 - mean IoU, so 1 - result.fun is the IoU.
        self.logger.info(
            f'Anchor evolution finish. Average IOU: {1 - result.fun}')
        # result.x is a flat [w0, h0, w1, h1, ...] parameter vector.
        anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])]
        anchors = sorted(anchors, key=lambda x: x[0] * x[1])
        return anchors
    @staticmethod
    def avg_iou_cost(anchor_params, bboxes):
        """Cost function: 1 - mean best-match IoU of *bboxes* against the
        candidate anchors encoded in the flat *anchor_params* vector."""
        assert len(anchor_params) % 2 == 0
        anchor_whs = torch.tensor(
            [[w, h]
             for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to(
                bboxes.device, dtype=bboxes.dtype)
        # Place anchors at the origin so they are comparable with the
        # zero-centered ground-truth boxes.
        anchor_boxes = bbox_cxcywh_to_xyxy(
            torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1))
        ious = bbox_overlaps(bboxes, anchor_boxes)
        max_ious, _ = ious.max(1)
        cost = 1 - max_ious.mean().item()
        return cost
def main():
    """Entry point: cluster/optimize YOLO anchors for a config's dataset."""
    logger = get_root_logger()
    args = parse_args()
    cfg = Config.fromfile(args.config)
    input_shape = args.input_shape
    assert len(input_shape) == 2
    anchor_cfg = cfg.model.bbox_head.anchor_generator
    # Only YOLO-style anchor generators are supported by these optimizers.
    assert anchor_cfg.type == 'YOLOAnchorGenerator', \
        f'Only support optimize YOLOAnchor, but get {anchor_cfg.type}.'
    num_anchors = sum(len(sizes) for sizes in anchor_cfg.base_sizes)
    # Unwrap nested dataset wrappers (e.g. RepeatDataset) down to the
    # innermost dataset config.
    data_cfg = cfg.data.train
    while 'dataset' in data_cfg:
        data_cfg = data_cfg['dataset']
    dataset = build_dataset(data_cfg)
    shared_kwargs = dict(
        dataset=dataset,
        input_shape=input_shape,
        device=args.device,
        num_anchors=num_anchors,
        iters=args.iters,
        logger=logger,
        out_dir=args.output_dir)
    if args.algorithm == 'k-means':
        optimizer = YOLOKMeansAnchorOptimizer(**shared_kwargs)
    elif args.algorithm == 'differential_evolution':
        optimizer = YOLODEAnchorOptimizer(**shared_kwargs)
    else:
        raise NotImplementedError(
            f'Only support k-means and differential_evolution, '
            f'but get {args.algorithm}')
    optimizer.optimize()
# Script entry point: run anchor optimization when executed directly.
if __name__ == '__main__':
    main()
| 13,161 | 34.477089 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/analysis_tools/get_flops.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
# `get_model_complexity_info` only exists in mmcv > 0.6.2; fail fast with a
# clear upgrade hint instead of an obscure failure later on.
try:
    from mmcv.cnn import get_model_complexity_info
except ImportError:
    raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
    """Build and parse the command-line options of the FLOPs script."""
    ap = argparse.ArgumentParser(description='Train a detector')
    ap.add_argument('config', help='train config file path')
    ap.add_argument(
        '--shape', type=int, nargs='+', default=[1280, 800],
        help='input image size')
    ap.add_argument(
        '--cfg-options', nargs='+', action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    ap.add_argument(
        '--size-divisor', type=int, default=32,
        help='Pad the input image, the minimum size that is divisible '
        'by size_divisor, -1 means do not pad the image.')
    return ap.parse_args()
def main():
    """Compute and print FLOPs/params of the detector in a config.

    The input shape is padded up to a multiple of ``--size-divisor`` so it
    matches what the data pipeline would actually feed the model.
    """
    args = parse_args()
    if len(args.shape) == 1:
        h = w = args.shape[0]
    elif len(args.shape) == 2:
        h, w = args.shape
    else:
        raise ValueError('invalid input shape')
    orig_shape = (3, h, w)
    divisor = args.size_divisor
    if divisor > 0:
        # Round each side up to the nearest multiple of the divisor.
        h = int(np.ceil(h / divisor)) * divisor
        w = int(np.ceil(w / divisor)) * divisor
    input_shape = (3, h, w)
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    if torch.cuda.is_available():
        model.cuda()
    model.eval()
    # FLOPs counting needs a single-tensor forward; detectors expose it as
    # `forward_dummy`.
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        # Fix: error message contained a duplicated word
        # ('currently not currently supported').
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
            format(model.__class__.__name__))
    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    if divisor > 0 and \
            input_shape != orig_shape:
        print(f'{split_line}\nUse size divisor set input shape '
              f'from {orig_shape} to {input_shape}\n')
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
# Script entry point.
if __name__ == '__main__':
    main()
| 3,194 | 30.323529 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/tools/analysis_tools/test_robustness.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import mmcv
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tools.analysis_tools.robustness_eval import get_results
from mmdet import datasets
from mmdet.apis import multi_gpu_test, set_random_seed, single_gpu_test
from mmdet.core import eval_map
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def coco_eval_with_return(result_files,
                          result_types,
                          coco,
                          max_dets=(100, 300, 1000)):
    """Run COCO-style evaluation for each result type and return the metrics.

    For 'bbox'/'segm' the stats are returned as a name->value dict; for the
    other types the raw ``COCOeval.stats`` array is returned.
    """
    valid_types = ['proposal', 'bbox', 'segm', 'keypoints']
    for res_type in result_types:
        assert res_type in valid_types
    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)
    eval_results = {}
    for res_type in result_types:
        res_file = result_files[res_type]
        assert res_file.endswith('.json')
        detections = coco.loadRes(res_file)
        evaluator = COCOeval(coco, detections,
                             'bbox' if res_type == 'proposal' else res_type)
        evaluator.params.imgIds = coco.getImgIds()
        if res_type == 'proposal':
            # Proposal recall is class-agnostic and evaluated at the
            # requested max-detection counts.
            evaluator.params.useCats = 0
            evaluator.params.maxDets = list(max_dets)
        evaluator.evaluate()
        evaluator.accumulate()
        evaluator.summarize()
        if res_type in ('segm', 'bbox'):
            metric_names = [
                'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
                'AR100', 'ARs', 'ARm', 'ARl'
            ]
            eval_results[res_type] = dict(
                zip(metric_names, evaluator.stats))
        else:
            eval_results[res_type] = evaluator.stats
    return eval_results
def voc_eval_with_return(result_file,
                         dataset,
                         iou_thr=0.5,
                         logger='print',
                         only_ap=True):
    """Evaluate Pascal-VOC style mAP on dumped results and return them.

    Returns ``(mean_ap, eval_results)`` where per-class results are reduced
    to their 'ap' entry when *only_ap* is True.
    """
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    # VOC2007 uses the 11-point metric, selected by name inside eval_map.
    dataset_name = ('voc07' if getattr(dataset, 'year', None) == 2007
                    else dataset.CLASSES)
    mean_ap, eval_results = eval_map(
        det_results,
        annotations,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        logger=logger)
    if only_ap:
        eval_results = [{'ap': res['ap']} for res in eval_results]
    return mean_ap, eval_results
def parse_args():
    """Parse command-line options of the robustness benchmark.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable so distributed helpers work without a launcher.
    """
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    parser.add_argument(
        '--corruptions',
        type=str,
        nargs='+',
        default='benchmark',
        choices=[
            'all', 'benchmark', 'noise', 'blur', 'weather', 'digital',
            'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',
            'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',
            'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',
            'spatter', 'saturate'
        ],
        help='corruptions')
    parser.add_argument(
        '--severities',
        type=int,
        nargs='+',
        default=[0, 1, 2, 3, 4, 5],
        help='corruption severity levels')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
        help='eval types')
    parser.add_argument(
        '--iou-thr',
        type=float,
        default=0.5,
        help='IoU threshold for pascal voc evaluation')
    # Fix: `type=bool` is an argparse foot-gun -- bool('False') is True, so
    # ANY non-empty value (including 'False') enabled the summaries. Parse
    # the string explicitly instead; the CLI shape is unchanged.
    parser.add_argument(
        '--summaries',
        type=lambda v: v.lower() in ('1', 'true', 't', 'yes', 'y'),
        default=False,
        help='Print summaries for every corruption and severity')
    parser.add_argument(
        '--workers', type=int, default=32, help='workers per gpu')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--final-prints',
        type=str,
        nargs='+',
        choices=['P', 'mPC', 'rPC'],
        default='mPC',
        help='corruption benchmark metric to print at the end')
    parser.add_argument(
        '--final-prints-aggregate',
        type=str,
        choices=['all', 'benchmark'],
        default='benchmark',
        help='aggregate all results or only those for benchmark corruptions')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Evaluate a detector's robustness on corrupted copies of the test set.

    For every (corruption, severity) pair the test pipeline is rebuilt with a
    'Corrupt' transform, the checkpoint is evaluated, per-severity metrics are
    accumulated and dumped after each run, and the aggregated benchmark
    (P/mPC/rPC) is printed at the end.
    """
    args = parse_args()
    assert args.out or args.show or args.show_dir, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out", "--show" or "show-dir"')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed)
    # Expand the corruption group aliases into concrete corruption names.
    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = [
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
        ]
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions
    rank, _ = get_dist_info()
    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for sev_i, corruption_severity in enumerate(args.severities):
            # evaluate severity 0 (= no corruption) only once
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue
            test_data_cfg = copy.deepcopy(cfg.data.test)
            # assign corruption and severity
            if corruption_severity > 0:
                corruption_trans = dict(
                    type='Corrupt',
                    corruption=corruption,
                    severity=corruption_severity)
                # TODO: hard coded "1", we assume that the first step is
                # loading images, which needs to be fixed in the future
                test_data_cfg['pipeline'].insert(1, corruption_trans)
            # print info
            print(f'\nTesting {corruption} at severity {corruption_severity}')
            # build the dataloader
            # TODO: support multiple images per gpu
            # (only minor changes are needed)
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(
                dataset,
                samples_per_gpu=1,
                workers_per_gpu=args.workers,
                dist=distributed,
                shuffle=False)
            # build the model and load checkpoint
            cfg.model.train_cfg = None
            model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(
                model, args.checkpoint, map_location='cpu')
            # old versions did not save class info in checkpoints,
            # this walkaround is for backward compatibility
            if 'CLASSES' in checkpoint.get('meta', {}):
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES
            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                show_dir = args.show_dir
                if show_dir is not None:
                    show_dir = osp.join(show_dir, corruption)
                    show_dir = osp.join(show_dir, str(corruption_severity))
                    if not osp.exists(show_dir):
                        # Fix: `makedirs` lives in `os`, not `os.path`;
                        # `osp.makedirs` raised AttributeError.
                        os.makedirs(show_dir)
                outputs = single_gpu_test(model, data_loader, args.show,
                                          show_dir, args.show_score_thr)
            else:
                model = MMDistributedDataParallel(
                    model.cuda(),
                    device_ids=[torch.cuda.current_device()],
                    broadcast_buffers=False)
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)
            if args.out and rank == 0:
                eval_results_filename = (
                    osp.splitext(args.out)[0] + '_results' +
                    osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = \
                                    voc_eval_with_return(
                                        args.out, test_dataset,
                                        args.iou_thr, logger)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation \
                                    is supported for pascal voc')
                else:
                    if eval_types:
                        print(f'Starting evaluate {" and ".join(eval_types)}')
                        # NOTE(review): for ['proposal_fast'] only
                        # `result_file` is set here, yet `result_files` is
                        # passed to coco_eval_with_return below -- this path
                        # looks unreachable/undefined; verify upstream.
                        if eval_types == ['proposal_fast']:
                            result_file = args.out
                        else:
                            if not isinstance(outputs[0], dict):
                                result_files = dataset.results2json(
                                    outputs, args.out)
                            else:
                                for name in outputs[0]:
                                    print(f'\nEvaluating {name}')
                                    outputs_ = [out[name] for out in outputs]
                                    # Fix: the suffix used to sit on its own
                                    # line, so `result_file` lacked the metric
                                    # name and the bare `+ f'.{name}'`
                                    # evaluated unary plus on a str
                                    # (TypeError at runtime).
                                    result_file = args.out + f'.{name}'
                                    result_files = dataset.results2json(
                                        outputs_, result_file)
                        eval_results = coco_eval_with_return(
                            result_files, eval_types, dataset.coco)
                        aggregated_results[corruption][
                            corruption_severity] = eval_results
                    else:
                        print('\nNo task was selected for evaluation;'
                              '\nUse --eval to select a task')
                # save results after each evaluation
                mmcv.dump(aggregated_results, eval_results_filename)
    if rank == 0:
        # print final results
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate
        if cfg.dataset_type == 'VOCDataset':
            get_results(
                eval_results_filename,
                dataset='voc',
                prints=prints,
                aggregate=aggregate)
        else:
            get_results(
                eval_results_filename,
                dataset='coco',
                prints=prints,
                aggregate=aggregate)
# Script entry point.
if __name__ == '__main__':
    main()
| 15,421 | 38.341837 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/.dev_scripts/benchmark_filter.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
    """Parse the CLI flags selecting which config groups to benchmark."""
    ap = argparse.ArgumentParser(description='Filter configs to train')
    ap.add_argument(
        '--basic-arch', action='store_true',
        help='to train models in basic arch')
    ap.add_argument(
        '--datasets', action='store_true', help='to train models in dataset')
    ap.add_argument(
        '--data-pipeline', action='store_true',
        help='to train models related to data pipeline, e.g. augmentations')
    ap.add_argument(
        '--nn-module', action='store_true',
        help='to train models related to neural network modules')
    ap.add_argument(
        '--model-options', nargs='+',
        help='custom options to special model benchmark')
    ap.add_argument(
        '--out', type=str, default='batch_train_list.txt',
        help='output path of gathered metrics to be stored')
    return ap.parse_args()
# Config sub-directories that exercise core detector architectures.
basic_arch_root = [
    'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
    'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
    'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
    'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
    'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
    'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
    'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
]
# Config sub-directories that exercise non-default datasets.
datasets_root = [
    'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
]
# Config sub-directories that exercise data-pipeline features (augmentation).
data_pipeline_root = ['albu_example', 'instaboost']
# Config sub-directories that exercise pluggable neural-network modules.
nn_module_root = [
    'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
    'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
]
# Whitelist of concrete config files eligible for benchmarking; `main`
# intersects the selected category directories above with this pool.
benchmark_pool = [
    'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
    'configs/atss/atss_r50_fpn_1x_coco.py',
    'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
    'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
    'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
    'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
    'configs/centripetalnet/'
    'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
    'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
    'configs/cornernet/'
    'cornernet_hourglass104_mstest_8x6_210e_coco.py',
    'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
    'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
    'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
    'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
    'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
    'configs/detectors/detectors_htc_r50_1x_coco.py',
    'configs/detr/detr_r50_8x2_150e_coco.py',
    'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
    'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
    'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py',  # noqa
    'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
    'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
    'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
    'configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py',
    'configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py',
    'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
    'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
    'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
    'configs/gfl/gfl_r50_fpn_1x_coco.py',
    'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
    'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
    'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
    'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
    'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
    'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
    'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
    'configs/htc/htc_r50_fpn_1x_coco.py',
    'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
    'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
    'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
    'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
    'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
    'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
    'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
    'configs/paa/paa_r50_fpn_1x_coco.py',
    'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
    'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
    'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
    'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
    'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
    'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
    'configs/resnest/'
    'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
    'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
    'configs/rpn/rpn_r50_fpn_1x_coco.py',
    'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
    'configs/ssd/ssd300_coco.py',
    'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
    'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
    'configs/yolact/yolact_r50_1x8_coco.py',
    'configs/yolo/yolov3_d53_320_273e_coco.py',
    'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
    'configs/scnet/scnet_r50_fpn_1x_coco.py',
    'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
]
def main():
    """Collect the selected benchmark configs and write them to a file."""
    args = parse_args()
    # Gather the category roots enabled by the CLI flags.
    selected_roots = []
    for enabled, roots in ((args.basic_arch, basic_arch_root),
                           (args.datasets, datasets_root),
                           (args.data_pipeline, data_pipeline_root),
                           (args.nn_module, nn_module_root)):
        if enabled:
            selected_roots += roots
    if args.model_options is not None:
        selected_roots += args.model_options
    # Keep every config under the selected roots that is whitelisted in
    # `benchmark_pool`, preserving first-seen order and dropping duplicates.
    picked = []
    for sub_dir in selected_roots:
        cfg_dir = osp.join('configs/', sub_dir)
        for entry in os.scandir(cfg_dir):
            candidate = osp.join(cfg_dir, entry.name)
            if candidate in benchmark_pool and candidate not in picked:
                picked.append(candidate)
    print(f'Totally found {len(picked)} configs to benchmark')
    with open(args.out, 'w') as out_file:
        out_file.writelines(cfg + '\n' for cfg in picked)
# Script entry point.
if __name__ == '__main__':
    main()
| 7,096 | 41.244048 | 92 | py |
PseCo | PseCo-master/thirdparty/mmdetection/.dev_scripts/gather_models.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import json
import os.path as osp
import shutil
import subprocess
from collections import OrderedDict
import mmcv
import torch
import yaml
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
    """Dump *data* to YAML while preserving ``OrderedDict`` key order.

    A throwaway dumper subclass is used so the representer registration
    does not leak into the caller-supplied ``Dumper``.
    """

    class _OrderedDumper(Dumper):
        pass

    def _represent_ordered(dumper, mapping):
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, mapping.items())

    _OrderedDumper.add_representer(OrderedDict, _represent_ordered)
    return yaml.dump(data, stream, _OrderedDumper, **kwds)
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Removes the optimizer state and EMA weights, saves the slimmed
    checkpoint, then renames it to ``<name>-<sha256[:8]>.pth``.

    Args:
        in_file (str): Path of the trained checkpoint.
        out_file (str): Target path (a '.pth' file); the final file gets a
            short-hash suffix inserted before the extension.

    Returns:
        str: Path of the published checkpoint. Note the `mv` is issued
        asynchronously via ``subprocess.Popen``.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # remove ema state_dict
    for key in list(checkpoint['state_dict']):
        if key.startswith('ema_'):
            checkpoint['state_dict'].pop(key)
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    # Fix: compare versions numerically -- the old string comparison
    # `torch.__version__ >= '1.6'` wrongly treats '1.10' as older than '1.6'.
    torch_version = tuple(
        int(x) for x in torch.__version__.split('+')[0].split('.')[:2])
    if torch_version >= (1, 6):
        # Keep the pre-1.6 serialization format for wider compatibility.
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # Fix: `rstrip('.pth')` strips ANY trailing '.', 'p', 't', 'h' characters
    # (e.g. 'deep.pth' -> 'dee'); drop the extension with splitext instead.
    final_file = osp.splitext(out_file)[0] + '-{}.pth'.format(sha[:8])
    subprocess.Popen(['mv', out_file, final_file])
    return final_file
def get_final_epoch(config):
    """Return the configured number of training epochs for *config*."""
    return mmcv.Config.fromfile('./configs/' + config).runner.max_epochs
def get_real_epoch(config):
    """Return the effective epoch count, scaled by RepeatDataset times."""
    cfg = mmcv.Config.fromfile('./configs/' + config)
    epochs = cfg.runner.max_epochs
    if cfg.data.train.type == 'RepeatDataset':
        return epochs * cfg.data.train.times
    return epochs
def get_final_results(log_json_path, epoch, results_lut):
    """Collect training memory and val metrics for *epoch* from a json log.

    Args:
        log_json_path (str): Path of a json-lines training log.
        epoch (int): Epoch whose records are extracted.
        results_lut (Iterable[str]): Metric keys to pull from val records.

    Returns:
        dict: 'memory' from the train record plus any of *results_lut*
        present in the val record; empty if the epoch never appears.
    """
    results = {}
    with open(log_json_path) as log_file:
        for raw_line in log_file:
            record = json.loads(raw_line)
            if 'mode' not in record:
                continue
            if record['mode'] == 'train' and record['epoch'] == epoch:
                results['memory'] = record['memory']
            elif record['mode'] == 'val' and record['epoch'] == epoch:
                for key in results_lut:
                    if key in record:
                        results[key] = record[key]
    return results
def get_dataset_name(config):
    """Map a config's ``dataset_type`` to a human-readable dataset name."""
    # If there are more dataset, add here.
    name_map = {
        'CityscapesDataset': 'Cityscapes',
        'CocoDataset': 'COCO',
        'CocoPanopticDataset': 'COCO',
        'DeepFashionDataset': 'Deep Fashion',
        'LVISV05Dataset': 'LVIS v0.5',
        'LVISV1Dataset': 'LVIS v1',
        'VOCDataset': 'Pascal VOC',
        'WIDERFaceDataset': 'WIDER Face',
    }
    cfg = mmcv.Config.fromfile('./configs/' + config)
    return name_map[cfg.dataset_type]
def convert_model_info_to_pwc(model_infos):
    """Convert gathered model infos to paperswithcode metafile dicts.

    Args:
        model_infos (list[dict]): Each with 'config', 'results' and
            'model_path' keys (as built by ``main``).

    Returns:
        dict: Mapping config-folder name -> list of metafile entries.
    """
    pwc_files = {}
    for model in model_infos:
        cfg_folder_name = osp.split(model['config'])[-2]
        pwc_model_info = OrderedDict()
        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
        pwc_model_info['In Collection'] = 'Please fill in Collection name'
        pwc_model_info['Config'] = osp.join('configs', model['config'])
        # get metadata
        memory = round(model['results']['memory'] / 1024, 1)
        epochs = get_real_epoch(model['config'])
        meta_data = OrderedDict()
        meta_data['Training Memory (GB)'] = memory
        meta_data['Epochs'] = epochs
        pwc_model_info['Metadata'] = meta_data
        # get dataset name
        dataset_name = get_dataset_name(model['config'])
        # get results
        results = []
        # if there are more metrics, add here.
        if 'bbox_mAP' in model['results']:
            metric = round(model['results']['bbox_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Object Detection',
                    Dataset=dataset_name,
                    Metrics={'box AP': metric}))
        if 'segm_mAP' in model['results']:
            metric = round(model['results']['segm_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Instance Segmentation',
                    Dataset=dataset_name,
                    Metrics={'mask AP': metric}))
        if 'PQ' in model['results']:
            metric = round(model['results']['PQ'], 1)
            results.append(
                OrderedDict(
                    Task='Panoptic Segmentation',
                    Dataset=dataset_name,
                    Metrics={'PQ': metric}))
        pwc_model_info['Results'] = results
        link_string = 'https://download.openmmlab.com/mmdetection/v2.0/'
        # Fix: `rstrip('.py')` strips ANY trailing '.', 'p', 'y' characters
        # (e.g. 'x_copy.py' -> 'x_co'); drop the extension with splitext.
        link_string += '{}/{}'.format(
            osp.splitext(model['config'])[0],
            osp.split(model['model_path'])[-1])
        pwc_model_info['Weights'] = link_string
        if cfg_folder_name in pwc_files:
            pwc_files[cfg_folder_name].append(pwc_model_info)
        else:
            pwc_files[cfg_folder_name] = [pwc_model_info]
    return pwc_files
def parse_args():
    """Parse the root/output directory arguments for model gathering."""
    ap = argparse.ArgumentParser(description='Gather benchmarked models')
    ap.add_argument(
        'root', type=str,
        help='root path of benchmarked models to be gathered')
    ap.add_argument(
        'out', type=str, help='output path of gathered models to be stored')
    return ap.parse_args()
def main():
    """Gather finished benchmark runs: slim and publish each checkpoint,
    copy its logs and config, and dump model info plus pwc metafiles."""
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mmcv.mkdir_or_exist(models_out)
    # find all models in the root directory to be gathered
    raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))
    # filter configs that is not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Find {len(used_configs)} models to be gathered')
    # find final_ckpt and log file for trained each config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        # check whether the exps is finished
        final_epoch = get_final_epoch(used_config)
        final_model = 'epoch_{}.pth'.format(final_epoch)
        model_path = osp.join(exp_dir, final_model)
        # skip if the model is still training
        if not osp.exists(model_path):
            continue
        # get the latest logs
        log_json_path = list(
            sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
        log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
        cfg = mmcv.Config.fromfile('./configs/' + used_config)
        results_lut = cfg.evaluation.metric
        if not isinstance(results_lut, list):
            results_lut = [results_lut]
        # case when using VOC, the evaluation key is only 'mAP'
        # when using Panoptic Dataset, the evaluation key is 'PQ'.
        for i, key in enumerate(results_lut):
            if 'mAP' not in key and 'PQ' not in key:
                # Fix: log keys are '<metric>_mAP' (e.g. 'bbox_mAP', see
                # convert_model_info_to_pwc); the old `key + 'm_AP'` suffix
                # produced keys like 'bboxm_AP' that never matched.
                results_lut[i] = key + '_mAP'
        model_performance = get_final_results(log_json_path, final_epoch,
                                              results_lut)
        if model_performance is None:
            continue
        model_time = osp.split(log_txt_path)[-1].split('.')[0]
        model_infos.append(
            dict(
                config=used_config,
                results=model_performance,
                epochs=final_epoch,
                model_time=model_time,
                log_json_path=osp.split(log_json_path)[-1]))
    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        # Fix: use splitext instead of `rstrip('.py')`, which strips ANY
        # trailing '.', 'p' or 'y' characters from the path.
        model_publish_dir = osp.join(models_out,
                                     osp.splitext(model['config'])[0])
        mmcv.mkdir_or_exist(model_publish_dir)
        model_name = osp.split(model['config'])[-1].split('.')[0]
        model_name += '_' + model['model_time']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      'epoch_{}.pth'.format(model['epochs']))
        # convert model
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)
        # copy log
        shutil.copy(
            osp.join(models_root, model['config'], model['log_json_path']),
            osp.join(model_publish_dir, f'{model_name}.log.json'))
        # Fix: strip the trailing '.json' with splitext rather than
        # `rstrip('.json')` (character-set strip).
        shutil.copy(
            osp.join(models_root, model['config'],
                     osp.splitext(model['log_json_path'])[0]),
            osp.join(model_publish_dir, f'{model_name}.log'))
        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_cconfig_path = osp.split(config_path)[-1]
        shutil.copy(config_path,
                    osp.join(model_publish_dir, target_cconfig_path))
        model['model_path'] = final_model_path
        publish_model_infos.append(model)
    models = dict(models=publish_model_infos)
    print(f'Totally gathered {len(publish_model_infos)} models')
    mmcv.dump(models, osp.join(models_out, 'model_info.json'))
    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')
# Script entry point.
if __name__ == '__main__':
    main()
| 9,899 | 34.869565 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/.dev_scripts/benchmark_inference_fps.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import mmcv
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from tools.analysis_tools.benchmark import measure_inferense_speed
def parse_args():
    """Build and parse the command-line options for the FPS benchmark.

    Side effect: mirrors ``--local_rank`` into the ``LOCAL_RANK``
    environment variable when it is not already set, as required by the
    distributed launcher.
    """
    arg_parser = argparse.ArgumentParser(
        description='MMDet benchmark a model of FPS')

    # Positional arguments.
    arg_parser.add_argument('config', help='test config file path')
    arg_parser.add_argument('checkpoint_root', help='Checkpoint file root path')

    # Benchmark behaviour.
    arg_parser.add_argument(
        '--round-num',
        type=int,
        default=1,
        help='round a number to a given precision in decimal digits')
    arg_parser.add_argument(
        '--out', type=str, help='output path of gathered fps to be stored')
    arg_parser.add_argument(
        '--max-iter', type=int, default=400, help='num of max iter')
    arg_parser.add_argument(
        '--log-interval', type=int, default=40, help='interval of logging')
    arg_parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    arg_parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')

    # Distributed-launch plumbing.
    arg_parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    arg_parser.add_argument('--local_rank', type=int, default=0)

    parsed = arg_parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(parsed.local_rank)
    return parsed
if __name__ == '__main__':
    # Benchmark inference FPS for every model listed in the given config
    # and optionally dump the results as JSON.
    args = parse_args()
    assert args.round_num >= 0

    config = Config.fromfile(args.config)

    # Only distributed mode is supported by measure_inferense_speed.
    if args.launcher == 'none':
        raise NotImplementedError('Only supports distributed mode')
    else:
        init_dist(args.launcher)

    result_dict = {}
    for model_key in config:
        model_infos = config[model_key]
        # A key may map to a single model entry or a list of them.
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            # NOTE(review): model_info['metric'] is not consumed by this
            # benchmark, so it is intentionally not read here.
            cfg_path = model_info['config'].strip()
            cfg = Config.fromfile(cfg_path)
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                fps = measure_inferense_speed(cfg, checkpoint, args.max_iter,
                                              args.log_interval,
                                              args.fuse_conv_bn)
                print(
                    f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
                    f'times per image: {1000/fps:.{args.round_num}f} ms / img',
                    flush=True)
                result_dict[cfg_path] = dict(
                    fps=round(fps, args.round_num),
                    ms_times_pre_image=round(1000 / fps, args.round_num))
            except Exception as e:
                # Bug fix: report the failing config *path* instead of
                # printing the entire benchmark Config object.
                print(f'{cfg_path} error: {repr(e)}')
                # Sentinel 0 marks a failed measurement in the dumped JSON.
                result_dict[cfg_path] = 0

    if args.out:
        mmcv.mkdir_or_exist(args.out)
        mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
| 3,626 | 37.178947 | 79 | py |
# Copyright (c) OpenMMLab. All rights reserved.
# Registry of models used for batch regression testing: each entry maps a
# config file to its released checkpoint, the evaluation metric type(s), and
# the expected metric values that the batch-test script checks against.
# Entries that provide several variants of one method use a list of dicts.
# yapf: disable
atss = dict(
    config='configs/atss/atss_r50_fpn_1x_coco.py',
    checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth',
    eval='bbox',
    metric=dict(bbox_mAP=39.4),
)
autoassign = dict(
    config='configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
    checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.4),
)
carafe = dict(
    config='configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=38.6),
)
cascade_rcnn = [
    dict(
        config='configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py',
        checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth',
        eval='bbox',
        metric=dict(bbox_mAP=40.3),
    ),
    dict(
        config='configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
        checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth',
        eval=['bbox', 'segm'],
        metric=dict(bbox_mAP=41.2, segm_mAP=35.9),
    ),
]
cascade_rpn = dict(
    config='configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
    checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.4),
)
centripetalnet = dict(
    config='configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py',  # noqa
    checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=44.7),
)
cornernet = dict(
    config='configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py',
    checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=41.2),
)
dcn = dict(
    config='configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth',
    eval='bbox',
    metric=dict(bbox_mAP=41.3),
)
deformable_detr = dict(
    config='configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
    checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=44.5),
)
detectors = dict(
    config='configs/detectors/detectors_htc_r50_1x_coco.py',
    checkpoint='detectors_htc_r50_1x_coco-329b1453.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=49.1, segm_mAP=42.6),
)
detr = dict(
    config='configs/detr/detr_r50_8x2_150e_coco.py',
    checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.1),
)
double_heads = dict(
    config='configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
    checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.0),
)
dynamic_rcnn = dict(
    config='configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
    checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth',
    eval='bbox',
    metric=dict(bbox_mAP=38.9),
)
empirical_attention = dict(
    config='configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py',  # noqa
    checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=40.0),
)
faster_rcnn = dict(
    config='configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.4),
)
fcos = dict(
    config='configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py',  # noqa
    checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=38.7),
)
foveabox = dict(
    config='configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
    checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.9),
)
free_anchor = dict(
    config='configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
    checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth',
    eval='bbox',
    metric=dict(bbox_mAP=38.7),
)
fsaf = dict(
    config='configs/fsaf/fsaf_r50_fpn_1x_coco.py',
    checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.4),
)
gcnet = dict(
    config='configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py',  # noqa
    checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth',  # noqa
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=40.4, segm_mAP=36.2),
)
gfl = dict(
    config='configs/gfl/gfl_r50_fpn_1x_coco.py',
    checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.2),
)
gn = dict(
    config='configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
    checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=40.1, segm_mAP=36.4),
)
gn_ws = dict(
    config='configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth',
    eval='bbox',
    metric=dict(bbox_mAP=39.7),
)
grid_rcnn = dict(
    config='configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
    checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.4),
)
groie = dict(
    config='configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=38.3),
)
guided_anchoring = [
    dict(
        config='configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py',  # noqa
        checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth',
        eval='bbox',
        metric=dict(bbox_mAP=36.9),
    ),
    dict(
        config='configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
        checkpoint='ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth',  # noqa
        eval='bbox',
        metric=dict(bbox_mAP=39.6),
    ),
]
hrnet = dict(
    config='configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py',
    checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth',
    eval='bbox',
    metric=dict(bbox_mAP=36.9),
)
htc = dict(
    config='configs/htc/htc_r50_fpn_1x_coco.py',
    checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=42.3, segm_mAP=37.4),
)
libra_rcnn = dict(
    config='configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
    checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth',
    eval='bbox',
    metric=dict(bbox_mAP=38.3),
)
mask_rcnn = dict(
    config='configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
    checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=38.2, segm_mAP=34.7),
)
ms_rcnn = dict(
    config='configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
    checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=38.2, segm_mAP=36.0),
)
nas_fcos = dict(
    config='configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',  # noqa
    checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=39.4),
)
nas_fpn = dict(
    config='configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
    checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.5),
)
paa = dict(
    config='configs/paa/paa_r50_fpn_1x_coco.py',
    checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.4),
)
pafpn = dict(
    config='configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
    checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=37.5),
)
pisa = dict(
    config='configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py',
    checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth',
    eval='bbox',
    metric=dict(bbox_mAP=38.4),
)
point_rend = dict(
    config='configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
    checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=38.4, segm_mAP=36.3),
)
regnet = dict(
    config='configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
    checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth',  # noqa
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=40.4, segm_mAP=36.7),
)
reppoints = dict(
    config='configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py',
    checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.0),
)
res2net = dict(
    config='configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
    checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth',
    eval='bbox',
    metric=dict(bbox_mAP=43.0),
)
resnest = dict(
    config='configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py',  # noqa
    checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=42.0),
)
retinanet = dict(
    config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
    checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth',
    eval='bbox',
    metric=dict(bbox_mAP=36.5),
)
rpn = dict(
    config='configs/rpn/rpn_r50_fpn_1x_coco.py',
    checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth',
    eval='proposal_fast',
    metric=dict(AR_1000=58.2),
)
sabl = [
    dict(
        config='configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
        checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth',
        eval='bbox',
        metric=dict(bbox_mAP=37.7),
    ),
    dict(
        config='configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py',
        checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth',
        eval='bbox',
        metric=dict(bbox_mAP=39.9),
    ),
]
scnet = dict(
    config='configs/scnet/scnet_r50_fpn_1x_coco.py',
    checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth',
    eval='bbox',
    metric=dict(bbox_mAP=43.5),
)
sparse_rcnn = dict(
    config='configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
    checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.9),
)
ssd = [
    dict(
        config='configs/ssd/ssd300_coco.py',
        checkpoint='ssd300_coco_20210803_015428-d231a06e.pth',
        eval='bbox',
        metric=dict(bbox_mAP=25.5),
    ),
    dict(
        config='configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py',
        checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth',# noqa
        eval='bbox',
        metric=dict(bbox_mAP=21.3),
    ),
]
tridentnet = dict(
    config='configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
    checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.6),
)
vfnet = dict(
    config='configs/vfnet/vfnet_r50_fpn_1x_coco.py',
    checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth',
    eval='bbox',
    metric=dict(bbox_mAP=41.6),
)
yolact = dict(
    config='configs/yolact/yolact_r50_1x8_coco.py',
    checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=31.2, segm_mAP=29.0),
)
yolo = dict(
    config='configs/yolo/yolov3_d53_320_273e_coco.py',
    checkpoint='yolov3_d53_320_273e_coco-421362b6.pth',
    eval='bbox',
    metric=dict(bbox_mAP=27.9),
)
yolof = dict(
    config='configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
    checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.5),
)
centernet = dict(
    config='configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
    checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=29.5),
)
yolox = dict(
    config='configs/yolox/yolox_tiny_8x8_300e_coco.py',
    checkpoint='yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=31.5),
)
# yapf: enable
| 12,707 | 34.3 | 117 | py |
PseCo | PseCo-master/thirdparty/mmdetection/demo/webcam_demo.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import torch
from mmdet.apis import inference_detector, init_detector
def parse_args():
    """Parse the command-line options for the webcam demo."""
    cli = argparse.ArgumentParser(description='MMDetection webcam demo')
    cli.add_argument('config', help='test config file path')
    cli.add_argument('checkpoint', help='checkpoint file')
    cli.add_argument(
        '--device', type=str, default='cuda:0', help='CPU/CUDA device option')
    cli.add_argument(
        '--camera-id', type=int, default=0, help='camera device id')
    cli.add_argument(
        '--score-thr', type=float, default=0.5, help='bbox score threshold')
    return cli.parse_args()
def main():
    """Run the detector on a live camera feed and display the results.

    Press ``Esc``, ``q`` or ``Q`` in the display window to exit.
    """
    args = parse_args()
    device = torch.device(args.device)
    model = init_detector(args.config, args.checkpoint, device=device)
    camera = cv2.VideoCapture(args.camera_id)
    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        # NOTE(review): ret_val is ignored; a failed frame grab (camera
        # unplugged, bad --camera-id) passes None to the detector — consider
        # breaking when ret_val is False.
        ret_val, img = camera.read()
        result = inference_detector(model, img)
        ch = cv2.waitKey(1)
        # 27 is the Esc key code.
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break
        model.show_result(
            img, result, score_thr=args.score_thr, wait_time=1, show=True)
if __name__ == '__main__':
    main()
| 1,308 | 26.270833 | 78 | py |
# GHM RetinaNet with a ResNeXt-101-32x4d backbone, 1x COCO schedule.
# Everything except the backbone is inherited from the R-50 base config.
_base_ = './retinanet_ghm_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 423 | 27.266667 | 76 | py |
# GHM RetinaNet with a ResNet-101 backbone; only the backbone depth and
# pretrained weights differ from the R-50 base config.
_base_ = './retinanet_ghm_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 201 | 27.857143 | 61 | py |
# GHM RetinaNet with a ResNeXt-101-64x4d backbone, 1x COCO schedule.
# Everything except the backbone is inherited from the R-50 base config.
_base_ = './retinanet_ghm_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 423 | 27.266667 | 76 | py |
# Faster R-CNN with a ResNeXt-101-32x4d backbone using deformable
# convolutions in stages c3-c5 (stage_with_dcn skips the first stage).
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 557 | 31.823529 | 76 | py |
# HTC with a ResNeXt-101-64x4d backbone, 16 GPUs x 1 img/GPU, 20 epochs.
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# 1 image per GPU to fit the larger backbone in memory.
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| 591 | 28.6 | 76 | py |
# Hybrid Task Cascade (HTC) R-50-FPN, 1x COCO schedule, WITHOUT the
# semantic segmentation branch: a 3-stage cascade of box heads with
# progressively tighter IoU thresholds (0.5 / 0.6 / 0.7) plus mask heads
# with mask information flow between stages.
_base_ = [
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    type='HybridTaskCascade',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='HybridTaskCascadeRoIHead',
        interleaved=True,
        mask_info_flow=True,
        num_stages=3,
        # Later stages are down-weighted in the total loss.
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # One box head per cascade stage; regression target stds shrink
        # stage by stage as the boxes get more accurate.
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # One mask head per stage; the first stage has no incoming mask
        # information flow, hence with_conv_res=False.
        mask_head=[
            dict(
                type='HTCMaskHead',
                with_conv_res=False,
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
            dict(
                type='HTCMaskHead',
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
            dict(
                type='HTCMaskHead',
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
        ]),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        # One RCNN train config per cascade stage; IoU thresholds rise
        # 0.5 -> 0.6 -> 0.7 to match each stage's box head.
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.001,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            # NOTE(review): flip_ratio is ignored here because
            # MultiScaleFlipAug(flip=False) already fixes the flip flag.
            dict(type='RandomFlip', flip_ratio=0.5),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
| 8,333 | 34.164557 | 79 | py |
# HTC with a ResNeXt-101-32x4d backbone, 16 GPUs x 1 img/GPU, 20 epochs.
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
# 1 image per GPU to fit the larger backbone in memory.
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| 591 | 28.6 | 76 | py |
# HTC with ResNeXt-101-64x4d + deformable conv in c3-c5, multi-scale
# training with short side 400-1400, 16 GPUs x 1 img/GPU, 20 epochs.
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    # HTC needs semantic segmentation annotations in addition to
    # boxes and masks.
    dict(
        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(
        type='Resize',
        img_scale=[(1600, 400), (1600, 1400)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    # Semantic maps are predicted at 1/8 resolution.
    dict(type='SegRescale', scale_factor=1 / 8),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
data = dict(
    samples_per_gpu=1, workers_per_gpu=1, train=dict(pipeline=train_pipeline))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| 1,489 | 32.863636 | 79 | py |
# HTC with a ResNet-101 backbone, 20-epoch schedule.
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| 295 | 28.6 | 61 | py |
# RepPoints (moment transform) with ResNet-101 + deformable conv in c3-c5.
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 340 | 36.888889 | 72 | py |
# RepPoints (moment transform) with a ResNet-101 backbone.
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 217 | 30.142857 | 61 | py |
# RepPoints detector, R-50-FPN backbone, moment-based point-to-box
# transform, 1x COCO schedule.
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='RepPointsDetector',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='RepPointsHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        point_feat_channels=256,
        stacked_convs=3,
        num_points=9,
        # Scale down gradients flowing from the refine stage into the
        # initial point predictions.
        gradient_mul=0.1,
        point_strides=[8, 16, 32, 64, 128],
        point_base_scale=4,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
        loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
        transform_method='moment'),
    # training and testing settings
    train_cfg=dict(
        # Two assignment stages: point-based for the initial set,
        # IoU-based for the refined boxes.
        init=dict(
            assigner=dict(type='PointAssigner', scale=4, pos_num=1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        refine=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
optimizer = dict(lr=0.01)
| 2,065 | 29.382353 | 79 | py |
# RepPoints (moment) with ResNeXt-101-32x4d + deformable conv in c3-c5.
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 562 | 32.117647 | 76 | py |
# GFL with ResNeXt-101-32x4d + deformable conv in c4-c5 only
# (stage_with_dcn enables the last two stages), multi-scale 2x schedule.
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
    type='GFL',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, False, True, True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 585 | 29.842105 | 76 | py |
# GFL with a ResNeXt-101-32x4d backbone, multi-scale 2x schedule.
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
    type='GFL',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 461 | 26.176471 | 76 | py |
# GFL with a ResNet-101 backbone, multi-scale 2x schedule.
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 406 | 28.071429 | 61 | py |
# Generalized Focal Loss (GFL) detector, R-50-FPN backbone, 1x COCO
# schedule. Uses Quality Focal Loss for classification, Distribution
# Focal Loss for the discretized box regression, and ATSS assignment.
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='GFL',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='GFLHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
        # Number of discrete bins for the box-distance distribution.
        reg_max=16,
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
| 1,739 | 29 | 79 | py |
# GFL with ResNet-101 + deformable conv in c3-c5, multi-scale 2x schedule.
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 529 | 32.125 | 72 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py | _base_ = 'tridentnet_r50_caffe_1x_coco.py'
# mmdetection config: multi-scale training variant of TridentNet R-50 (caffe).
# Only the training pipeline is overridden, replacing the fixed-scale resize.
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        # randomly pick one of six target scales per image
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
data = dict(train=dict(pipeline=train_pipeline))
| 756 | 31.913043 | 72 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py | _base_ = 'tridentnet_r50_caffe_mstrain_1x_coco.py'
# mmdetection config: 3x-schedule variant of the multi-scale TridentNet config;
# only the LR decay steps and total epoch count are overridden.
lr_config = dict(step=[28, 34])  # decay learning rate at epochs 28 and 34
runner = dict(type='EpochBasedRunner', max_epochs=36)
| 138 | 26.8 | 53 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/tridentnet/tridentnet_r50_caffe_1x_coco.py | _base_ = [
    '../_base_/models/faster_rcnn_r50_caffe_c4.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# mmdetection config: TridentNet built on the Faster R-CNN R-50-C4 (caffe)
# base model, 1x schedule on COCO detection.
model = dict(
    type='TridentFasterRCNN',
    backbone=dict(
        type='TridentResNet',
        trident_dilations=(1, 2, 3),  # one dilation rate per trident branch
        num_branch=3,
        test_branch_idx=1,  # branch index selected at test time
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    roi_head=dict(type='TridentRoIHead', num_branch=3, test_branch_idx=1),
    train_cfg=dict(
        rpn_proposal=dict(max_per_img=500),
        rcnn=dict(
            sampler=dict(num=128, pos_fraction=0.5,
                         add_gt_as_proposals=False))))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
| 1,868 | 32.375 | 74 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py | _base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
# mmdetection config: RetinaNet R-50 FPN baseline for the NAS-FPN experiments,
# trained on 640x640 random crops for 50 epochs.
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,  # BN statistics are updated during training
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        relu_before_extra_convs=True,
        no_norm_on_lateral=True,
        norm_cfg=norm_cfg),
    bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
    # training and testing settings
    train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=(640, 640),
        ratio_range=(0.8, 1.2),  # random scale jitter before cropping
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=(640, 640)),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(640, 640),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.08,  # large batch (8 imgs/GPU), hence the high base LR
    momentum=0.9,
    weight_decay=0.0001,
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.1,
    step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
| 2,488 | 29.728395 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py | _base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
# mmdetection config: RetinaNet R-50 with a NAS-FPN neck, trained on 640x640
# random crops for 50 epochs on COCO.
cudnn_benchmark = True
# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,  # BN statistics are updated during training
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(type='NASFPN', stack_times=7, norm_cfg=norm_cfg),  # 7 stacked NAS-FPN cells
    bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
    # training and testing settings
    train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=(640, 640),
        ratio_range=(0.8, 1.2),  # random scale jitter before cropping
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=(640, 640)),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(640, 640),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=128),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.08,  # large batch (8 imgs/GPU), hence the high base LR
    momentum=0.9,
    weight_decay=0.0001,
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.1,
    step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
| 2,478 | 29.9875 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/paa/paa_r50_fpn_1x_coco.py | _base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# mmdetection config: PAA (Probabilistic Anchor Assignment) detector with a
# ResNet-50 backbone and FPN neck, 1x schedule on COCO detection.
model = dict(
    type='PAA',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # expose all four stage outputs to the neck
        frozen_stages=1,  # keep the earliest backbone stage frozen
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,  # BN layers kept in eval mode (frozen statistics)
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='PAAHead',
        reg_decoded_bbox=True,  # regression loss is computed on decoded boxes
        score_voting=True,
        topk=9,
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],  # a single square anchor shape per location
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            # loose IoU thresholds: PAA re-assigns candidates probabilistically
            pos_iou_thr=0.1,
            neg_iou_thr=0.1,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,  # keep at most 1000 candidates before NMS
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
| 2,120 | 28.873239 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/paa/paa_r101_fpn_mstrain_3x_coco.py | _base_ = './paa_r50_fpn_mstrain_3x_coco.py'
# mmdetection config: PAA with a ResNet-101 backbone; inherits the R-50
# multi-scale 3x config, overriding only backbone depth and init weights.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 199 | 27.571429 | 61 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/paa/paa_r101_fpn_1x_coco.py | _base_ = './paa_r50_fpn_1x_coco.py'
# mmdetection config: PAA with a ResNet-101 backbone; inherits the R-50 1x
# config, overriding only backbone depth and init weights.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 191 | 26.428571 | 61 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/yolact/yolact_r50_1x8_coco.py | _base_ = '../_base_/default_runtime.py'
# mmdetection config: YOLACT real-time instance segmentation with a ResNet-50
# backbone, 550x550 inputs, 8 images per GPU, on COCO.
# model settings
img_size = 550  # square input resolution used by both pipelines below
model = dict(
    type='YOLACT',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=-1,  # do not freeze stem
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=False,  # update the statistics of bn
        zero_init_residual=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5,
        upsample_cfg=dict(mode='bilinear')),
    bbox_head=dict(
        type='YOLACTHead',
        num_classes=80,  # COCO has 80 object categories
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=3,
            scales_per_octave=1,
            base_sizes=[8, 16, 32, 64, 128],
            ratios=[0.5, 1.0, 2.0],
            # strides/centers derived from the 550-px input and the feature-map
            # sizes [69, 35, 18, 9, 5] of the five FPN levels
            strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
            centers=[(550 * 0.5 / x, 550 * 0.5 / x)
                     for x in [69, 35, 18, 9, 5]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            reduction='none',  # per-sample losses needed for OHEM below
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
        num_head_convs=1,
        num_protos=32,  # number of prototype masks predicted per image
        use_ohem=True),
    mask_head=dict(
        type='YOLACTProtonet',
        in_channels=256,
        num_protos=32,
        num_classes=80,
        max_masks_to_train=100,
        loss_mask_weight=6.125),
    segm_head=dict(
        type='YOLACTSegmHead',
        num_classes=80,
        in_channels=256,
        loss_segm=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0.,
            ignore_iof_thr=-1,
            gt_max_assign_all=False),
        # smoothl1_beta=1.,
        allowed_border=-1,
        pos_weight=-1,
        neg_pos_ratio=3,  # OHEM: 3 negatives per positive
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        iou_thr=0.5,
        top_k=200,
        max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)),  # drop tiny boxes
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(img_size, img_size), keep_ratio=False),  # warp to square
    dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(img_size, img_size),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.1,
    step=[20, 42, 49, 52])
runner = dict(type='EpochBasedRunner', max_epochs=55)
cudnn_benchmark = True
evaluation = dict(metric=['bbox', 'segm'])  # evaluate both boxes and masks
| 5,103 | 30.701863 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/yolact/yolact_r101_1x8_coco.py | _base_ = './yolact_r50_1x8_coco.py'
# mmdetection config: YOLACT with a ResNet-101 backbone; inherits the R-50
# config, overriding only backbone depth and init weights.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 192 | 23.125 | 61 | py |
PseCo | PseCo-master/thirdparty/mmdetection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py | _base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
# mmdetection config: PointRend instance segmentation on top of the Mask R-CNN
# R-50 (caffe) FPN multi-scale 1x base; swaps the mask branch for a coarse mask
# head plus a point-wise refinement head.
# model settings
model = dict(
    type='PointRend',
    roi_head=dict(
        type='PointRendRoIHead',
        mask_roi_extractor=dict(
            type='GenericRoIExtractor',
            aggregation='concat',
            roi_layer=dict(
                _delete_=True, type='SimpleRoIAlign', output_size=14),  # drop the inherited roi_layer entirely
            out_channels=256,
            featmap_strides=[4]),  # extract mask features from the finest level only
        mask_head=dict(
            _delete_=True,  # replace (not merge with) the inherited FCN mask head
            type='CoarseMaskHead',
            num_fcs=2,
            in_channels=256,
            conv_out_channels=256,
            fc_out_channels=1024,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
        point_head=dict(
            type='MaskPointHead',
            num_fcs=3,
            in_channels=256,
            fc_channels=256,
            num_classes=80,
            coarse_pred_each_layer=True,
            loss_point=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rcnn=dict(
            mask_size=7,
            num_points=14 * 14,  # points sampled per RoI for the point loss
            oversample_ratio=3,
            importance_sample_ratio=0.75)),
    test_cfg=dict(
        rcnn=dict(
            subdivision_steps=5,
            subdivision_num_points=28 * 28,
            scale_factor=2)))
| 1,453 | 31.311111 | 75 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.