repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/murtysplitSimple.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
last mod 3/14/19
These functions reorder rows and columns before creating subproblems.
The goal is to set it up so the first subproblem fixes everything
but the first non-missing row.
One row and column is unfixed (w/ match or miss eliminated) every new problem.
"""
import numba as nb
from sparsity import nbsparsedtype
nbpairtype = nb.typeof((0,0))
# reorder rows so that misses are first
# last row should always remain last, so previous eliminations are kept
# reorder columns so that they are eliminated in order along with the rows
@nb.jit(nbpairtype(nbsparsedtype[:,:], nb.i8[:], nb.i8[:], nb.f8[:], nb.i8[:],
                   nb.i8, nb.i8[:], nb.i8), nopython=True)
def murtySplit(c, x, y, v, rows2use, m2, cols2use, n2):
    """Reorder rows2use/cols2use in place before creating Murty subproblems.

    c: sparse cost matrix (not read here; kept for a uniform call signature)
    x: matched column per row, -1 for a missing row
    y: matched row per column, -1 for a missing column
    v: column duals (not read here)
    rows2use, m2: active rows are rows2use[:m2]
    cols2use, n2: active columns are cols2use[:n2]
    Returns (first row index to eliminate on, first column index to
    eliminate on) into the reordered rows2use/cols2use.
    NOTE: Python 2 code (xrange) compiled by numba in nopython mode.
    """
    m3 = 0 # number of missing rows
    # partition pass: swap missing rows to the front of rows2use;
    # the last row is skipped so previous eliminations are kept last
    for ri in xrange(m2-1):
        i = rows2use[ri]
        j = x[i]
        if j == -1: # missing row
            rows2use[ri] = rows2use[m3]
            rows2use[m3] = i
            m3 += 1
    # if the (protected) last active row is itself missing, shrink the
    # active row count instead of moving it
    if x[rows2use[m2-1]] == -1:
        m2 -= 1
    n3 = 0 # number of missing columns
    for cj in xrange(n2):
        j = cols2use[cj]
        if y[j] == -1:
            cols2use[cj] = cols2use[n3]
            cols2use[n3] = j
            n3 += 1
    assert n2-n3==m2-m3 # number of reported matches is the same
    # line the matched columns up with their matched rows
    cols2use[n3:n2] = x[rows2use[m3:m2]]
    # if there are missing columns, must eliminate on all rows
    # if no missing columns, can eliminate only matched rows
    return (0, n3) if n3 > 0 else (m3, 0)
| 1,533
| 34.674419
| 78
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/example_3frame.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
last mod 3/11/19
"""
import numpy as np
from time import time
#from daSparse import da, allocateWorkVarsforDA
#from sparsity import sparsify
from daDense import da, allocateWorkVarsforDA
from sspDense import SSP # used for evaluation
ntests = 10
max_ns = 1000
max_nhyp = 1000
s = 10
entryrate = 100 # poisson rate of object entry
fpratio = .005 # poisson rate of fp msmts, wrt entry rate
detect_rate = .995 # detection probability at each time
std = .001 # standard deviation of msmt noise
miss_distance_cutoffs = np.arange(.1,1.01,.1)
np.random.seed(34)
fprate = fpratio*entryrate
def likelihood1(c, msmts1, msmts2):
    """Fill the top-left corner of c with the pairwise NLL that measurement
    i of msmts1 and measurement j of msmts2 came from the same object.

    Only the first coordinate is shared between the two frames, so only it
    is compared. Uses the module-level std / fpratio / detect_rate /
    entryrate constants. Mutates c in place; returns None.
    """
    # variance of the difference of two noisy observations, doubled
    # (normalizing out inv(Sigma))
    var_constant = 4*std**2
    # constant part of the NLL: Gaussian normalizer + miss/entry log-terms
    offset = .5*np.log(np.pi*var_constant)
    offset += 2*np.log(fpratio/detect_rate+1-detect_rate) + np.log(entryrate)
    ncols = len(msmts2)
    for row, msmt in enumerate(msmts1):
        # one vectorized pass per row instead of an explicit inner loop
        c[row, :ncols] = np.square(msmt[0] - msmts2[:, 0])/var_constant + offset
pair_miss_exist_prob = (1-detect_rate)*detect_rate/(fpratio+(1-detect_rate)*detect_rate)
def update1(update_matches, msmts1, msmts2, samples, weights):
    """Write updated samples/weights for each first-update match in place.

    update_matches: rows of (i, j) pairing a msmts1 index with a msmts2
    index, -1 meaning a miss; all-null (-1, -1) rows are grouped at the
    tail of the array. Returns the count of non-null samples written.
    Uses the module-level std and pair_miss_exist_prob constants.
    """
    match_var = std**2 / 2
    miss_var = std**2
    for sidx, (i, j) in enumerate(update_matches):
        seen1 = i != -1
        seen2 = j != -1
        if not seen1 and not seen2:
            # all the null updates should be at the end of the array
            return sidx
        if seen1 and seen2:
            samples[sidx] = ((msmts1[i,0]+msmts2[j,0])*.5, msmts1[i,1], msmts2[j,1],
                             match_var, miss_var, miss_var)
            weights[sidx] = 1.
        elif seen1:
            samples[sidx] = (msmts1[i,0], msmts1[i,1], -1,
                             miss_var, miss_var, -1)
            weights[sidx] = pair_miss_exist_prob
        else:
            samples[sidx] = (msmts2[j,0], -1, msmts2[j,1],
                             miss_var, -1, miss_var)
            weights[sidx] = pair_miss_exist_prob
    return update_matches.shape[0]
third_miss_loglik = np.log(entryrate) + np.log((1-detect_rate)**2*detect_rate + fpratio)
def likelihood2(c, samples, weights, ns, msmts3):
    """Fill c[i, :len(msmts3)] with the NLL of pairing updated sample i
    with each third-frame measurement.

    samples[i] encodes which of the first two frames were observed:
    sample[5] == -1 means only msmts1 was seen, sample[4] == -1 means only
    msmts2 was seen; the dimensions compared against msmts3 differ per
    case. Mutates c in place; `weights` is accepted but not read here.
    Uses module-level std / detect_rate / pair_miss_exist_prob /
    third_miss_loglik constants.
    """
    twopiterm = np.log(2*np.pi)*.5
    # variance of the difference of two noisy observations
    msmt_var = std**2 * 2
    nm = len(msmts3)
    for i in xrange(ns):
        sample = samples[i]
        if sample[5] == -1: # only msmt1, so only match on 2nd dimension
            constant_term_i = third_miss_loglik
            constant_term_i += np.log(1./pair_miss_exist_prob/detect_rate-1)
            constant_term_i += np.log(msmt_var)*.5
            constant_term_i += twopiterm
            c[i,:nm] = np.square(sample[1]-msmts3[:,0])/msmt_var + constant_term_i
        elif sample[4] == -1: # only msmt2, so only match on 3rd dimension
            constant_term_i = third_miss_loglik
            constant_term_i += np.log(1./pair_miss_exist_prob/detect_rate-1)
            constant_term_i += np.log(msmt_var)*.5
            constant_term_i += twopiterm
            c[i,:nm] = np.square(sample[2]-msmts3[:,1])/msmt_var + constant_term_i
        else: # both
            # two-dimensional match: two normalizers, no *.5 on log-var
            constant_term_i = third_miss_loglik
            constant_term_i += np.log(1./detect_rate-1)
            constant_term_i += np.log(msmt_var)
            constant_term_i += twopiterm*2
            c[i,:nm] = np.square(sample[1]-msmts3[:,0])
            c[i,:nm] += np.square(sample[2]-msmts3[:,1])
            c[i,:nm] /= msmt_var
            c[i,:nm] += constant_term_i
# probability of msmt from third set, with no matches, being real and not fp
third_exist_prob = (1-detect_rate)**2*detect_rate
third_exist_prob = third_exist_prob / (third_exist_prob + fpratio)
def update2(update_matches2, update_matches, new_samples, new_weights, msmts1, msmts2, msmts3):
    """Write the third-frame-updated samples and weights in place.

    update_matches2: rows of (id12, id3) pairing a first-update sample
        index with a msmts3 index (-1 = miss); null (-1, -1) rows are
        grouped at the end of the array.
    update_matches: the first-update (id1, id2) pairs, indexed by id12.
    new_samples, new_weights: output arrays, mutated in place.
    Returns the number of non-null samples written (mirrors update1).
    Uses the module-level third_exist_prob constant.
    NOTE(review): multiply-observed coordinates are stored as SUMS here
    (no *.5 averaging as in update1) -- presumably normalized downstream;
    TODO confirm against the caller.
    """
    for sidx, update_match2 in enumerate(update_matches2):
        new_sample = new_samples[sidx]
        id12, id3 = update_match2
        if id12 == -1:
            if id3 == -1:
                # null updates are grouped at the end of the array: done
                return sidx
            else:
                # object seen only in the third frame
                new_weights[sidx] = third_exist_prob
                new_sample[0] = .5
                new_sample[1:3] = msmts3[id3, :2]
        else:
            id1, id2 = update_matches[id12]
            # any miss among the three frames -> partial-existence weight
            if sum((id1==-1, id2==-1, id3==-1)):
                new_weights[sidx] = third_exist_prob
            else:
                new_weights[sidx] = 1.
            if id1 == -1:
                if id3 == -1:
                    new_sample[0] = msmts2[id2, 0]
                    new_sample[1] = .5
                    new_sample[2] = msmts2[id2, 1]
                else:
                    new_sample[0] = msmts2[id2, 0]
                    new_sample[1] = msmts3[id3, 0]
                    new_sample[2] = msmts2[id2,1] + msmts3[id3,1]
            elif id2 == -1:
                if id3 == -1:
                    new_sample[0] = msmts1[id1, 0]
                    new_sample[1] = msmts1[id1, 1]
                    new_sample[2] = .5
                else:
                    new_sample[0] = msmts1[id1,0]
                    new_sample[1] = msmts1[id1,1] + msmts3[id3,0]
                    new_sample[2] = msmts3[id3,1]
            elif id3 == -1:
                new_sample[0] = msmts1[id1,0] + msmts2[id2,0]
                new_sample[1] = msmts1[id1,1]
                new_sample[2] = msmts2[id2,1]
            else:
                new_sample[0] = msmts1[id1,0] + msmts2[id2,0]
                new_sample[1] = msmts1[id1,1] + msmts3[id3,0]
                new_sample[2] = msmts2[id2,1] + msmts3[id3,1]
    # BUGFIX: previously fell off the end and returned None when no null
    # (-1, -1) entry was present; the caller assigns the result to new_ns.
    # Mirror update1 and report that every slot was used.
    return update_matches2.shape[0]
def scoreObj(tru, est):
    """Score estimated object states against true objects at a range of
    miss-distance cutoffs.

    tru: (m, 3) true states; est: samples whose first 3 entries are the
    estimated coordinates. For each cutoff in the module-level
    miss_distance_cutoffs, solves an assignment (via the sibling SSP
    solver) between truths and estimates and counts unmatched truths (nFN)
    and unmatched estimates (nFP).
    Returns an array of (nFN, nFP, m, n) rows, one per cutoff.
    """
    # pairwise Euclidean distance between every truth and every estimate
    c2 = [[sum(np.square(sample[:3]-truobj)) for sample in est] for truobj in tru]
    c2 = np.sqrt(c2)
    m,n = c2.shape
    x = np.zeros(m, dtype=int)
    y = np.zeros(n, dtype=int)
    pred = np.zeros(n, dtype=int)
    d = np.zeros(n,)
    v = np.zeros(n,)
    rows2use = np.arange(m)
    cols2use = np.arange(n)
    scores = []
    for miss_cutoff in miss_distance_cutoffs:
        # reset the assignment state before each solve
        x[:] = -1
        y[:] = -1
        v[:] = 0
        # shift by the cutoff so pairs beyond it cost more than a miss
        SSP(c2 - miss_cutoff, x, y, v, rows2use, m, cols2use, n, d, pred)
        nFN = sum(x==-1)
        nFP = sum(y==-1)
        scores.append((nFN,nFP,m,n))
    return np.array(scores)
def scoreTrack(tru_tracks, tru_m, update_matches, update_matches2):
    """Count how many true measurement tracks the hypothesis recovered.

    tru_tracks: (T, 3) array of (id1, id2, id3) measurement indices per
        true track, -1 where the object went undetected in that frame.
    tru_m: number of real objects (the leading rows of tru_tracks).
    update_matches: (id1, id2) pairs indexed by the first element of each
        update_matches2 row; update_matches2: (id12, id3) proposed tracks.
    Returns (nFN, nFP, tru_m, number of proposed tracks).
    """
    track_found = np.zeros(tru_tracks.shape[0], dtype=bool)
    fpcount = 0
    pcount = 0
    for id12, id3 in update_matches2:
        if id12 == -1:
            id1 = -1
            id2 = -1
        else:
            id1, id2 = update_matches[id12]
        # BUGFIX: was `if all((id1==-1,id2==-1,id3==-1)) == 3:` -- `all`
        # returns a bool, never 3, so all-miss entries were never skipped
        # and got scored as proposals (possibly matching an all -1 row).
        if id1 == -1 and id2 == -1 and id3 == -1:
            continue
        in_tru_tracks = np.all(tru_tracks == (id1, id2, id3), axis=1)
        if any(in_tru_tracks):
            # mark the (unique) matching true track as recovered
            track_found[np.where(in_tru_tracks)[0][0]] = True
        else:
            fpcount += 1
        pcount += 1
    # real objects whose full track was never proposed
    fncount = tru_m - np.sum(track_found[:tru_m])
    return fncount, fpcount, tru_m, pcount
max_nm = entryrate + int(fprate*6) + 3 # poisson cdf @ 6 = .99992
timed_total_all = 0.
timed_update_all = 0.
obj_scores_all = np.zeros((miss_distance_cutoffs.shape[0],4), dtype=int)
track_scores_all = np.zeros(4, dtype=int)
samples = np.zeros((max_ns, 6))
weights = np.zeros((max_ns,))
hypotheses = np.zeros((max_nhyp, max_ns), dtype=bool)
hypothesis_weights = np.zeros((max_nhyp,))
ids = np.zeros((max_ns,), dtype=np.uint16)
ns = 0
new_samples = samples.copy()
new_weights = weights.copy()
new_hypotheses = hypotheses.copy()
new_hypothesis_weights = hypothesis_weights.copy()
new_ids = ids.copy()
new_ns = 0
c1 = np.zeros((max_ns, max_nm))
c2 = c1.copy()
update_matches = np.zeros((max_ns, 2), dtype=int)
update_matches2 = np.zeros((max_ns, 2), dtype=int)
workvars = allocateWorkVarsforDA(max_ns, max_nm, max_nhyp)
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backidx1 = workvars
backidx2 = backidx1.copy()
row_sets = np.zeros((1,max_ns), dtype=bool)
col_sets = np.zeros((1,max_nm), dtype=bool)
includerowsorcols_dummy = np.zeros(1)
for test in xrange(ntests):
# generate real objects
tru_m = entryrate#np.random.poisson(entryrate)
tru = np.random.rand(tru_m, 3)
tru_tracks = np.zeros((tru_m, 3), dtype=int) - 1
# generate three sets of measurements
detected = np.random.rand(tru_m) < detect_rate
nreal = sum(detected)
nfalse = np.random.poisson(fprate)
msmts1 = tru[detected][:,[0,1]]+np.random.normal(size=(nreal,2))*std
msmts1 = np.append(msmts1, np.random.rand(nfalse, 2), axis=0)
tru_tracks[:tru_m][detected,0] = np.arange(nreal)
tru_tracks_false = np.zeros((nfalse, 3), dtype=int)-1
tru_tracks_false[:,0] = np.arange(nreal, nreal+nfalse)
tru_tracks = np.append(tru_tracks, tru_tracks_false, axis=0)
nm1 = nreal+nfalse
detected = np.random.rand(tru_m) < detect_rate
nreal = sum(detected)
nfalse = np.random.poisson(fprate)
msmts2 = tru[detected][:,[0,2]]+np.random.normal(size=(nreal,2))*std
msmts2 = np.append(msmts2, np.random.rand(nfalse, 2), axis=0)
tru_tracks[:tru_m][detected,1] = np.arange(nreal)
tru_tracks_false = np.zeros((nfalse, 3), dtype=int)-1
tru_tracks_false[:,1] = np.arange(nreal, nreal+nfalse)
tru_tracks = np.append(tru_tracks, tru_tracks_false, axis=0)
nm2 = nreal+nfalse
detected = np.random.rand(tru_m) < detect_rate
nreal = sum(detected)
nfalse = np.random.poisson(fprate)
msmts3 = tru[detected][:,[1,2]]+np.random.normal(size=(nreal,2))*std
msmts3 = np.append(msmts3, np.random.rand(nfalse, 2), axis=0)
tru_tracks[:tru_m][detected,2] = np.arange(nreal)
tru_tracks_false = np.zeros((nfalse, 3), dtype=int)-1
tru_tracks_false[:,2] = np.arange(nreal, nreal+nfalse)
tru_tracks = np.append(tru_tracks, tru_tracks_false, axis=0)
nm3 = nreal+nfalse
# first update
timed_total = time()
likelihood1(c1, msmts1, msmts2)
cs = c1#cs = sparsify(c1, s)
row_sets[0,:nm1] = True
row_sets[0,nm1:] = False
col_sets[0,:nm2] = True
col_sets[0,nm2:] = False
timed_start = time()
da(cs, row_sets, includerowsorcols_dummy, col_sets, includerowsorcols_dummy,
update_matches, hypotheses, hypothesis_weights,
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backidx1)
timed_update = time() - timed_start
ns = update1(update_matches, msmts1, msmts2, samples, weights)
# find likelihood between updated objects and third set of measurements
likelihood2(c2, samples, weights, ns, msmts3)
cs = c2#cs = sparsify(c2, s)
# account for the fact that each row miss is normalized
missliks = np.log(1-weights*detect_rate)
missliks_hyp = np.dot(hypotheses, missliks)
hypothesis_weights -= missliks_hyp
col_sets[0,:nm3] = True
col_sets[0,nm3:] = False
# second update
timed_start = time()
da(cs, hypotheses, hypothesis_weights, col_sets, includerowsorcols_dummy,
update_matches2, new_hypotheses, new_hypothesis_weights,
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backidx2)
timed_update += time() - timed_start
new_ns = update2(update_matches2, update_matches, new_samples, new_weights,
msmts1, msmts2, msmts3)
timed_total = time() - timed_total
## analysis of how hypotheses match truth, for debugging purposes
tru_matches_1_valid = (tru_tracks[:,0] >= 0) | (tru_tracks[:,1] >= 0)
tru_matches_1 = backidx1[tru_tracks[tru_matches_1_valid,0],
tru_tracks[tru_matches_1_valid,1]]
tru_matches_not_here = sum(tru_matches_1 == -1)
if tru_matches_not_here == 0:
tru_hypothesis = np.zeros(hypotheses.shape[1], dtype=bool)
tru_hypothesis[tru_matches_1] = True
matching_hypotheses = np.where(np.all(hypotheses==tru_hypothesis,axis=1))[0]
assert len(matching_hypotheses) <= 1
if len(matching_hypotheses) == 1:
matching_hypothesis = matching_hypotheses[0]
tru_matches_2_score = tru_matches_1_valid & (tru_tracks[:,2] >= 0)
tru_matches_2in = tru_matches_1[tru_tracks[tru_matches_1_valid,2] >= 0]
total_prob = -sum(missliks[tru_matches_1])
total_prob += sum(c2[tru_matches_2in,
tru_tracks[tru_matches_2_score,2]])
tru_matches_1_score = (tru_tracks[:,0] >= 0) & (tru_tracks[:,1] >= 0)
total_prob += sum(c1[tru_tracks[tru_matches_1_valid,0],
tru_tracks[tru_matches_1_valid,1]])
if total_prob + 1e-4 < new_hypothesis_weights[0]:
print("probable error")
else:
tru_assignment_rank = np.searchsorted(new_hypothesis_weights, total_prob)
# score
timed_update_all += timed_update
timed_total_all += timed_total
include_samples = new_hypotheses[0] & (new_weights > .5)
track_scores_all += scoreTrack(tru_tracks, tru_m, update_matches,
update_matches2[new_hypotheses[0]])
obj_scores_all += scoreObj(tru, new_samples[include_samples])
timed_update_all *= 1000./ntests
timed_total_all *= 1000./ntests
obj_score_rates = obj_scores_all[:,:2].astype(float)/obj_scores_all[:,2:]
track_score_rates = track_scores_all[:2].astype(float)/track_scores_all[2:]
#score_rates = track_score_rates
score_rates = np.append(track_score_rates[None,:], obj_score_rates, axis=0)
print("{:.1f} update, {:.1f} total".format(timed_update_all, timed_total_all))
print(score_rates)
| 13,587
| 40.05136
| 95
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/murtysplitLookaheadSparse.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
last mod 3/14/19
These functions reorder rows and columns before creating subproblems.
The goal is to set it up so the first subproblem fixes everything
but the first non-missing row.
One row and column is unfixed (w/ match or miss eliminated) every new problem.
"""
import numpy as np
import numba as nb
from sparsity import nbsparsedtype
nbpairtype = nb.typeof((0,0))
# reorder rows so that misses are first
# last row should always remain last, so previous eliminations are kept
# reorder columns so that they are eliminated in order along with the rows
@nb.jit(nbpairtype(nbsparsedtype[:,:], nb.i8[:], nb.i8[:], nb.f8[:], nb.i8[:],
                   nb.i8, nb.i8[:], nb.i8), nopython=True)
def partitionDefault(c, x, y, v, rows2use, m2, cols2use, n2):
    """Default in-place reorder of rows2use/cols2use before Murty splitting
    (same algorithm as murtysplitSimple.murtySplit; used as the fallback
    when the lookahead variant below is not applicable).

    x/y: matched column per row / row per column, -1 = miss.
    rows2use[:m2], cols2use[:n2]: the active problem.
    Returns (first row to eliminate on, first column to eliminate on).
    NOTE: Python 2 code (xrange) compiled by numba in nopython mode.
    """
    m3 = 0 # number of missing rows
    # move missing rows to the front; skip the protected last row
    for ri in xrange(m2-1):
        i = rows2use[ri]
        j = x[i]
        if j == -1: # missing row
            rows2use[ri] = rows2use[m3]
            rows2use[m3] = i
            m3 += 1
    # if the last active row is missing, shrink the active set instead
    if x[rows2use[m2-1]] == -1:
        m2 -= 1
    n3 = 0 # number of missing columns
    for cj in xrange(n2):
        j = cols2use[cj]
        if y[j] == -1:
            cols2use[cj] = cols2use[n3]
            cols2use[n3] = j
            n3 += 1
    assert n2-n3==m2-m3 # number of reported matches is the same
    # line the matched columns up with their matched rows
    cols2use[n3:n2] = x[rows2use[m3:m2]]
    # if there are missing columns, must eliminate on all rows
    # if no missing columns, can eliminate only matched rows
    return (0, n3) if n3 > 0 else (m3, 0)
@nb.jit(nbpairtype(nbsparsedtype[:,:], nb.i8[:], nb.i8[:], nb.f8[:], nb.i8[:],
                   nb.i8, nb.i8[:], nb.i8, nb.f8[:], nb.i8[:], nb.i8[:]), nopython=True)
def murtySplit(c, x, y, v, rows2use, m2, cols2use, n2,
               row_cost_estimates, row_best_columns, pred):
    """Lookahead variant of the Murty split reordering.

    Like partitionDefault, but additionally orders the matched rows by an
    estimated partition cost (the cheapest alternative column for each row,
    reduced by the column duals v), so the most expensive rows are
    partitioned last and their subproblems keep the most freedom.
    c is a sparse matrix whose entries carry 'idx' (column) and 'x' (cost)
    fields; pred is reused here as a column-membership scratch flag.
    row_cost_estimates / row_best_columns are per-row workspaces.
    Returns (first row to eliminate on, number of missing columns).
    NOTE: Python 2 code (xrange) compiled by numba in nopython mode.
    """
    if m2 <= 2 or n2 <= 1:
        # too small for lookahead to matter
        return partitionDefault(c, x, y, v, rows2use, m2, cols2use, n2)
    # mark which columns are active in pred (scratch use)
    pred[:] = 0
    pred[cols2use[:n2]] = 1
    # order missing columns at beginning, they will not be removed no matter
    # the partition order
    n3 = 0 # number of missing columns
    for cj in xrange(n2):
        j = cols2use[cj]
        if y[j] == -1:
            cols2use[cj] = cols2use[n3]
            cols2use[n3] = j
            n3 += 1
    n_missing_cols = n3
    # set aside row m2-1 and its column
    last_column = x[rows2use[m2-1]]
    if last_column != -1:
        # swap that row's column to the end of the active range
        for cj in xrange(n2-1):
            j = cols2use[cj]
            if j == last_column:
                cols2use[cj] = cols2use[n2-1]
                cols2use[n2-1] = j
        n2 -= 1 # don't use this column in lookahead
        pred[last_column] = 0
    m2 -= 1
    # determine if all rows will be eliminated or not
    n_not_eliminated_rows = 0
    if n_missing_cols == 0:
        # in this case, you can keep missing rows at the beginning and not fix them
        m3 = 0 # number of missing rows
        for ri in xrange(m2):
            i = rows2use[ri]
            j = x[i]
            if j == -1: # missing row
                rows2use[ri] = rows2use[m3]
                rows2use[m3] = i
                m3 += 1
        assert m3 == m2 - n2
        n_not_eliminated_rows = m3
    # find estimated cost for row --- min(c'[i,j]) for j!=x[i]
    for ri in xrange(n_not_eliminated_rows, m2):
        i = rows2use[ri]
        j = x[i]
        ui = 0.
        minval = 1e3 if j==-1 else 0. # value of missing
        minj = -1
        for cij in c[i]:
            j2 = cij['idx']
            if pred[j2]: # only consider columns still in the active set
                dj = cij['x'] - v[j2]
                if j2 == j:
                    ui = dj # dual of the row's current match
                else:
                    if dj < minval:
                        minval = dj
                        minj = j2
        row_cost_estimates[ri] = minval - ui
        row_best_columns[ri] = minj
    n3 = n2
    for m3 in xrange(m2-1, n_not_eliminated_rows-1, -1):
        # choose the *worst* current row and partition on this *last*
        # meaning that partition has the fewest fixed rows & the most freedom
        worst_ri = np.argmax(row_cost_estimates[n_not_eliminated_rows:m3+1])
        worst_ri += n_not_eliminated_rows
        worst_i = rows2use[worst_ri]
        rows2use[worst_ri] = rows2use[m3]
        rows2use[m3] = worst_i
        # don't want to pick this row again, can just overwrite it
        row_cost_estimates[worst_ri] = row_cost_estimates[m3]
        row_best_columns[worst_ri] = row_best_columns[m3]
        deadj = x[worst_i]
        if deadj != -1:
            # swap columns so this particular column matches that row
            for cj in xrange(n3):
                j = cols2use[cj]
                if j == deadj:
                    cols2use[cj] = cols2use[n3-1]
                    cols2use[n3-1] = deadj
                    break
            pred[deadj] = 0
            n3 -= 1
            # update other cost estimates that had picked the same column
            for ri in xrange(n_not_eliminated_rows, m3):
                if row_best_columns[ri] == deadj:
                    # recalculate without deadj
                    i = rows2use[ri]
                    j = x[i]
                    ui = 0.
                    minval = 1e3 if j==-1 else 0. # value of missing
                    minj = -1
                    for cij in c[i]:
                        j2 = cij['idx']
                        if pred[j2]:
                            dj = cij['x'] - v[j2]
                            if j2 == j:
                                ui = dj
                            else:
                                if dj < minval:
                                    minval = dj
                                    minj = j2
                    row_cost_estimates[ri] = minval - ui
                    row_best_columns[ri] = minj
    return n_not_eliminated_rows, n_missing_cols
| 5,911
| 35.95
| 85
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/example_2frame.py
|
# -*- coding: utf-8 -*-
"""
Runs single-input K-best associations algorithm on square random matrices.
This test is meant to be directly comparable to the test code included with
Miller+Stone+Cox's implementation of data association.

NOTE: Python 2 script (uses xrange); depends on the sibling daSparse and
sparsity modules.
"""
import numpy as np
from time import time
from daSparse import da, allocateWorkVarsforDA
from sparsity import sparsify
np.random.seed(23)
numtests = 100
nsols = 200 # number of best associations to report
sizes = np.arange(10, 301, 10)
sparsity = 30
my_results = []
for size in sizes:
    # max_val = -.1 # misses will occur (but are unlikely for large matrices)
    max_val = -float(size+1) # to ensure that misses are never picked
    noutsamples = size*5
    timed_total = 0.
    relative_cost = 0.
    this_sparsity = min(30, size)
    # preallocate workspaces once per matrix size
    workvars = allocateWorkVarsforDA(size, size, nsols)
    out_matches = np.zeros((noutsamples, 2), dtype=int)
    out_associations = np.zeros((nsols, noutsamples), dtype=bool)
    out_costs = np.zeros(nsols)
    input_hypothesis = np.ones((1, size), dtype=bool)
    input_score = np.zeros(1)
    for test in xrange(numtests):
        cd = np.random.rand(size, size) + max_val
        c = sparsify(cd, this_sparsity)
        # time only the association call itself
        timed_start = time()
        da(c, input_hypothesis, input_score, input_hypothesis, input_score,
           out_matches, out_associations, out_costs, *workvars)
        timed_end = time()
        timed_total += (timed_end-timed_start)
        # sum of solution likelihoods relative to the best solution
        relative_cost += sum(np.exp(-out_costs+out_costs[0]))
    my_results.append((timed_total*1000, relative_cost))
my_results = np.array(my_results) / numtests
print(my_results)
| 1,582
| 34.177778
| 76
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/sspDense.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
The Jonker-Volgenant algorithm for finding the maximum assignment.
Michael Motro, University of Texas at Austin
last modified 10/23/2018
This is a direct adaptation of the Pascal code from
"A Shortest Augmenting Path Algorithm for Dense and Sparse Linear Assignment Problems"
R. Jonker and A. Volgenant, Computing 1987
the __main__ code at the bottom tests this implementation, comparing it to
Scipy's linear_sum_assignment function. You'll need to have scipy in your
distribution to run this file on its own, but not to import it in other files.
"""
import numpy as np
import numba as nb
inf= 1e9 # inf is a suitably large number
@nb.jit(nb.f8(nb.f8[:,:], nb.i8[:], nb.i8[:], nb.f8[:],
              nb.i8[:], nb.i8, nb.i8[:], nb.i8, nb.f8[:], nb.i8[:]), nopython=True)
def SSP(c, x, y, v, rows2use, nrows2use, cols2use, ncols2use, d, pred):
    """ solves full 2D assignment problem
    c: matrix
    x: column indices that match to row, or -1 if row is missing
    y: match indices for column
    v: column reductions
    rows2use, nrows2use: rows in rows2use[:nrows2use] are considered part of the problem
    cols2use, ncols2use: " "
    d, pred: workspace for SSP, remember costs and path backwards for each column
    returns cost of assignment

    Shortest-augmenting-path (Jonker-Volgenant) solver; negative entries in
    c are preferable to a miss, which has cost 0. x, y, v, rows2use and
    cols2use are all mutated in place. Python 2 (xrange) + numba nopython.
    """
    C = 0.
    # basic column reduction - basically running some rows in a convenient order
    nrows = nrows2use
    for ri in xrange(nrows2use-1,-1,-1):
        i = rows2use[ri]
        j = np.argmin(c[i,:])
        # greedily pre-match a row when its cheapest column is free and
        # cheaper than missing; such rows are swapped out of the main loop
        if c[i,j] < 0 and y[j] == -1:
            x[i] = j
            y[j] = i
            C += c[i,j]
            nrows -= 1
            rows2use[ri] = rows2use[nrows]
            rows2use[nrows] = i
    # augment each remaining row by a shortest path in the reduced costs
    for i1 in rows2use[:nrows]:
        d[:] = c[i1,:] - v
        pred[:] = i1
        minmissi = i1
        minmissval = 0. # cost of leaving the closest-to-missing row unmatched
        ncolsunused = ncols2use
        emergcounter = 0 # guard against a non-terminating search
        while True:
            emergcounter += 1
            assert emergcounter < 2000
            minval = minmissval
            minj = -1
            mincolidx = 0
            # scan the unvisited columns for the cheapest frontier node
            for colidx, j in enumerate(cols2use[:ncolsunused]):
                dj = d[j]
                if dj < minval:
                    minj = j
                    minval = dj
                    mincolidx = colidx
            j = minj
            if j == -1:
                break # hit unmatched row
            i = y[j]
            if i == -1:
                break # hit unmatched column
            # this column should no longer be considered
            v[j] += minval
            ncolsunused -= 1
            cols2use[mincolidx] = cols2use[ncolsunused]
            cols2use[ncolsunused] = j
            # update distances to other columns
            u1 = c[i,j] - v[j]
            if -u1 < minmissval:
                # this row is the closest to missing
                minmissi = i
                minmissval = -u1
            for j in cols2use[:ncolsunused]:
                dj = c[i,j] - v[j] - u1
                if dj < d[j]:
                    d[j] = dj
                    pred[j] = i
        # augment
        # travel back through shortest path to find matches
        if j==-1:
            # path ended at a miss: free the closest-to-missing row's column
            i = minmissi
            j = x[i]
            x[i] = -1
        emergcounter = 0
        while i != i1:
            emergcounter += 1
            assert emergcounter < 2000
            i = pred[j]
            y[j] = i
            k = j
            j = x[i]
            x[i] = k
        # updating of column prices
        for j in cols2use[ncolsunused:ncols2use]:
            v[j] -= minval
        C += minval
    return C
@nb.jit(nb.f8(nb.f8[:,:], nb.i8[:], nb.i8[:], nb.f8[:], nb.i8[:],
              nb.i8, nb.i8[:], nb.i8, nb.f8[:], nb.i8[:],
              nb.i8, nb.i8, nb.b1[:], nb.b1, nb.f8), nopython=True)
def spStep(c, x, y, v, rows2use, nrows2use, cols2use, ncols2use, d, pred,
           i1, j1, eliminate_els, eliminate_miss, cost_bound):
    """ solves Murty subproblem given solution to originating problem
    same inputs as SSP and also:
    i1, j1 = row and column that are now unassigned
    eliminate_els = boolean array, whether matching a column with i1 is prohibited
    eliminate_miss = whether i1 is prohibited to miss
    cost_bound = function will stop early and return inf if the solution is known
    to be above this bound
    returns cost of shortest path, a.k.a. this solution's cost minus original solution's

    Single shortest-path step: finds the cheapest way to re-match row i1
    (or leave it missing) given eliminations, updating x/y/v in place.
    The "missing zone" is an auxiliary node representing any miss/unmatch
    transition; it may be entered and exited at most once per search.
    """
    if j1>=0:
        u0 = c[i1,j1]-v[j1] # not necessary to get solution, but gives accurate cost
    else:
        u0 = 0.
    pred[:] = i1
    ncols = ncols2use
    # initialize distances from i1; eliminated pairings are unreachable
    for j in cols2use[:ncols]:
        d[j] = inf if eliminate_els[j] else c[i1,j] - v[j] - u0
    minmissj = -1
    minmissi = i1
    minmissval = inf if eliminate_miss else -u0
    miss_unused = True # the missing zone may only be entered once
    missing_from_row = False
    missing_cost = 0. # this is a dual cost on auxiliary columns
    emergcounter = 0 # guard against a non-terminating search
    while True:
        emergcounter += 1
        assert emergcounter < 2000
        minval = minmissval
        minj = -2 # -2 encodes "enter the missing zone"
        minjcol = -1
        for jcol, j in enumerate(cols2use[:ncols]):
            dj = d[j]
            if dj < minval:
                minj = j
                minval = dj
                minjcol = jcol
        if minval > cost_bound: return inf # that's all it takes for early stopping!
        j = minj
        if j==j1: break
        if j == -2:
            if not miss_unused: # if you got here again, costs must be really high
                return inf
            # entry to missing zone: row was matched but is now missing
            missing=True
            missing_from_row = True
        else:
            i = y[j]
            # this column should no longer be considered
            ncols -= 1
            cols2use[minjcol] = cols2use[ncols]
            cols2use[ncols] = j
            if i==-1:
                # entry to missing zone: col was missing but is now matched
                if miss_unused:
                    minmissj = j
                    missing=True
                    missing_from_row = False
                else:
                    # already covered the missing zone, this is a dead end
                    continue
            else:
                missing=False
        if missing:
            if j1 == -1:
                # the goal itself is a miss: path complete
                j=-1
                break
            miss_unused = False
            missing_cost = minval
            minmissval = inf
            u1 = -minval
            # exit from missing zone: row that was missing is matched
            for i in rows2use[:nrows2use]:
                if x[i]==-1:
                    for j in cols2use[:ncols]:
                        dj = c[i,j]-v[j]-u1
                        if dj < d[j]:
                            d[j] = dj
                            pred[j] = i
            # exit from missing zone: col that was matched is missing
            for j in cols2use[:ncols]:
                if y[j] >= 0:
                    dj = -v[j]-u1
                    if dj < d[j]:
                        d[j] = dj
                        pred[j] = -1
        else:
            # ordinary relaxation step from the row matched to column j
            u1 = c[i,j]-v[j]-minval
            if miss_unused and -u1<minmissval:
                minmissi = i
                minmissval = -u1
            for j in cols2use[:ncols]:
                dj = c[i,j]-v[j]-u1
                if dj < d[j]:
                    d[j] = dj
                    pred[j] = i
    # augment
    # updating of column prices
    v[cols2use[ncols:ncols2use]] += d[cols2use[ncols:ncols2use]] - minval
    if not miss_unused:
        v[cols2use[:ncols2use]] += minval - missing_cost
    # travel back through shortest path to find matches
    i = i1+1 # any number that isn't i1
    emergcounter = 0
    while i != i1:
        emergcounter += 1
        assert emergcounter < 2000
        if j == -1:
            # exit from missing zone: row was missing but is now matched
            i = -1
        else:
            i = pred[j]
            y[j] = i
        if i == -1:
            # exit from missing zone: column j was matched but is now missing
            if missing_from_row:
                # entry to missing zone: row was matched but is now missing
                i = minmissi
                j = x[i]
                x[i] = -1
            else:
                # entry to missing zone: col was missing but is now matched
                j = minmissj
        else:
            k = j
            j = x[i]
            x[i] = k
    # missing columns carry no dual price
    v[y==-1] = 0.
    return minval
if __name__ == '__main__':
    """
    create a random matrix
    try assignment, check for equality

    Sanity test: builds the equivalent square problem (with explicit miss
    rows/columns) for scipy's linear_sum_assignment and checks that SSP
    produces a solution satisfying complementary slackness.
    """
    from scipy.optimize import linear_sum_assignment
    m=10
    n=20
    # P = np.random.exponential(size=(n,m))
    # mX = np.random.exponential(size=(n,))
    # mY = np.random.exponential(size=(m,))
    P = np.random.rand(m,n)
    mX = np.random.rand(m) # row miss costs
    mY = np.random.rand(n) # column miss costs
    # make full square version, use standard code
    c1 = np.zeros((m+n,m+n))
    c1[:m,:n] = P
    c1[:m,n:] = 1e4
    c1[range(m),range(n,m+n)] = mX
    c1[m:,:n] = 1e4
    c1[range(m,m+n), range(n)] = mY
    sol = linear_sum_assignment(c1)
    x1 = np.array(sol[1][:m])
    x1[x1>=n] = -1
    y1 = np.arange(n)
    for k,j in enumerate(sol[1]):
        j = sol[1][k]
        if j < n:
            if k < m:
                y1[j] = k
            else:
                y1[j] = -1
    # print() is equivalent in Python 2 for a single argument and keeps
    # this file importable under Python 3
    print(x1)
    print(y1)
    y = np.zeros(n, dtype=int) - 1
    x = np.zeros(m, dtype=int) - 1
    v = np.zeros(n)
    # fold the miss costs into the matrix so misses cost 0, as SSP expects
    c2 = P - mX[:,None] - mY[None,:]
    rows2use = np.arange(m)
    cols2use = np.arange(n)
    d = np.zeros(n)
    pred = np.zeros(n, dtype=int)
    # BUGFIX: SSP's signature is (c, x, y, v, rows2use, nrows2use, cols2use,
    # ncols2use, d, pred); the two count arguments were missing here, so the
    # call raised a TypeError. Pass the full problem dimensions m and n.
    SSP(c2, x, y, v, rows2use, m, cols2use, n, d, pred)
    print(x)
    print(y)
    # recover duals of the original (unshifted) problem and check optimality
    v += mY
    u = mX.copy()
    xmatch = x>=0
    xmis = xmatch==False
    ymis = y==-1
    u[xmatch] = P[xmatch,x[xmatch]] - v[x[xmatch]]
    u2 = np.append(u, np.zeros(n))
    v2 = np.append(v, np.zeros(m))
    x2 = np.append(x, y+n)
    x2[np.where(x==-1)[0]] = np.where(x==-1)[0]+n
    x2[np.where(y==-1)[0]+m] = np.where(y==-1)[0]
    slack = c1 - u2[:,None] - v2
    # dual feasibility and complementary slackness of the SSP solution
    assert np.min(slack) > -1e-8
    assert all(slack[range(m+n), x2] < 1e-8)
    assert np.min(v[ymis]) >= -1e-8 if any(ymis) else True
| 10,323
| 31.465409
| 88
|
py
|
pmb-nll
|
pmb-nll-main/src/core/datasets/metadata.py
|
from collections import ChainMap
# Detectron imports
from detectron2.data import MetadataCatalog
# Useful Dicts for OpenImages Conversion
OPEN_IMAGES_TO_COCO = {'Person': 'person',
'Bicycle': 'bicycle',
'Car': 'car',
'Motorcycle': 'motorcycle',
'Airplane': 'airplane',
'Bus': 'bus',
'Train': 'train',
'Truck': 'truck',
'Boat': 'boat',
'Traffic light': 'traffic light',
'Fire hydrant': 'fire hydrant',
'Stop sign': 'stop sign',
'Parking meter': 'parking meter',
'Bench': 'bench',
'Bird': 'bird',
'Cat': 'cat',
'Dog': 'dog',
'Horse': 'horse',
'Sheep': 'sheep',
'Elephant': 'elephant',
'Cattle': 'cow',
'Bear': 'bear',
'Zebra': 'zebra',
'Giraffe': 'giraffe',
'Backpack': 'backpack',
'Umbrella': 'umbrella',
'Handbag': 'handbag',
'Tie': 'tie',
'Suitcase': 'suitcase',
'Flying disc': 'frisbee',
'Ski': 'skis',
'Snowboard': 'snowboard',
'Ball': 'sports ball',
'Kite': 'kite',
'Baseball bat': 'baseball bat',
'Baseball glove': 'baseball glove',
'Skateboard': 'skateboard',
'Surfboard': 'surfboard',
'Tennis racket': 'tennis racket',
'Bottle': 'bottle',
'Wine glass': 'wine glass',
'Coffee cup': 'cup',
'Fork': 'fork',
'Knife': 'knife',
'Spoon': 'spoon',
'Bowl': 'bowl',
'Banana': 'banana',
'Apple': 'apple',
'Sandwich': 'sandwich',
'Orange': 'orange',
'Broccoli': 'broccoli',
'Carrot': 'carrot',
'Hot dog': 'hot dog',
'Pizza': 'pizza',
'Doughnut': 'donut',
'Cake': 'cake',
'Chair': 'chair',
'Couch': 'couch',
'Houseplant': 'potted plant',
'Bed': 'bed',
'Table': 'dining table',
'Toilet': 'toilet',
'Television': 'tv',
'Laptop': 'laptop',
'Computer mouse': 'mouse',
'Remote control': 'remote',
'Computer keyboard': 'keyboard',
'Mobile phone': 'cell phone',
'Microwave oven': 'microwave',
'Oven': 'oven',
'Toaster': 'toaster',
'Sink': 'sink',
'Refrigerator': 'refrigerator',
'Book': 'book',
'Clock': 'clock',
'Vase': 'vase',
'Scissors': 'scissors',
'Teddy bear': 'teddy bear',
'Hair dryer': 'hair drier',
'Toothbrush': 'toothbrush'}
# Construct COCO metadata
COCO_THING_CLASSES = MetadataCatalog.get('coco_2017_train').thing_classes
COCO_THING_DATASET_ID_TO_CONTIGUOUS_ID = MetadataCatalog.get(
'coco_2017_train').thing_dataset_id_to_contiguous_id
# Construct OpenImages metadata
OPENIMAGES_THING_DATASET_ID_TO_CONTIGUOUS_ID = dict(
ChainMap(*[{i + 1: i} for i in range(len(COCO_THING_CLASSES))]))
# MAP COCO to OpenImages contiguous id to be used for inference on OpenImages for models
# trained on COCO.
COCO_TO_OPENIMAGES_CONTIGUOUS_ID = dict(ChainMap(
*[{COCO_THING_CLASSES.index(openimages_thing_class): COCO_THING_CLASSES.index(openimages_thing_class)} for
openimages_thing_class in
COCO_THING_CLASSES]))
# Construct VOC metadata
VOC_THING_CLASSES = ['person',
'bird',
'cat',
'cow',
'dog',
'horse',
'sheep',
'airplane',
'bicycle',
'boat',
'bus',
'car',
'motorcycle',
'train',
'bottle',
'chair',
'dining table',
'potted plant',
'couch',
'tv',
]
VOC_THING_DATASET_ID_TO_CONTIGUOUS_ID = dict(
ChainMap(*[{i + 1: i} for i in range(len(VOC_THING_CLASSES))]))
# MAP COCO to VOC contiguous id to be used for inference on VOC for models
# trained on COCO.
COCO_TO_VOC_CONTIGUOUS_ID = dict(ChainMap(
*[{COCO_THING_CLASSES.index(voc_thing_class): VOC_THING_CLASSES.index(voc_thing_class)} for voc_thing_class in
VOC_THING_CLASSES]))
| 5,503
| 39.77037
| 114
|
py
|
pmb-nll
|
pmb-nll-main/src/core/datasets/convert_openimages_to_coco.py
|
import argparse
import csv
import cv2
import json
import os
from tqdm import tqdm
# Project imports
import core.datasets.metadata as metadata
def main(args):
"""Convert a downloaded OpenImages subset into a COCO-format JSON file.

Reads the class-description and bounding-box CSVs under ``args.dataset_dir``,
maps OpenImages category names onto COCO category ids via
``metadata.OPEN_IMAGES_TO_COCO``, and writes ``val_coco_format.json``.

NOTE(review): leading indentation was lost when this file was extracted;
the statements below are preserved byte-for-byte as found.
"""
dataset_dir = args.dataset_dir
# Default output location is <dataset_dir>/COCO-Format.
if args.output_dir is None:
output_dir = os.path.expanduser(
os.path.join(dataset_dir, 'COCO-Format'))
else:
output_dir = os.path.expanduser(args.output_dir)
os.makedirs(output_dir, exist_ok=True)
# Get category mapping from openimages symbol to openimages names.
with open(os.path.expanduser(os.path.join(dataset_dir, 'class-descriptions-boxable.csv')), 'r', encoding='utf-8') as f:
csv_f = csv.reader(f)
openimages_class_mapping_dict = dict()
for row in csv_f:
openimages_class_mapping_dict.update({row[0]: row[1]})
# Get mapping from openimages names to coco names
open_images_to_coco_dict = metadata.OPEN_IMAGES_TO_COCO
# Get annotation csv path and image directories
annotations_csv_path = os.path.expanduser(
os.path.join(dataset_dir, 'train-annotations-bbox.csv'))
image_dir = os.path.expanduser(os.path.join(dataset_dir, 'images'))
# Image ids are the file stems of the downloaded .jpg files.
id_list = [image[:-4] for image in os.listdir(image_dir)]
# Begin processing annotations
with open(annotations_csv_path, 'r', encoding='utf-8') as f:
csv_f = csv.reader(f)
processed_ids = []
images_list = []
annotations_list = []
count = 0
with tqdm(total=len(id_list)) as pbar:
for i, row in enumerate(csv_f):
image_id = row[0]
# NOTE(review): 'in id_list' is an O(n) list scan per CSV row, and
# cv2.imread re-decodes the image for every annotation row of the
# same image; a set and a per-image cache would avoid both.
if image_id in id_list:
image = cv2.imread(
os.path.join(
image_dir,
image_id) + '.jpg')
width = image.shape[1]
height = image.shape[0]
category_symbol = row[2]
category_name = openimages_class_mapping_dict[category_symbol]
# Only annotations whose class exists in COCO are kept.
if category_name in list(open_images_to_coco_dict.keys()):
mapped_category = open_images_to_coco_dict[category_name]
# COCO category ids are 1-based positions in the mapping.
category_id = list(
open_images_to_coco_dict.values()).index(mapped_category) + 1
# OpenImages stores boxes normalized to [0, 1]; scale to pixels.
x_min = float(row[4]) * width
x_max = float(row[5]) * width
y_min = float(row[6]) * height
y_max = float(row[7]) * height
is_occluded = int(row[8])
is_truncated = int(row[9])
# COCO box layout is [x, y, width, height].
bbox_coco = [
x_min,
y_min,
x_max - x_min,
y_max - y_min]
annotations_list.append({'image_id': image_id,
'id': count,
'category_id': category_id,
'bbox': bbox_coco,
'area': bbox_coco[2] * bbox_coco[3],
'iscrowd': 0,
'is_truncated': is_truncated,
'is_occluded': is_occluded})
count += 1
else:
continue
# Record each image once, the first time any of its rows is seen.
if image_id not in processed_ids:
pbar.update(1)
images_list.append({'id': image_id,
'width': width,
'height': height,
'file_name': image_id + '.jpg',
'license': 1})
processed_ids.append(image_id)
else:
continue
licenses = [{'id': 1,
'name': 'none',
'url': 'none'}]
# The full 80-category COCO taxonomy, ids 1..80.
categories = [
{"supercategory": "person", "id": 1, "name": "person"},
{"supercategory": "vehicle", "id": 2, "name": "bicycle"},
{"supercategory": "vehicle", "id": 3, "name": "car"},
{"supercategory": "vehicle", "id": 4, "name": "motorcycle"},
{"supercategory": "vehicle", "id": 5, "name": "airplane"},
{"supercategory": "vehicle", "id": 6, "name": "bus"},
{"supercategory": "vehicle", "id": 7, "name": "train"},
{"supercategory": "vehicle", "id": 8, "name": "truck"},
{"supercategory": "vehicle", "id": 9, "name": "boat"},
{"supercategory": "outdoor", "id": 10, "name": "traffic light"},
{"supercategory": "outdoor", "id": 11, "name": "fire hydrant"},
{"supercategory": "outdoor", "id": 12, "name": "stop sign"},
{"supercategory": "outdoor", "id": 13, "name": "parking meter"},
{"supercategory": "outdoor", "id": 14, "name": "bench"},
{"supercategory": "animal", "id": 15, "name": "bird"},
{"supercategory": "animal", "id": 16, "name": "cat"},
{"supercategory": "animal", "id": 17, "name": "dog"},
{"supercategory": "animal", "id": 18, "name": "horse"},
{"supercategory": "animal", "id": 19, "name": "sheep"},
{"supercategory": "animal", "id": 20, "name": "cow"},
{"supercategory": "animal", "id": 21, "name": "elephant"},
{"supercategory": "animal", "id": 22, "name": "bear"},
{"supercategory": "animal", "id": 23, "name": "zebra"},
{"supercategory": "animal", "id": 24, "name": "giraffe"},
{"supercategory": "accessory", "id": 25, "name": "backpack"},
{"supercategory": "accessory", "id": 26, "name": "umbrella"},
{"supercategory": "accessory", "id": 27, "name": "handbag"},
{"supercategory": "accessory", "id": 28, "name": "tie"},
{"supercategory": "accessory", "id": 29, "name": "suitcase"},
{"supercategory": "sports", "id": 30, "name": "frisbee"},
{"supercategory": "sports", "id": 31, "name": "skis"},
{"supercategory": "sports", "id": 32, "name": "snowboard"},
{"supercategory": "sports", "id": 33, "name": "sports ball"},
{"supercategory": "sports", "id": 34, "name": "kite"},
{"supercategory": "sports", "id": 35, "name": "baseball bat"},
{"supercategory": "sports", "id": 36, "name": "baseball glove"},
{"supercategory": "sports", "id": 37, "name": "skateboard"},
{"supercategory": "sports", "id": 38, "name": "surfboard"},
{"supercategory": "sports", "id": 39, "name": "tennis racket"},
{"supercategory": "kitchen", "id": 40, "name": "bottle"},
{"supercategory": "kitchen", "id": 41, "name": "wine glass"},
{"supercategory": "kitchen", "id": 42, "name": "cup"},
{"supercategory": "kitchen", "id": 43, "name": "fork"},
{"supercategory": "kitchen", "id": 44, "name": "knife"},
{"supercategory": "kitchen", "id": 45, "name": "spoon"},
{"supercategory": "kitchen", "id": 46, "name": "bowl"},
{"supercategory": "food", "id": 47, "name": "banana"},
{"supercategory": "food", "id": 48, "name": "apple"},
{"supercategory": "food", "id": 49, "name": "sandwich"},
{"supercategory": "food", "id": 50, "name": "orange"},
{"supercategory": "food", "id": 51, "name": "broccoli"},
{"supercategory": "food", "id": 52, "name": "carrot"},
{"supercategory": "food", "id": 53, "name": "hot dog"},
{"supercategory": "food", "id": 54, "name": "pizza"},
{"supercategory": "food", "id": 55, "name": "donut"},
{"supercategory": "food", "id": 56, "name": "cake"},
{"supercategory": "furniture", "id": 57, "name": "chair"},
{"supercategory": "furniture", "id": 58, "name": "couch"},
{"supercategory": "furniture", "id": 59, "name": "potted plant"},
{"supercategory": "furniture", "id": 60, "name": "bed"},
{"supercategory": "furniture", "id": 61, "name": "dining table"},
{"supercategory": "furniture", "id": 62, "name": "toilet"},
{"supercategory": "electronic", "id": 63, "name": "tv"},
{"supercategory": "electronic", "id": 64, "name": "laptop"},
{"supercategory": "electronic", "id": 65, "name": "mouse"},
{"supercategory": "electronic", "id": 66, "name": "remote"},
{"supercategory": "electronic", "id": 67, "name": "keyboard"},
{"supercategory": "electronic", "id": 68, "name": "cell phone"},
{"supercategory": "appliance", "id": 69, "name": "microwave"},
{"supercategory": "appliance", "id": 70, "name": "oven"},
{"supercategory": "appliance", "id": 71, "name": "toaster"},
{"supercategory": "appliance", "id": 72, "name": "sink"},
{"supercategory": "appliance", "id": 73, "name": "refrigerator"},
{"supercategory": "indoor", "id": 74, "name": "book"},
{"supercategory": "indoor", "id": 75, "name": "clock"},
{"supercategory": "indoor", "id": 76, "name": "vase"},
{"supercategory": "indoor", "id": 77, "name": "scissors"},
{"supercategory": "indoor", "id": 78, "name": "teddy bear"},
{"supercategory": "indoor", "id": 79, "name": "hair drier"},
{"supercategory": "indoor", "id": 80, "name": "toothbrush"}]
json_dict_val = {'info': {'year': 2020},
'licenses': licenses,
'categories': categories,
'images': images_list,
'annotations': annotations_list}
val_file_name = os.path.join(output_dir, 'val_coco_format.json')
with open(val_file_name, 'w') as outfile:
json.dump(json_dict_val, outfile)
if __name__ == "__main__":
    # CLI: a required dataset path plus an optional output path.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--dataset-dir", required=True, type=str,
        help='bdd100k dataset directory')
    arg_parser.add_argument(
        "--output-dir", required=False, type=str,
        help='converted dataset write directory')
    main(arg_parser.parse_args())
| 10,246
| 45.789954
| 123
|
py
|
pmb-nll
|
pmb-nll-main/src/core/datasets/convert_openimages_odd_to_coco.py
|
import argparse
import csv
import cv2
import json
import os
from tqdm import tqdm
def main(args):
"""Build a COCO-format JSON for the OpenImages out-of-distribution split.

Only image records are written; ``annotations`` stays empty because the OOD
evaluation has no in-distribution ground truth boxes.

NOTE(review): leading indentation was lost when this file was extracted;
the statements below are preserved byte-for-byte as found.
"""
dataset_dir = args.dataset_dir
# Default output location is <dataset_dir>/COCO-Format.
if args.output_dir is None:
output_dir = os.path.expanduser(
os.path.join(dataset_dir, 'COCO-Format'))
else:
output_dir = os.path.expanduser(args.output_dir)
os.makedirs(output_dir, exist_ok=True)
# Get category mapping from openimages symbol to openimages names.
with open(os.path.expanduser(os.path.join(dataset_dir, 'class-descriptions-boxable.csv')), 'r', encoding='utf-8') as f:
csv_f = csv.reader(f)
openimages_class_mapping_dict = dict()
for row in csv_f:
openimages_class_mapping_dict.update({row[0]: row[1]})
# Get annotation csv path and image directories
annotations_csv_path = os.path.expanduser(
os.path.join(dataset_dir, 'train-annotations-bbox.csv'))
image_dir = os.path.expanduser(os.path.join(dataset_dir, 'images'))
# Image ids are the file stems of the downloaded .jpg files.
id_list = [image[:-4] for image in os.listdir(image_dir)]
# Begin processing annotations
with open(annotations_csv_path, 'r', encoding='utf-8') as f:
csv_f = csv.reader(f)
processed_ids = []
images_list = []
annotations_list = []
with tqdm(total=len(id_list)) as pbar:
for i, row in enumerate(csv_f):
image_id = row[0]
# NOTE(review): cv2.imread runs for every CSV row of an image, even
# once the image is already in processed_ids — checking
# processed_ids first would skip the redundant decodes.
if image_id in id_list:
image = cv2.imread(
os.path.join(
image_dir,
image_id) + '.jpg')
width = image.shape[1]
height = image.shape[0]
if image_id not in processed_ids:
pbar.update(1)
images_list.append({'id': image_id,
'width': width,
'height': height,
'file_name': image_id + '.jpg',
'license': 1})
processed_ids.append(image_id)
else:
continue
licenses = [{'id': 1,
'name': 'none',
'url': 'none'}]
# The full 80-category COCO taxonomy, ids 1..80.
categories = [
{"supercategory": "person", "id": 1, "name": "person"},
{"supercategory": "vehicle", "id": 2, "name": "bicycle"},
{"supercategory": "vehicle", "id": 3, "name": "car"},
{"supercategory": "vehicle", "id": 4, "name": "motorcycle"},
{"supercategory": "vehicle", "id": 5, "name": "airplane"},
{"supercategory": "vehicle", "id": 6, "name": "bus"},
{"supercategory": "vehicle", "id": 7, "name": "train"},
{"supercategory": "vehicle", "id": 8, "name": "truck"},
{"supercategory": "vehicle", "id": 9, "name": "boat"},
{"supercategory": "outdoor", "id": 10, "name": "traffic light"},
{"supercategory": "outdoor", "id": 11, "name": "fire hydrant"},
{"supercategory": "outdoor", "id": 12, "name": "stop sign"},
{"supercategory": "outdoor", "id": 13, "name": "parking meter"},
{"supercategory": "outdoor", "id": 14, "name": "bench"},
{"supercategory": "animal", "id": 15, "name": "bird"},
{"supercategory": "animal", "id": 16, "name": "cat"},
{"supercategory": "animal", "id": 17, "name": "dog"},
{"supercategory": "animal", "id": 18, "name": "horse"},
{"supercategory": "animal", "id": 19, "name": "sheep"},
{"supercategory": "animal", "id": 20, "name": "cow"},
{"supercategory": "animal", "id": 21, "name": "elephant"},
{"supercategory": "animal", "id": 22, "name": "bear"},
{"supercategory": "animal", "id": 23, "name": "zebra"},
{"supercategory": "animal", "id": 24, "name": "giraffe"},
{"supercategory": "accessory", "id": 25, "name": "backpack"},
{"supercategory": "accessory", "id": 26, "name": "umbrella"},
{"supercategory": "accessory", "id": 27, "name": "handbag"},
{"supercategory": "accessory", "id": 28, "name": "tie"},
{"supercategory": "accessory", "id": 29, "name": "suitcase"},
{"supercategory": "sports", "id": 30, "name": "frisbee"},
{"supercategory": "sports", "id": 31, "name": "skis"},
{"supercategory": "sports", "id": 32, "name": "snowboard"},
{"supercategory": "sports", "id": 33, "name": "sports ball"},
{"supercategory": "sports", "id": 34, "name": "kite"},
{"supercategory": "sports", "id": 35, "name": "baseball bat"},
{"supercategory": "sports", "id": 36, "name": "baseball glove"},
{"supercategory": "sports", "id": 37, "name": "skateboard"},
{"supercategory": "sports", "id": 38, "name": "surfboard"},
{"supercategory": "sports", "id": 39, "name": "tennis racket"},
{"supercategory": "kitchen", "id": 40, "name": "bottle"},
{"supercategory": "kitchen", "id": 41, "name": "wine glass"},
{"supercategory": "kitchen", "id": 42, "name": "cup"},
{"supercategory": "kitchen", "id": 43, "name": "fork"},
{"supercategory": "kitchen", "id": 44, "name": "knife"},
{"supercategory": "kitchen", "id": 45, "name": "spoon"},
{"supercategory": "kitchen", "id": 46, "name": "bowl"},
{"supercategory": "food", "id": 47, "name": "banana"},
{"supercategory": "food", "id": 48, "name": "apple"},
{"supercategory": "food", "id": 49, "name": "sandwich"},
{"supercategory": "food", "id": 50, "name": "orange"},
{"supercategory": "food", "id": 51, "name": "broccoli"},
{"supercategory": "food", "id": 52, "name": "carrot"},
{"supercategory": "food", "id": 53, "name": "hot dog"},
{"supercategory": "food", "id": 54, "name": "pizza"},
{"supercategory": "food", "id": 55, "name": "donut"},
{"supercategory": "food", "id": 56, "name": "cake"},
{"supercategory": "furniture", "id": 57, "name": "chair"},
{"supercategory": "furniture", "id": 58, "name": "couch"},
{"supercategory": "furniture", "id": 59, "name": "potted plant"},
{"supercategory": "furniture", "id": 60, "name": "bed"},
{"supercategory": "furniture", "id": 61, "name": "dining table"},
{"supercategory": "furniture", "id": 62, "name": "toilet"},
{"supercategory": "electronic", "id": 63, "name": "tv"},
{"supercategory": "electronic", "id": 64, "name": "laptop"},
{"supercategory": "electronic", "id": 65, "name": "mouse"},
{"supercategory": "electronic", "id": 66, "name": "remote"},
{"supercategory": "electronic", "id": 67, "name": "keyboard"},
{"supercategory": "electronic", "id": 68, "name": "cell phone"},
{"supercategory": "appliance", "id": 69, "name": "microwave"},
{"supercategory": "appliance", "id": 70, "name": "oven"},
{"supercategory": "appliance", "id": 71, "name": "toaster"},
{"supercategory": "appliance", "id": 72, "name": "sink"},
{"supercategory": "appliance", "id": 73, "name": "refrigerator"},
{"supercategory": "indoor", "id": 74, "name": "book"},
{"supercategory": "indoor", "id": 75, "name": "clock"},
{"supercategory": "indoor", "id": 76, "name": "vase"},
{"supercategory": "indoor", "id": 77, "name": "scissors"},
{"supercategory": "indoor", "id": 78, "name": "teddy bear"},
{"supercategory": "indoor", "id": 79, "name": "hair drier"},
{"supercategory": "indoor", "id": 80, "name": "toothbrush"}]
json_dict_val = {'info': {'year': 2020},
'licenses': licenses,
'categories': categories,
'images': images_list,
'annotations': annotations_list}
val_file_name = os.path.join(output_dir, 'val_coco_format.json')
with open(val_file_name, 'w') as outfile:
json.dump(json_dict_val, outfile)
if __name__ == "__main__":
    # CLI: a required dataset path plus an optional output path.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--dataset-dir", required=True, type=str,
        help='bdd100k dataset directory')
    arg_parser.add_argument(
        "--output-dir", required=False, type=str,
        help='converted dataset write directory')
    main(arg_parser.parse_args())
| 8,433
| 46.117318
| 123
|
py
|
pmb-nll
|
pmb-nll-main/src/core/datasets/generate_coco_corrupted_dataset.py
|
import argparse
import contextlib
import cv2
import joblib
import numpy as np
import os
import random
from joblib import Parallel, delayed
from multiprocessing import Manager, cpu_count
from time import sleep
from tqdm import tqdm
# Project imports
from probabilistic_inference.inference_utils import corrupt
# Fix random seeds
# Seeded at import time so corruption choices are reproducible across runs.
np.random.seed(0)
random.seed(0)
@contextlib.contextmanager
def tqdm_joblib(tqdm_object):
"""Context manager to patch joblib to report into tqdm progress bar given as argument"""
# Subclass joblib's completion callback so each finished batch advances
# the progress bar by its batch size before the normal bookkeeping runs.
class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
tqdm_object.update(n=self.batch_size)
return super().__call__(*args, **kwargs)
# Globally swap in the patched callback for the duration of the block;
# the finally clause restores the original and closes the bar even on error.
old_batch_callback = joblib.parallel.BatchCompletionCallBack
joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback
try:
yield tqdm_object
finally:
joblib.parallel.BatchCompletionCallBack = old_batch_callback
tqdm_object.close()
class Counter(object):
    """Process-safe integer counter backed by a multiprocessing manager.

    Used by the corruption generator to cycle through corruption types
    across parallel workers.

    Args:
        manager: a ``multiprocessing.Manager`` (or compatible object)
            providing ``Value`` and ``Lock``.
        initval (int): initial counter value.
        wrap_at (int): a soft ``reset()`` only zeroes the counter once its
            value exceeds this threshold.  Defaults to 18, the value the
            original hard-coded (presumably the highest 0-based index of
            the 19 available corruption types — TODO confirm).
    """

    def __init__(self, manager, initval=0, wrap_at=18):
        self.val = manager.Value('i', initval)
        self.lock = manager.Lock()
        self.wrap_at = wrap_at

    def reset(self, hard=False):
        """Zero the counter; a soft reset only fires once value > wrap_at."""
        with self.lock:
            # Equivalent to the original if/elif pair: hard always resets,
            # soft resets only past the threshold.
            if hard or self.val.value > self.wrap_at:
                self.val.value = 0

    def increment(self):
        """Atomically add one to the counter."""
        with self.lock:
            self.val.value += 1

    def value(self):
        """Return the current value under the lock."""
        with self.lock:
            return self.val.value
def main(args):
"""Generate corrupted copies of COCO val2017 at severity levels 1-5.

For each level, writes the corrupted images to
``<dataset_dir>/val2017_<level>`` using a pool of joblib workers; a shared
Counter cycles the corruption type across images.
"""
#########################################################
# Specify Source Folders and Parameters For Frame Reader
#########################################################
dataset_dir = args.dataset_dir
image_dir = os.path.expanduser(os.path.join(dataset_dir, 'val2017'))
image_list = os.listdir(image_dir)
max_corruption_levels = [1, 2, 3, 4, 5]
# To get deterministic results across runs, keep this value 1. For faster dataset generation, uncomment cpu_count().
num_cores = 1
#num_cores = cpu_count()
# Shared across worker processes via a multiprocessing manager.
corruption_number = Counter(Manager(), initval=0)
for corruption_level in max_corruption_levels:
output_dir = os.path.expanduser(
os.path.join(dataset_dir, 'val2017_' + str(corruption_level)))
os.makedirs(output_dir, exist_ok=True)
print(
'Generating corrupted data at corruption level ' +
str(corruption_level))
# tqdm_joblib wires joblib batch completion into the progress bar.
with tqdm_joblib(tqdm(desc="Images corrupted:", total=len(image_list))) as _:
Parallel(
n_jobs=num_cores,
backend='loky')(
delayed(generate_corrupted_data)(
image_dir,
output_dir,
image_i,
corruption_level,
corruption_number) for image_i in image_list)
# Restart the corruption-type cycle for the next severity level.
corruption_number.reset(hard=True)
def generate_corrupted_data(
image_dir,
output_dir,
image_i,
corruption_level,
corruption_number):
"""Corrupt one image and write it to ``output_dir``.

Args:
    image_dir: directory holding the clean source images.
    output_dir: destination directory for the corrupted copy.
    image_i: image file name (e.g. ``000000000139.jpg``).
    corruption_level: severity passed to ``corrupt`` (1-5).
    corruption_number: shared Counter selecting the corruption type; it is
        soft-reset before use and incremented after, cycling the type
        across successive images.
"""
image_tensor = cv2.imread(os.path.join(image_dir, image_i))
# OpenCV loads BGR; the corruption library works on RGB.
image_tensor = cv2.cvtColor(image_tensor, cv2.COLOR_BGR2RGB)
corruption_number.reset()
corrupt_im = corrupt(
image_tensor,
severity=corruption_level,
corruption_name=None,
corruption_number=corruption_number.value())
# Convert back to BGR for cv2.imwrite.
image_tensor = cv2.cvtColor(corrupt_im, cv2.COLOR_RGB2BGR)
cv2.imwrite(os.path.join(output_dir, image_i), image_tensor)
corruption_number.increment()
if __name__ == "__main__":
    # CLI: a required dataset path plus an optional output path.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--dataset-dir", required=True, type=str,
        help='bdd100k dataset directory')
    arg_parser.add_argument(
        "--output-dir", required=False, type=str,
        help='converted dataset write directory')
    main(arg_parser.parse_args())
| 4,086
| 27.58042
| 120
|
py
|
pmb-nll
|
pmb-nll-main/src/core/datasets/convert_voc_to_coco.py
|
import argparse
import cv2
import json
import numpy as np
import os
from pascal_voc_tools import XmlParser
def create_coco_lists(ids_list, image_dir, annotations_dir, category_mapper):
    """
    Creates lists in coco format to be written to JSON file.

    Args:
        ids_list: PascalVOC image ids (file stems) to convert.
        image_dir: directory containing the ``.jpg`` images.
        annotations_dir: directory containing PascalVOC ``.xml`` annotations.
        category_mapper: dict mapping COCO category name -> COCO category id.

    Returns:
        (images_list, annotations_list): lists of COCO-format dicts.
    """
    # Convert British nouns used in PascalVOC to American nouns used in COCO
    # in a single mapping pass (the original rewrote the list six times).
    voc_to_coco_names = {
        'diningtable': 'dining table',
        'motorbike': 'motorcycle',
        'pottedplant': 'potted plant',
        'aeroplane': 'airplane',
        'tvmonitor': 'tv',
        'sofa': 'couch',
    }
    parser = XmlParser()
    images_list = []
    annotations_list = []
    count = 0
    for image_id in ids_list:
        image = cv2.imread(os.path.join(image_dir, image_id) + '.jpg')
        images_list.append({'id': image_id,
                            'width': image.shape[1],
                            'height': image.shape[0],
                            'file_name': image_id + '.jpg',
                            'license': 1})
        gt_frame = parser.load(
            os.path.join(annotations_dir, image_id) + '.xml')
        object_list = gt_frame['object']
        category_names = [
            voc_to_coco_names.get(object_inst['name'], object_inst['name'])
            for object_inst in object_list]
        # BUGFIX: np.float was removed in NumPy >= 1.24 (AttributeError);
        # the builtin float is the documented equivalent (float64).
        frame_boxes = np.array(
            [[object_inst['bndbox']['xmin'],
              object_inst['bndbox']['ymin'],
              object_inst['bndbox']['xmax'],
              object_inst['bndbox']['ymax']]
             for object_inst in object_list]).astype(float)
        for bbox, category_name in zip(frame_boxes, category_names):
            # COCO box layout is [x, y, width, height].
            bbox_coco = [
                bbox[0],
                bbox[1],
                bbox[2] - bbox[0],
                bbox[3] - bbox[1]]
            annotations_list.append({'image_id': image_id,
                                     'id': count,
                                     'category_id': category_mapper[category_name],
                                     'bbox': bbox_coco,
                                     'area': bbox_coco[2] * bbox_coco[3],
                                     'iscrowd': 0})
            count += 1
    return images_list, annotations_list
def main(args):
"""Convert PascalVOC train/val splits to COCO-format JSON files.

Reads the split id files under ``ImageSets/Main``, converts both splits via
``create_coco_lists``, and writes ``train_coco_format.json`` and
``val_coco_format.json`` to the output directory.

NOTE(review): leading indentation was lost when this file was extracted;
the statements below are preserved byte-for-byte as found.
"""
#########################################################
# Specify Source Folders and Parameters For Frame Reader
#########################################################
dataset_dir = args.dataset_dir
image_dir = os.path.expanduser(os.path.join(dataset_dir, 'JPEGImages'))
annotations_dir = os.path.expanduser(
os.path.join(dataset_dir, 'Annotations'))
train_ids_file = os.path.expanduser(
os.path.join(
dataset_dir,
'ImageSets',
'Main',
'train') + '.txt')
val_ids_file = os.path.expanduser(
os.path.join(
dataset_dir,
'ImageSets',
'Main',
'val') + '.txt')
# Default output location is <dataset_dir>/COCO-Format.
if args.output_dir is None:
output_dir = os.path.expanduser(
os.path.join(dataset_dir, 'COCO-Format'))
else:
output_dir = os.path.expanduser(args.output_dir)
os.makedirs(output_dir, exist_ok=True)
licenses = [{'id': 1,
'name': 'none',
'url': 'none'}]
# The 20 VOC categories, ids 1..20, under COCO-style names.
categories = [{'id': 1, 'name': 'person', 'supercategory': 'person'},
{'id': 2, 'name': 'bird', 'supercategory': 'animal'},
{'id': 3, 'name': 'cat', 'supercategory': 'animal'},
{'id': 4, 'name': 'cow', 'supercategory': 'animal'},
{'id': 5, 'name': 'dog', 'supercategory': 'animal'},
{'id': 6, 'name': 'horse', 'supercategory': 'animal'},
{'id': 7, 'name': 'sheep', 'supercategory': 'animal'},
{'id': 8, 'name': 'airplane', 'supercategory': 'vehicle'},
{'id': 9, 'name': 'bicycle', 'supercategory': 'vehicle'},
{'id': 10, 'name': 'boat', 'supercategory': 'vehicle'},
{'id': 11, 'name': 'bus', 'supercategory': 'vehicle'},
{'id': 12, 'name': 'car', 'supercategory': 'vehicle'},
{'id': 13, 'name': 'motorcycle', 'supercategory': 'vehicle'},
{'id': 14, 'name': 'train', 'supercategory': 'vehicle'},
{'id': 15, 'name': 'bottle', 'supercategory': 'indoor'},
{'id': 16, 'name': 'chair', 'supercategory': 'indoor'},
{'id': 17, 'name': 'dining table', 'supercategory': 'indoor'},
{'id': 18, 'name': 'potted plant', 'supercategory': 'indoor'},
{'id': 19, 'name': 'couch', 'supercategory': 'indoor'},
{'id': 20, 'name': 'tv', 'supercategory': 'indoor'},
]
# Build name -> id lookup used by create_coco_lists.
category_mapper = {}
category_keys = [category['name'] for category in categories]
for category_name, category in zip(category_keys, categories):
category_mapper[category_name] = category['id']
# Process Training Labels
with open(train_ids_file, 'r') as f:
train_ids_list = [line for line in f.read().splitlines()]
training_image_list, training_annotation_list = create_coco_lists(
train_ids_list, image_dir, annotations_dir, category_mapper)
json_dict_training = {'info': {'year': 2020},
'licenses': licenses,
'categories': categories,
'images': training_image_list,
'annotations': training_annotation_list}
training_file_name = os.path.join(output_dir, 'train_coco_format.json')
with open(training_file_name, 'w') as outfile:
json.dump(json_dict_training, outfile)
print("Finished processing PascalVOC training data!")
# Process Validation Labels
with open(val_ids_file, 'r') as f:
val_ids_list = [line for line in f.read().splitlines()]
validation_image_list, validation_annotation_list = create_coco_lists(
val_ids_list, image_dir, annotations_dir, category_mapper)
json_dict_validation = {'info': {'year': 2020},
'licenses': licenses,
'categories': categories,
'images': validation_image_list,
'annotations': validation_annotation_list}
validation_file_name = os.path.join(output_dir, 'val_coco_format.json')
with open(validation_file_name, 'w') as outfile:
json.dump(json_dict_validation, outfile)
print("Converted PascalVOC to COCO format!")
if __name__ == "__main__":
    # CLI: a required dataset path plus an optional output path.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--dataset-dir", required=True, type=str,
        help='bdd100k dataset directory')
    arg_parser.add_argument(
        "--output-dir", required=False, type=str,
        help='converted dataset write directory')
    main(arg_parser.parse_args())
| 7,734
| 37.869347
| 95
|
py
|
pmb-nll
|
pmb-nll-main/src/core/datasets/__init__.py
| 0
| 0
| 0
|
py
|
|
pmb-nll
|
pmb-nll-main/src/core/datasets/setup_datasets.py
|
import os
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.data.datasets import register_coco_instances
# Project imports
import core.datasets.metadata as metadata
def setup_all_datasets(dataset_dir, image_root_corruption_prefix=None):
"""
    Registers all datasets as instances from COCO

    Args:
    dataset_dir(str): path to dataset directory
    image_root_corruption_prefix: optional suffix selecting a corrupted
        copy of the COCO validation images (e.g. '_1').

    """
# Registers COCO train/val, OpenImages val, and OpenImages-OOD val.
setup_coco_dataset(
dataset_dir,
image_root_corruption_prefix=image_root_corruption_prefix)
setup_openim_dataset(dataset_dir)
setup_openim_odd_dataset(dataset_dir)
def setup_coco_dataset(dataset_dir, image_root_corruption_prefix=None):
    """
    sets up coco dataset following detectron2 coco instance format. Required to not have flexibility on where the dataset
    files can be.
    """
    train_image_dir = os.path.join(dataset_dir, 'train2017')
    # An optional suffix selects a corrupted copy of the validation images.
    if image_root_corruption_prefix is None:
        test_image_dir = os.path.join(dataset_dir, 'val2017')
    else:
        test_image_dir = os.path.join(
            dataset_dir, 'val2017' + image_root_corruption_prefix)
    # Dataset name -> (annotation json, image root); insertion order keeps
    # train registered before val, as before.
    splits = {
        "coco_2017_custom_train": (
            os.path.join(dataset_dir, 'annotations', 'instances_train2017.json'),
            train_image_dir),
        "coco_2017_custom_val": (
            os.path.join(dataset_dir, 'annotations', 'instances_val2017.json'),
            test_image_dir),
    }
    for split_name, (json_annotations, image_root) in splits.items():
        register_coco_instances(split_name, {}, json_annotations, image_root)
        split_meta = MetadataCatalog.get(split_name)
        split_meta.thing_classes = metadata.COCO_THING_CLASSES
        split_meta.thing_dataset_id_to_contiguous_id = \
            metadata.COCO_THING_DATASET_ID_TO_CONTIGUOUS_ID
def setup_openim_dataset(dataset_dir):
    """
    sets up openimages dataset following detectron2 coco instance format. Required to not have flexibility on where the dataset
    files can be.

    Only validation is supported.
    """
    image_root = os.path.join(dataset_dir, 'images')
    json_annotations = os.path.join(
        dataset_dir, 'COCO-Format', 'val_coco_format.json')
    register_coco_instances(
        "openimages_val", {}, json_annotations, image_root)
    # Reuse COCO class names; ids map via the OpenImages contiguous-id table.
    openim_meta = MetadataCatalog.get("openimages_val")
    openim_meta.thing_classes = metadata.COCO_THING_CLASSES
    openim_meta.thing_dataset_id_to_contiguous_id = \
        metadata.OPENIMAGES_THING_DATASET_ID_TO_CONTIGUOUS_ID
def setup_openim_odd_dataset(dataset_dir):
    """
    sets up openimages out-of-distribution dataset following detectron2 coco instance format. Required to not have flexibility on where the dataset
    files can be.

    Only validation is supported.
    """
    image_root = os.path.join(dataset_dir, 'images')
    json_annotations = os.path.join(
        dataset_dir, 'COCO-Format', 'val_coco_format.json')
    register_coco_instances(
        "openimages_ood_val", {}, json_annotations, image_root)
    # Reuse COCO class names; ids map via the OpenImages contiguous-id table.
    ood_meta = MetadataCatalog.get("openimages_ood_val")
    ood_meta.thing_classes = metadata.COCO_THING_CLASSES
    ood_meta.thing_dataset_id_to_contiguous_id = \
        metadata.OPENIMAGES_THING_DATASET_ID_TO_CONTIGUOUS_ID
| 3,624
| 32.256881
| 147
|
py
|
pmb-nll
|
pmb-nll-main/src/core/evaluation_tools/scoring_rules.py
|
import torch

# Shared compute device for every scoring function in this module:
# CUDA when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def sigmoid_compute_cls_scores(input_matches, valid_idxs):
    """
    Computes proper scoring rule for multilabel classification results provided by retinanet.

    Args:
        input_matches (dict): dictionary containing input matches
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation

    Returns:
        output_dict (dict): dictionary containing ignorance and brier score.
    """
    n_forecasts = input_matches["predicted_cls_probs"][valid_idxs].shape[0]
    gt_scores = input_matches["predicted_score_of_gt_category"][valid_idxs]
    # Nothing to score for this class: report None for both rules.
    if gt_scores.shape[0] == 0:
        return {"ignorance_score_mean": None, "brier_score_mean": None}
    # Binary probability vectors [p, 1-p]: RetinaNet is multilabel, so the
    # correct label is always the first (positive) column.
    forecast = torch.stack([gt_scores, 1.0 - gt_scores], dim=1)
    target = torch.stack(
        [torch.ones(n_forecasts), torch.zeros(n_forecasts)], dim=1
    ).to(device)
    # Ignorance score: negative log-likelihood of the correct category.
    nll_per_forecast = (-target * torch.log(forecast)).sum(1)
    # Brier score: squared error between forecast and one-hot target.
    brier_per_forecast = ((forecast - target) ** 2).sum(1)
    return {
        "ignorance_score_mean": nll_per_forecast.mean().to(device).tolist(),
        "brier_score_mean": brier_per_forecast.mean().to(device).tolist(),
    }
def softmax_compute_cls_scores(input_matches, valid_idxs):
"""
    Computes proper scoring rule for multiclass classification results provided by faster_rcnn.

    Args:
        input_matches (dict): dictionary containing input matches
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation

    Returns:
        output_dict (dict): dictionary containing ignorance and brier score.
    """
output_dict = {}
predicted_multilabel_probs = input_matches["predicted_cls_probs"][valid_idxs]
# No valid elements for this class: report None for both rules.
if predicted_multilabel_probs.shape[0] == 0:
output_dict.update({"ignorance_score_mean": None, "brier_score_mean": None})
return output_dict
if "gt_cat_idxs" in input_matches.keys():
# Matched detections: one-hot target at the ground-truth category.
correct_multilabel_probs = torch.nn.functional.one_hot(
input_matches["gt_cat_idxs"][valid_idxs].type(torch.LongTensor),
input_matches["predicted_cls_probs"][valid_idxs].shape[-1],
).to(device)
else:
# No ground truth available: target the last class — presumably the
# background index in this softmax layout (TODO confirm with caller).
correct_multilabel_probs = torch.zeros_like(predicted_multilabel_probs).to(
device
)
correct_multilabel_probs[:, -1] = 1.0
# Ignorance score: negative log-likelihood of the correct category.
predicted_log_likelihood_of_correct_category = (
-correct_multilabel_probs * torch.log(predicted_multilabel_probs)
).sum(1)
cls_ignorance_score_mean = predicted_log_likelihood_of_correct_category.mean()
output_dict.update(
{"ignorance_score_mean": cls_ignorance_score_mean.to(device).tolist()}
)
# Classification Probability Score. Multiclass version of brier score.
predicted_brier_raw = (
(predicted_multilabel_probs - correct_multilabel_probs) ** 2
).sum(1)
cls_brier_score_mean = predicted_brier_raw.mean()
output_dict.update({"brier_score_mean": cls_brier_score_mean.to(device).tolist()})
return output_dict
def compute_reg_scores(input_matches, valid_idxs):
    """
    Computes proper scoring rules for regression results.

    Args:
        input_matches (dict): dictionary containing input matches, with keys
            "predicted_box_means", "predicted_box_covariances", "gt_box_means"
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation

    Returns:
        output_dict (dict): dictionary containing mean ignorance score, mean
            squared error and mean energy score (Python floats), or None values
            when no valid detections exist.
    """
    output_dict = {}
    predicted_box_means = input_matches["predicted_box_means"][valid_idxs]
    predicted_box_covars = input_matches["predicted_box_covariances"][valid_idxs]
    gt_box_means = input_matches["gt_box_means"][valid_idxs]
    # If no valid idxs, do not perform computation
    if predicted_box_means.shape[0] == 0:
        output_dict.update(
            {
                "ignorance_score_mean": None,
                "mean_squared_error": None,
                "energy_score_mean": None,
            }
        )
        return output_dict
    # Compute negative log likelihood
    # Note: Juggling between CPU and GPU is due to magma library unresolvable issue, where cuda illegal memory access
    # error is returned arbitrarily depending on the state of the GPU. This is only a problem for the
    # torch.distributions code.
    # Pytorch unresolved issue from 2019:
    # https://github.com/pytorch/pytorch/issues/21819
    # The 1e-2 * I jitter is added to the covariance before the Cholesky
    # factorization — presumably for numerical stability; TODO confirm the
    # magnitude is intended.
    predicted_multivariate_normal_dists = (
        torch.distributions.multivariate_normal.MultivariateNormal(
            predicted_box_means.to("cpu"),
            predicted_box_covars.to("cpu")
            + 1e-2 * torch.eye(predicted_box_covars.shape[2]).to("cpu"),
        )
    )
    # Move every internal tensor of the distribution object back to the target
    # device so subsequent log_prob/sample calls run there (see note above).
    predicted_multivariate_normal_dists.loc = (
        predicted_multivariate_normal_dists.loc.to(device)
    )
    predicted_multivariate_normal_dists.scale_tril = (
        predicted_multivariate_normal_dists.scale_tril.to(device)
    )
    predicted_multivariate_normal_dists._unbroadcasted_scale_tril = (
        predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(device)
    )
    predicted_multivariate_normal_dists.covariance_matrix = (
        predicted_multivariate_normal_dists.covariance_matrix.to(device)
    )
    predicted_multivariate_normal_dists.precision_matrix = (
        predicted_multivariate_normal_dists.precision_matrix.to(device)
    )
    # Compute negative log probability
    negative_log_prob = -predicted_multivariate_normal_dists.log_prob(gt_box_means)
    negative_log_prob_mean = negative_log_prob.mean()
    # .tolist() on a 0-dim tensor yields a plain Python float.
    output_dict.update(
        {"ignorance_score_mean": negative_log_prob_mean.to(device).tolist()}
    )
    # Compute mean square error
    mean_squared_error = ((predicted_box_means - gt_box_means) ** 2).mean()
    output_dict.update({"mean_squared_error": mean_squared_error.to(device).tolist()})
    # Energy Score.
    # Monte-Carlo estimate using 1000 consecutive sample pairs:
    # E||X - y|| - 0.5 * E||X - X'||, with X, X' drawn from the predicted
    # distribution and y the ground-truth box.
    sample_set = predicted_multivariate_normal_dists.sample((1001,)).to(device)
    sample_set_1 = sample_set[:-1]
    sample_set_2 = sample_set[1:]
    energy_score = torch.norm((sample_set_1 - gt_box_means), dim=2).mean(
        0
    ) - 0.5 * torch.norm((sample_set_1 - sample_set_2), dim=2).mean(0)
    energy_score_mean = energy_score.mean()
    output_dict.update({"energy_score_mean": energy_score_mean.to(device).tolist()})
    return output_dict
def compute_reg_scores_fn(false_negatives, valid_idxs):
    """
    Computes proper scoring rule for regression false positive.

    NOTE(review): the parameter is named ``false_negatives`` while the
    docstring and the returned keys ("fp_energy_score_mean") describe false
    positives — confirm which detection set callers actually pass in.

    Args:
        false_negatives (dict): dictionary containing false_negatives
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation

    Returns:
        output_dict (dict): dictionary containing false positives ignorance and energy scores.
    """
    output_dict = {}
    predicted_box_means = false_negatives["predicted_box_means"][valid_idxs]
    predicted_box_covars = false_negatives["predicted_box_covariances"][valid_idxs]
    # If no valid idxs, do not perform computation
    if predicted_box_means.shape[0] == 0:
        output_dict.update({"total_entropy_mean": None, "fp_energy_score_mean": None})
        return output_dict
    # Build the predictive distribution on CPU (magma/CUDA workaround — see
    # compute_reg_scores), then move its internals to the target device.
    # The 1e-2 * I jitter regularizes the covariance before factorization.
    predicted_multivariate_normal_dists = (
        torch.distributions.multivariate_normal.MultivariateNormal(
            predicted_box_means.to("cpu"),
            predicted_box_covars.to("cpu")
            + 1e-2 * torch.eye(predicted_box_covars.shape[2]).to("cpu"),
        )
    )
    predicted_multivariate_normal_dists.loc = (
        predicted_multivariate_normal_dists.loc.to(device)
    )
    predicted_multivariate_normal_dists.scale_tril = (
        predicted_multivariate_normal_dists.scale_tril.to(device)
    )
    predicted_multivariate_normal_dists._unbroadcasted_scale_tril = (
        predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(device)
    )
    predicted_multivariate_normal_dists.covariance_matrix = (
        predicted_multivariate_normal_dists.covariance_matrix.to(device)
    )
    predicted_multivariate_normal_dists.precision_matrix = (
        predicted_multivariate_normal_dists.precision_matrix.to(device)
    )
    # Mean differential entropy of the predictive distributions.
    fp_entropy = predicted_multivariate_normal_dists.entropy()
    fp_entropy_mean = fp_entropy.mean()
    output_dict.update({"total_entropy_mean": fp_entropy_mean.to(device).tolist()})
    # Energy Score.
    # With no ground truth to compare against, only the self-distance term
    # E||X - X'|| is estimated from 1000 consecutive sample pairs.
    sample_set = predicted_multivariate_normal_dists.sample((1001,)).to(device)
    sample_set_1 = sample_set[:-1]
    sample_set_2 = sample_set[1:]
    fp_energy_score = torch.norm((sample_set_1 - sample_set_2), dim=2).mean(0)
    fp_energy_score_mean = fp_energy_score.mean()
    output_dict.update(
        {"fp_energy_score_mean": fp_energy_score_mean.to(device).tolist()}
    )
    return output_dict
| 9,442
| 37.542857
| 117
|
py
|
pmb-nll
|
pmb-nll-main/src/core/evaluation_tools/evaluation_utils.py
|
import json
import os
from collections import defaultdict
import numpy as np
import torch
import tqdm
# Project imports
from core.datasets import metadata
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.structures import Boxes, Instances, pairwise_iou
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def eval_predictions_preprocess(
    predicted_instances, min_allowed_score=0.0, is_odd=False
):
    """
    Groups per-detection prediction records into per-image tensors.

    Args:
        predicted_instances (list[dict]): COCO-style detection records, each
            with keys "image_id", "bbox" (x, y, w, h), "cls_prob",
            "bbox_covar", "category_id", "image_size" and optionally "ppp".
        min_allowed_score (float): detections whose max class probability is
            below this threshold are dropped.
        is_odd (bool): when True, keep detections regardless of category_id
            (out-of-distribution evaluation); only the score threshold applies.

    Returns:
        dict: per-image dicts keyed by image id — boxes as (x1, y1, x2, y2),
        class probability vectors, 4x4 box covariance matrices, PPP weights
        (or a NaN tensor when absent) and image sizes.
    """
    predicted_boxes = defaultdict(torch.Tensor)
    predicted_cls_probs = defaultdict(torch.Tensor)
    predicted_covar_mats = defaultdict(torch.Tensor)
    ppp_weights = defaultdict(dict)
    image_sizes = defaultdict(list)
    for predicted_instance in predicted_instances:
        # Remove predictions with undefined category_id. This is used when the training and
        # inference datasets come from different data such as COCO-->VOC or COCO-->OpenImages.
        # Only happens if not ODD dataset, else all detections will be removed.
        # NOTE(review): when the class vector has 81 entries the last one is
        # dropped for the score test below, yet the *full* vector from
        # predicted_instance["cls_prob"] is stored later — confirm this
        # asymmetry is intended (last entry is presumably background).
        if len(predicted_instance["cls_prob"]) == 81:
            cls_prob = predicted_instance["cls_prob"][:-1]
        else:
            cls_prob = predicted_instance["cls_prob"]
        if not is_odd:
            skip_test = (predicted_instance["category_id"] == -1) or (
                np.array(cls_prob).max(0) < min_allowed_score
            )
        else:
            skip_test = np.array(cls_prob).max(0) < min_allowed_score
        if skip_test:
            continue
        # Convert (x, y, w, h) box parameterization to (x1, y1, x2, y2).
        box_inds = predicted_instance["bbox"]
        box_inds = np.array(
            [
                box_inds[0],
                box_inds[1],
                box_inds[0] + box_inds[2],
                box_inds[1] + box_inds[3],
            ]
        )
        predicted_boxes[predicted_instance["image_id"]] = torch.cat(
            (
                predicted_boxes[predicted_instance["image_id"]].to(device),
                torch.as_tensor([box_inds], dtype=torch.float32).to(device),
            )
        )
        predicted_cls_probs[predicted_instance["image_id"]] = torch.cat(
            (
                predicted_cls_probs[predicted_instance["image_id"]].to(device),
                torch.as_tensor(
                    [predicted_instance["cls_prob"]], dtype=torch.float32
                ).to(device),
            )
        )
        # Propagate the covariance through the same linear map used for the
        # box: J @ C @ J.T with J mapping (x, y, w, h) -> (x, y, x+w, y+h).
        box_covar = np.array(predicted_instance["bbox_covar"])
        transformation_mat = np.array(
            [[1.0, 0, 0, 0], [0, 1.0, 0, 0], [1.0, 0, 1.0, 0], [0, 1.0, 0.0, 1.0]]
        )
        cov_pred = np.matmul(
            np.matmul(transformation_mat, box_covar), transformation_mat.T
        ).tolist()
        predicted_covar_mats[predicted_instance["image_id"]] = torch.cat(
            (
                predicted_covar_mats[predicted_instance["image_id"]].to(device),
                torch.as_tensor([cov_pred], dtype=torch.float32).to(device),
            )
        )
        # PPP (Poisson point process) weights, when the record carries them;
        # otherwise a NaN scalar marks the image as having no PPP info.
        if "ppp" in predicted_instance:
            ppp_dict = {
                k: torch.as_tensor(v, dtype=torch.float32).to(device)
                for k, v in predicted_instance["ppp"].items()
            }
            ppp_weights[predicted_instance["image_id"]] = ppp_dict
        else:
            ppp_weights[predicted_instance["image_id"]] = torch.as_tensor(np.nan).to(
                device
            )
        image_sizes[predicted_instance["image_id"]] = predicted_instance["image_size"]
    return dict(
        {
            "predicted_boxes": predicted_boxes,
            "predicted_cls_probs": predicted_cls_probs,
            "predicted_covar_mats": predicted_covar_mats,
            "ppp_weights": ppp_weights,
            "image_size": image_sizes,
        }
    )
def eval_gt_preprocess(gt_instances):
    """
    Groups ground-truth annotation records into per-image tensors.

    Args:
        gt_instances (list[dict]): COCO-style annotation records, each with
            keys "image_id", "bbox" (x, y, w, h), "category_id" and optionally
            "is_truncated"/"is_occluded".

    Returns:
        dict: per-image dicts keyed by image id — gt boxes as
        (x1, y1, x2, y2) and category indices; truncation/occlusion flags are
        included only when the *first* record carries them.
        NOTE(review): truncation flags are appended per record whenever a
        record has them, but the return keys depend on gt_instances[0] only —
        mixed annotations would be silently dropped; confirm homogeneity.
    """
    gt_boxes, gt_cat_idxs, gt_is_truncated, gt_is_occluded = (
        defaultdict(torch.Tensor),
        defaultdict(torch.Tensor),
        defaultdict(torch.Tensor),
        defaultdict(torch.Tensor),
    )
    for gt_instance in gt_instances:
        # Convert (x, y, w, h) box parameterization to (x1, y1, x2, y2).
        box_inds = gt_instance["bbox"]
        box_inds = np.array(
            [
                box_inds[0],
                box_inds[1],
                box_inds[0] + box_inds[2],
                box_inds[1] + box_inds[3],
            ]
        )
        gt_boxes[gt_instance["image_id"]] = torch.cat(
            (
                gt_boxes[gt_instance["image_id"]].to(device),
                torch.as_tensor([box_inds], dtype=torch.float32).to(device),
            )
        )
        gt_cat_idxs[gt_instance["image_id"]] = torch.cat(
            (
                gt_cat_idxs[gt_instance["image_id"]].to(device),
                torch.as_tensor([[gt_instance["category_id"]]], dtype=torch.float32).to(
                    device
                ),
            )
        )
        if "is_truncated" in gt_instance.keys():
            gt_is_truncated[gt_instance["image_id"]] = torch.cat(
                (
                    gt_is_truncated[gt_instance["image_id"]].to(device),
                    torch.as_tensor(
                        [gt_instance["is_truncated"]], dtype=torch.float32
                    ).to(device),
                )
            )
            gt_is_occluded[gt_instance["image_id"]] = torch.cat(
                (
                    gt_is_occluded[gt_instance["image_id"]].to(device),
                    torch.as_tensor(
                        [gt_instance["is_occluded"]], dtype=torch.float32
                    ).to(device),
                )
            )
    if "is_truncated" in gt_instances[0].keys():
        return dict(
            {
                "gt_boxes": gt_boxes,
                "gt_cat_idxs": gt_cat_idxs,
                "gt_is_truncated": gt_is_truncated,
                "gt_is_occluded": gt_is_occluded,
            }
        )
    else:
        return dict({"gt_boxes": gt_boxes, "gt_cat_idxs": gt_cat_idxs})
def get_matched_results(
    cfg, inference_output_dir, iou_min=0.1, iou_correct=0.7, min_allowed_score=0.0
):
    """
    Loads (or computes and caches) detection/ground-truth matching results.

    The result is cached on disk as
    ``matched_results_{iou_min}_{iou_correct}_{min_allowed_score}.pth`` inside
    ``inference_output_dir``; a cache miss triggers full preprocessing and
    matching via match_predictions_to_groundtruth.

    Args:
        cfg: detectron2-style config (forwarded to preprocessing).
        inference_output_dir (str): directory with inference output and caches.
        iou_min (float): IoU at or below which a detection is a false positive.
        iou_correct (float): IoU at or above which a detection is a true positive.
        min_allowed_score (float): minimum detection score kept in preprocessing.

    Returns:
        dict: output of match_predictions_to_groundtruth.
    """
    try:
        matched_results = torch.load(
            os.path.join(
                inference_output_dir,
                "matched_results_{}_{}_{}.pth".format(
                    iou_min, iou_correct, min_allowed_score
                ),
            ),
            map_location=device,
        )
        return matched_results
    except FileNotFoundError:
        # Cache miss: preprocess predictions and ground truth, then match.
        (
            preprocessed_predicted_instances,
            preprocessed_gt_instances,
        ) = get_per_frame_preprocessed_instances(
            cfg, inference_output_dir, min_allowed_score
        )
        predicted_box_means = preprocessed_predicted_instances["predicted_boxes"]
        predicted_cls_probs = preprocessed_predicted_instances["predicted_cls_probs"]
        predicted_box_covariances = preprocessed_predicted_instances[
            "predicted_covar_mats"
        ]
        gt_box_means = preprocessed_gt_instances["gt_boxes"]
        gt_cat_idxs = preprocessed_gt_instances["gt_cat_idxs"]
        # Truncation/occlusion flags are optional (dataset-dependent).
        if "gt_is_truncated" in preprocessed_gt_instances.keys():
            is_truncated = preprocessed_gt_instances["gt_is_truncated"]
        else:
            is_truncated = None
        if "gt_is_occluded" in preprocessed_gt_instances.keys():
            is_occluded = preprocessed_gt_instances["gt_is_occluded"]
        else:
            is_occluded = None
        matched_results = match_predictions_to_groundtruth(
            predicted_box_means,
            predicted_cls_probs,
            predicted_box_covariances,
            gt_box_means,
            gt_cat_idxs,
            iou_min,
            iou_correct,
            is_truncated=is_truncated,
            is_occluded=is_occluded,
        )
        # Persist for subsequent calls with the same thresholds.
        torch.save(
            matched_results,
            os.path.join(
                inference_output_dir,
                "matched_results_{}_{}_{}.pth".format(
                    iou_min, iou_correct, min_allowed_score
                ),
            ),
        )
        return matched_results
def get_per_frame_preprocessed_gt_instances(cfg, inference_output_dir):
    """
    Loads cached pre-processed ground-truth instances, computing and caching
    them from the dataset annotation file on the first call.

    Args:
        cfg: detectron2-style config; cfg.ACTUAL_TEST_DATASET selects the
            dataset whose metadata provides the annotation json file.
        inference_output_dir (str): directory holding the
            ``preprocessed_gt_instances.pth`` cache.

    Returns:
        dict: output of eval_gt_preprocess.
    """
    meta_catalog = MetadataCatalog.get(cfg.ACTUAL_TEST_DATASET)
    # Process GT
    print("Began pre-processing ground truth annotations...")
    try:
        preprocessed_gt_instances = torch.load(
            os.path.join(inference_output_dir, "preprocessed_gt_instances.pth"),
            map_location=device,
        )
    except FileNotFoundError:
        # Cache miss: parse the raw annotation file and cache the result.
        # Use a context manager so the file handle is closed (the previous
        # json.load(open(...)) leaked it).
        with open(meta_catalog.json_file, "r") as annotation_file:
            gt_info = json.load(annotation_file)
        gt_instances = gt_info["annotations"]
        preprocessed_gt_instances = eval_gt_preprocess(gt_instances)
        torch.save(
            preprocessed_gt_instances,
            os.path.join(inference_output_dir, "preprocessed_gt_instances.pth"),
        )
    print("Done!")
    return preprocessed_gt_instances
def get_per_frame_preprocessed_pred_instances(
    cfg, inference_output_dir, img_id, min_allowed_score=0.0
):
    """
    Builds a detectron2 Instances object for one image from the inference
    results json.

    Args:
        cfg: detectron2-style config (unused here beyond the shared signature).
        inference_output_dir (str): directory containing
            ``coco_instances_results.json``.
        img_id: image id whose detections are extracted.
        min_allowed_score (float): minimum detection score kept in preprocessing.

    Returns:
        Instances: per-image detections with boxes, class probability vectors,
        box covariances, max scores and argmax classes.
    """
    print("Began pre-processing predicted instances...")
    prediction_file_name = os.path.join(
        inference_output_dir, "coco_instances_results.json"
    )
    # Close the handle deterministically (the previous json.load(open(...))
    # leaked it).
    with open(prediction_file_name) as prediction_file:
        predicted_instances = json.load(prediction_file)
    preprocessed_predicted_instances = eval_predictions_preprocess(
        predicted_instances, min_allowed_score
    )
    img_size = preprocessed_predicted_instances["image_size"][img_id]
    pred_boxes = Boxes(preprocessed_predicted_instances["predicted_boxes"][img_id])
    pred_cls_probs = preprocessed_predicted_instances["predicted_cls_probs"][img_id]
    pred_boxes_covariance = preprocessed_predicted_instances["predicted_covar_mats"][
        img_id
    ]
    # Score/class of each detection: max over the class probability vector.
    scores, pred_classes = pred_cls_probs.max(dim=1)
    instances = Instances(
        image_size=img_size,
        pred_boxes=pred_boxes,
        pred_cls_probs=pred_cls_probs,
        pred_boxes_covariance=pred_boxes_covariance,
        scores=scores,
        pred_classes=pred_classes,
    )
    print("Done!")
    return instances
def get_per_frame_preprocessed_instances(
    cfg, inference_output_dir, min_allowed_score=0.0
):
    """
    Loads (or computes and caches) pre-processed predictions and ground truth.

    Ground truth is cached as ``preprocessed_gt_instances.pth`` and
    predictions as ``preprocessed_predicted_instances_{min_allowed_score}.pth``
    inside ``inference_output_dir``.

    Args:
        cfg: detectron2-style config; cfg.ACTUAL_TEST_DATASET selects the
            dataset whose metadata provides the annotation json file.
        inference_output_dir (str): directory with inference output and caches.
        min_allowed_score (float): minimum detection score kept in preprocessing.

    Returns:
        tuple: (preprocessed_predicted_instances, preprocessed_gt_instances).
    """
    prediction_file_name = os.path.join(
        inference_output_dir, "coco_instances_results.json"
    )
    meta_catalog = MetadataCatalog.get(cfg.ACTUAL_TEST_DATASET)
    # Process GT
    print("Began pre-processing ground truth annotations...")
    try:
        preprocessed_gt_instances = torch.load(
            os.path.join(inference_output_dir, "preprocessed_gt_instances.pth"),
            map_location=device,
        )
    except FileNotFoundError:
        # Cache miss: parse the raw annotation file and cache the result.
        # Context managers close the handles (json.load(open(...)) leaked them).
        with open(meta_catalog.json_file, "r") as annotation_file:
            gt_info = json.load(annotation_file)
        gt_instances = gt_info["annotations"]
        preprocessed_gt_instances = eval_gt_preprocess(gt_instances)
        torch.save(
            preprocessed_gt_instances,
            os.path.join(inference_output_dir, "preprocessed_gt_instances.pth"),
        )
    print("Done!")
    print("Began pre-processing predicted instances...")
    try:
        preprocessed_predicted_instances = torch.load(
            os.path.join(
                inference_output_dir,
                "preprocessed_predicted_instances_{}.pth".format(min_allowed_score),
            ),
            map_location=device,
        )
    # Process predictions
    except FileNotFoundError:
        with open(prediction_file_name) as prediction_file:
            predicted_instances = json.load(prediction_file)
        preprocessed_predicted_instances = eval_predictions_preprocess(
            predicted_instances, min_allowed_score
        )
        torch.save(
            preprocessed_predicted_instances,
            os.path.join(
                inference_output_dir,
                "preprocessed_predicted_instances_{}.pth".format(min_allowed_score),
            ),
        )
    print("Done!")
    return preprocessed_predicted_instances, preprocessed_gt_instances
def match_predictions_to_groundtruth(
    predicted_box_means,
    predicted_cls_probs,
    predicted_box_covariances,
    gt_box_means,
    gt_cat_idxs,
    iou_min=0.1,
    iou_correct=0.7,
    is_truncated=None,
    is_occluded=None,
):
    """
    Partitions detections and ground truth into evaluation categories, per image.

    Detections in each image are assigned to one of:
      * true_positives: for each gt box, the highest-scoring detection with
        IoU >= iou_correct;
      * duplicates: the remaining detections with IoU >= iou_correct for the
        same gt box;
      * localization_errors: detections with iou_min < IoU < 0.5 to some gt box
        (after masking out true positives/duplicates), assigned to the gt box
        with the largest IoU;
      * false_positives: detections with IoU <= iou_min to every gt box (or all
        detections of an image with no ground truth).
    Ground-truth boxes matched by none of the above end up in false_negatives,
    together with a per-image list of (image id, missed count) pairs.

    Args:
        predicted_box_means (dict): image id -> (N, 4) detection boxes.
        predicted_cls_probs (dict): image id -> (N, C) class probabilities.
        predicted_box_covariances (dict): image id -> (N, 4, 4) covariances.
        gt_box_means (dict): image id -> (M, 4) ground-truth boxes.
        gt_cat_idxs (dict): image id -> (M, 1) ground-truth category indices.
        iou_min (float): IoU at or below which a detection is a false positive.
        iou_correct (float): IoU at or above which a detection is a true positive.
        is_truncated (dict or None): optional image id -> (M,) truncation flags.
        is_occluded (dict or None): optional image id -> (M,) occlusion flags.

    Returns:
        dict: the five category dictionaries described above, each holding
        concatenated tensors across all images.
    """
    # Flag to know if truncation and occlusion should be saved:
    trunc_occ_flag = is_truncated is not None and is_occluded is not None
    true_positives = dict(
        {
            "predicted_box_means": torch.Tensor().to(device),
            "predicted_box_covariances": torch.Tensor().to(device),
            "predicted_cls_probs": torch.Tensor().to(device),
            "gt_box_means": torch.Tensor().to(device),
            "gt_cat_idxs": torch.Tensor().to(device),
            "iou_with_ground_truth": torch.Tensor().to(device),
            "is_truncated": torch.Tensor().to(device),
            "is_occluded": torch.Tensor().to(device),
        }
    )
    localization_errors = dict(
        {
            "predicted_box_means": torch.Tensor().to(device),
            "predicted_box_covariances": torch.Tensor().to(device),
            "predicted_cls_probs": torch.Tensor().to(device),
            "gt_box_means": torch.Tensor().to(device),
            "gt_cat_idxs": torch.Tensor().to(device),
            "iou_with_ground_truth": torch.Tensor().to(device),
            "is_truncated": torch.Tensor().to(device),
            "is_occluded": torch.Tensor().to(device),
        }
    )
    duplicates = dict(
        {
            "predicted_box_means": torch.Tensor().to(device),
            "predicted_box_covariances": torch.Tensor().to(device),
            "predicted_cls_probs": torch.Tensor().to(device),
            "gt_box_means": torch.Tensor().to(device),
            "gt_cat_idxs": torch.Tensor().to(device),
            "iou_with_ground_truth": torch.Tensor().to(device),
            "is_truncated": torch.Tensor().to(device),
            "is_occluded": torch.Tensor().to(device),
        }
    )
    false_positives = dict(
        {
            "predicted_box_means": torch.Tensor().to(device),
            "predicted_box_covariances": torch.Tensor().to(device),
            "predicted_cls_probs": torch.Tensor().to(device),
        }
    )
    false_negatives = dict(
        {
            "gt_box_means": torch.Tensor().to(device),
            "gt_cat_idxs": torch.Tensor().to(device),
            "is_truncated": torch.Tensor().to(device),
            "is_occluded": torch.Tensor().to(device),
            "count": list(),
        }
    )
    with tqdm.tqdm(total=len(predicted_box_means)) as pbar:
        for key in predicted_box_means.keys():
            pbar.update(1)
            # Check if gt available, if not all detections go to false
            # positives
            if key not in gt_box_means.keys():
                false_positives["predicted_box_means"] = torch.cat(
                    (false_positives["predicted_box_means"], predicted_box_means[key])
                )
                false_positives["predicted_cls_probs"] = torch.cat(
                    (false_positives["predicted_cls_probs"], predicted_cls_probs[key])
                )
                false_positives["predicted_box_covariances"] = torch.cat(
                    (
                        false_positives["predicted_box_covariances"],
                        predicted_box_covariances[key],
                    )
                )
                false_negatives["count"].append((key, 0))
                continue
            # Compute iou between gt boxes and all predicted boxes in frame
            frame_gt_boxes = Boxes(gt_box_means[key])
            frame_predicted_boxes = Boxes(predicted_box_means[key])
            num_predictions_in_frame = frame_predicted_boxes.tensor.shape[0]
            # match_iou has shape (num_gt, num_predictions).
            match_iou = pairwise_iou(frame_gt_boxes, frame_predicted_boxes)
            # False positives are detections that have an iou < match iou with
            # any ground truth object.
            false_positive_idxs = (match_iou <= iou_min).all(0)
            false_positives["predicted_box_means"] = torch.cat(
                (
                    false_positives["predicted_box_means"],
                    predicted_box_means[key][false_positive_idxs],
                )
            )
            false_positives["predicted_cls_probs"] = torch.cat(
                (
                    false_positives["predicted_cls_probs"],
                    predicted_cls_probs[key][false_positive_idxs],
                )
            )
            false_positives["predicted_box_covariances"] = torch.cat(
                (
                    false_positives["predicted_box_covariances"],
                    predicted_box_covariances[key][false_positive_idxs],
                )
            )
            num_fp_in_frame = false_positive_idxs.sum(0)
            # True positives are any detections with match iou > iou correct. We need to separate these detections to
            # True positive and duplicate set. The true positive detection is the detection assigned the highest score
            # by the neural network.
            true_positive_idxs = torch.nonzero(match_iou >= iou_correct, as_tuple=False)
            # Setup tensors to allow assignment of detections only once.
            processed_gt = torch.tensor([]).type(torch.LongTensor).to(device)
            predictions_idxs_processed = (
                torch.tensor([]).type(torch.LongTensor).to(device)
            )
            for i in torch.arange(frame_gt_boxes.tensor.shape[0]):
                # Check if true positive has been previously assigned to a ground truth box and remove it if this is
                # the case. Very rare occurrence but need to handle it
                # nevertheless.
                prediction_idxs = true_positive_idxs[true_positive_idxs[:, 0] == i][
                    :, 1
                ]
                non_valid_idxs = torch.nonzero(
                    predictions_idxs_processed[..., None] == prediction_idxs,
                    as_tuple=False,
                )
                if non_valid_idxs.shape[0] > 0:
                    prediction_idxs[non_valid_idxs[:, 1]] = -1
                    prediction_idxs = prediction_idxs[prediction_idxs != -1]
                if prediction_idxs.shape[0] > 0:
                    # If there is a prediction attached to gt, count it as
                    # processed.
                    processed_gt = torch.cat(
                        (processed_gt, i.unsqueeze(0).to(processed_gt.device))
                    )
                    predictions_idxs_processed = torch.cat(
                        (predictions_idxs_processed, prediction_idxs)
                    )
                    current_matches_predicted_cls_probs = predicted_cls_probs[key][
                        prediction_idxs
                    ]
                    # Rank candidate detections by their max class probability;
                    # the top one is the true positive, the rest duplicates.
                    max_score, _ = torch.max(current_matches_predicted_cls_probs, 1)
                    _, max_idxs = max_score.topk(max_score.shape[0])
                    if max_idxs.shape[0] > 1:
                        max_idx = max_idxs[0]
                        duplicate_idxs = max_idxs[1:]
                    else:
                        max_idx = max_idxs
                        duplicate_idxs = torch.empty(0).to(device)
                    current_matches_predicted_box_means = predicted_box_means[key][
                        prediction_idxs
                    ]
                    current_matches_predicted_box_covariances = (
                        predicted_box_covariances[key][prediction_idxs]
                    )
                    # Highest scoring detection goes to true positives
                    true_positives["predicted_box_means"] = torch.cat(
                        (
                            true_positives["predicted_box_means"],
                            current_matches_predicted_box_means[
                                max_idx : max_idx + 1, :
                            ],
                        )
                    )
                    true_positives["predicted_cls_probs"] = torch.cat(
                        (
                            true_positives["predicted_cls_probs"],
                            current_matches_predicted_cls_probs[
                                max_idx : max_idx + 1, :
                            ],
                        )
                    )
                    true_positives["predicted_box_covariances"] = torch.cat(
                        (
                            true_positives["predicted_box_covariances"],
                            current_matches_predicted_box_covariances[
                                max_idx : max_idx + 1, :
                            ],
                        )
                    )
                    true_positives["gt_box_means"] = torch.cat(
                        (
                            true_positives["gt_box_means"],
                            gt_box_means[key][i : i + 1, :],
                        )
                    )
                    true_positives["gt_cat_idxs"] = torch.cat(
                        (true_positives["gt_cat_idxs"], gt_cat_idxs[key][i : i + 1, :])
                    )
                    if trunc_occ_flag:
                        true_positives["is_truncated"] = torch.cat(
                            (
                                true_positives["is_truncated"],
                                is_truncated[key][i : i + 1],
                            )
                        )
                        true_positives["is_occluded"] = torch.cat(
                            (true_positives["is_occluded"], is_occluded[key][i : i + 1])
                        )
                    true_positives["iou_with_ground_truth"] = torch.cat(
                        (
                            true_positives["iou_with_ground_truth"],
                            match_iou[i, prediction_idxs][max_idx : max_idx + 1],
                        )
                    )
                    # Lower scoring redundant detections go to duplicates
                    if duplicate_idxs.shape[0] > 1:
                        duplicates["predicted_box_means"] = torch.cat(
                            (
                                duplicates["predicted_box_means"],
                                current_matches_predicted_box_means[duplicate_idxs, :],
                            )
                        )
                        duplicates["predicted_cls_probs"] = torch.cat(
                            (
                                duplicates["predicted_cls_probs"],
                                current_matches_predicted_cls_probs[duplicate_idxs, :],
                            )
                        )
                        duplicates["predicted_box_covariances"] = torch.cat(
                            (
                                duplicates["predicted_box_covariances"],
                                current_matches_predicted_box_covariances[
                                    duplicate_idxs, :
                                ],
                            )
                        )
                        duplicates["gt_box_means"] = torch.cat(
                            (
                                duplicates["gt_box_means"],
                                gt_box_means[key][
                                    np.repeat(i, duplicate_idxs.shape[0]), :
                                ],
                            )
                        )
                        duplicates["gt_cat_idxs"] = torch.cat(
                            (
                                duplicates["gt_cat_idxs"],
                                gt_cat_idxs[key][
                                    np.repeat(i, duplicate_idxs.shape[0]), :
                                ],
                            )
                        )
                        if trunc_occ_flag:
                            duplicates["is_truncated"] = torch.cat(
                                (
                                    duplicates["is_truncated"],
                                    is_truncated[key][
                                        np.repeat(i, duplicate_idxs.shape[0])
                                    ],
                                )
                            )
                            duplicates["is_occluded"] = torch.cat(
                                (
                                    duplicates["is_occluded"],
                                    is_occluded[key][
                                        np.repeat(i, duplicate_idxs.shape[0])
                                    ],
                                )
                            )
                        duplicates["iou_with_ground_truth"] = torch.cat(
                            (
                                duplicates["iou_with_ground_truth"],
                                match_iou[i, prediction_idxs][duplicate_idxs],
                            )
                        )
                    elif duplicate_idxs.shape[0] == 1:
                        # Special case when only one duplicate exists, required to
                        # index properly for torch.cat
                        duplicates["predicted_box_means"] = torch.cat(
                            (
                                duplicates["predicted_box_means"],
                                current_matches_predicted_box_means[
                                    duplicate_idxs : duplicate_idxs + 1, :
                                ],
                            )
                        )
                        duplicates["predicted_cls_probs"] = torch.cat(
                            (
                                duplicates["predicted_cls_probs"],
                                current_matches_predicted_cls_probs[
                                    duplicate_idxs : duplicate_idxs + 1, :
                                ],
                            )
                        )
                        duplicates["predicted_box_covariances"] = torch.cat(
                            (
                                duplicates["predicted_box_covariances"],
                                current_matches_predicted_box_covariances[
                                    duplicate_idxs : duplicate_idxs + 1, :
                                ],
                            )
                        )
                        duplicates["gt_box_means"] = torch.cat(
                            (
                                duplicates["gt_box_means"],
                                gt_box_means[key][i : i + 1, :],
                            )
                        )
                        duplicates["gt_cat_idxs"] = torch.cat(
                            (duplicates["gt_cat_idxs"], gt_cat_idxs[key][i : i + 1, :])
                        )
                        if trunc_occ_flag:
                            duplicates["is_truncated"] = torch.cat(
                                (
                                    duplicates["is_truncated"],
                                    is_truncated[key][i : i + 1],
                                )
                            )
                            duplicates["is_occluded"] = torch.cat(
                                (duplicates["is_occluded"], is_occluded[key][i : i + 1])
                            )
                        duplicates["iou_with_ground_truth"] = torch.cat(
                            (
                                duplicates["iou_with_ground_truth"],
                                match_iou[i, prediction_idxs][
                                    duplicate_idxs : duplicate_idxs + 1
                                ],
                            )
                        )
            num_tp_dup_in_frame = predictions_idxs_processed.shape[0]
            # Process localization errors. Localization errors are detections with iou < 0.5 with any ground truth.
            # Mask out processed true positives/duplicates so they are not
            # re-associated with another gt
            # ToDo Localization Errors and False Positives are constant, do not change. We could generate them only
            # once.
            match_iou[:, true_positive_idxs[:, 1]] *= 0.0
            localization_errors_idxs = torch.nonzero(
                (match_iou > iou_min) & (match_iou < 0.5), as_tuple=False
            )
            # Setup tensors to allow assignment of detections only once.
            processed_localization_errors = (
                torch.tensor([]).type(torch.LongTensor).to(device)
            )
            for localization_error_idx in localization_errors_idxs[:, 1]:
                # If localization error has been processed, skip iteration.
                if (processed_localization_errors == localization_error_idx).any():
                    continue
                # For every localization error, assign the ground truth with
                # highest IOU.
                gt_loc_error_idxs = localization_errors_idxs[
                    localization_errors_idxs[:, 1] == localization_error_idx
                ]
                ious_with_gts = match_iou[
                    gt_loc_error_idxs[:, 0], gt_loc_error_idxs[:, 1]
                ]
                gt_loc_error_idxs = gt_loc_error_idxs[:, 0]
                # Choose the gt with the largest IOU with localization error
                if gt_loc_error_idxs.shape[0] > 1:
                    sorted_idxs = ious_with_gts.sort(descending=True)[1]
                    gt_loc_error_idxs = gt_loc_error_idxs[
                        sorted_idxs[0] : sorted_idxs[0] + 1
                    ]
                processed_gt = torch.cat((processed_gt, gt_loc_error_idxs))
                localization_errors["predicted_box_means"] = torch.cat(
                    (
                        localization_errors["predicted_box_means"],
                        predicted_box_means[key][
                            localization_error_idx : localization_error_idx + 1, :
                        ],
                    )
                )
                localization_errors["predicted_cls_probs"] = torch.cat(
                    (
                        localization_errors["predicted_cls_probs"],
                        predicted_cls_probs[key][
                            localization_error_idx : localization_error_idx + 1, :
                        ],
                    )
                )
                localization_errors["predicted_box_covariances"] = torch.cat(
                    (
                        localization_errors["predicted_box_covariances"],
                        predicted_box_covariances[key][
                            localization_error_idx : localization_error_idx + 1, :
                        ],
                    )
                )
                localization_errors["gt_box_means"] = torch.cat(
                    (
                        localization_errors["gt_box_means"],
                        gt_box_means[key][gt_loc_error_idxs : gt_loc_error_idxs + 1, :],
                    )
                )
                localization_errors["gt_cat_idxs"] = torch.cat(
                    (
                        localization_errors["gt_cat_idxs"],
                        gt_cat_idxs[key][gt_loc_error_idxs : gt_loc_error_idxs + 1],
                    )
                )
                if trunc_occ_flag:
                    localization_errors["is_truncated"] = torch.cat(
                        (
                            localization_errors["is_truncated"],
                            is_truncated[key][
                                gt_loc_error_idxs : gt_loc_error_idxs + 1
                            ],
                        )
                    )
                    localization_errors["is_occluded"] = torch.cat(
                        (
                            localization_errors["is_occluded"],
                            is_occluded[key][gt_loc_error_idxs : gt_loc_error_idxs + 1],
                        )
                    )
                localization_errors["iou_with_ground_truth"] = torch.cat(
                    (
                        localization_errors["iou_with_ground_truth"],
                        match_iou[
                            gt_loc_error_idxs,
                            localization_error_idx : localization_error_idx + 1,
                        ],
                    )
                )
                # Append processed localization errors
                processed_localization_errors = torch.cat(
                    (processed_localization_errors, localization_error_idx.unsqueeze(0))
                )
            # Assert that the total number of processed predictions do not exceed the number of predictions in frame.
            num_loc_errors_in_frame = processed_localization_errors.shape[0]
            num_processed_predictions = (
                num_loc_errors_in_frame + num_fp_in_frame + num_tp_dup_in_frame
            )
            # At the limit where iou_correct=0.5, equality holds.
            assert num_processed_predictions <= num_predictions_in_frame
            # Get false negative ground truth, which are fully missed.
            # These can be found by looking for GT instances not processed.
            processed_gt = processed_gt.unique()
            false_negative_idxs = torch.ones(frame_gt_boxes.tensor.shape[0])
            false_negative_idxs[processed_gt] = 0
            false_negative_idxs = false_negative_idxs.type(torch.bool)
            false_negatives["gt_box_means"] = torch.cat(
                (
                    false_negatives["gt_box_means"],
                    gt_box_means[key][false_negative_idxs],
                )
            )
            false_negatives["gt_cat_idxs"] = torch.cat(
                (false_negatives["gt_cat_idxs"], gt_cat_idxs[key][false_negative_idxs])
            )
            false_negatives["count"].append(
                (key, gt_box_means[key][false_negative_idxs].shape[0])
            )
            if trunc_occ_flag:
                false_negatives["is_truncated"] = torch.cat(
                    (
                        false_negatives["is_truncated"],
                        is_truncated[key][false_negative_idxs],
                    )
                )
                false_negatives["is_occluded"] = torch.cat(
                    (
                        false_negatives["is_occluded"],
                        is_occluded[key][false_negative_idxs],
                    )
                )
    matched_results = dict()
    matched_results.update(
        {
            "true_positives": true_positives,
            "localization_errors": localization_errors,
            "duplicates": duplicates,
            "false_positives": false_positives,
            "false_negatives": false_negatives,
        }
    )
    return matched_results
def get_train_contiguous_id_to_test_thing_dataset_id_dict(
    cfg,
    args,
    train_thing_dataset_id_to_contiguous_id,
    test_thing_dataset_id_to_contiguous_id,
):
    """
    Builds a mapping from training contiguous category ids to test dataset
    category ids.

    Args:
        cfg: detectron2-style config; cfg.DATASETS.TRAIN[0] names the
            training dataset.
        args: parsed arguments; args.test_dataset names the inference dataset.
        train_thing_dataset_id_to_contiguous_id (dict): training dataset
            category id -> contiguous id.
        test_thing_dataset_id_to_contiguous_id (dict): test dataset
            category id -> contiguous id.

    Returns:
        dict: training contiguous id -> test dataset category id.

    Raises:
        ValueError: if the train/test dataset pair is not one of the
            supported combinations (same dataset, COCO->VOC, COCO->OpenImages,
            BDD->KITTI).
    """
    # If both dicts are equal or if we are performing out of distribution
    # detection, just flip the test dict.
    cat_mapping_dict = dict(
        (v, k) for k, v in test_thing_dataset_id_to_contiguous_id.items()
    )
    if (
        train_thing_dataset_id_to_contiguous_id
        == test_thing_dataset_id_to_contiguous_id
    ):
        return cat_mapping_dict

    # If not equal, three situations: 1) COCO to PASCAL, 2) COCO to
    # OpenImages, or 3) BDD to KITTI. Bug fix vs. the original: the branches
    # are now one if/elif chain and the unsupported case actually raises
    # (previously the ValueError was constructed but never raised, leading to
    # a confusing NameError on dataset_mapping_dict).
    if "voc" in args.test_dataset and "coco" in cfg.DATASETS.TRAIN[0]:
        dataset_mapping_dict = dict(
            (v, k) for k, v in metadata.COCO_TO_VOC_CONTIGUOUS_ID.items()
        )
    elif "openimages" in args.test_dataset and "coco" in cfg.DATASETS.TRAIN[0]:
        dataset_mapping_dict = dict(
            (v, k) for k, v in metadata.COCO_TO_OPENIMAGES_CONTIGUOUS_ID.items()
        )
    elif "kitti" in args.test_dataset and "bdd" in cfg.DATASETS.TRAIN[0]:
        dataset_mapping_dict = dict(
            (v, k) for k, v in metadata.BDD_TO_KITTI_CONTIGUOUS_ID.items()
        )
    else:
        raise ValueError(
            "Cannot generate category mapping dictionary. Please check if training and inference datasets are compatible."
        )
    return dict(
        (dataset_mapping_dict[k], v) for k, v in cat_mapping_dict.items()
    )
def get_test_thing_dataset_id_to_train_contiguous_id_dict(
    cfg,
    args,
    train_thing_dataset_id_to_contiguous_id,
    test_thing_dataset_id_to_contiguous_id,
):
    """
    Builds the inverse mapping of
    get_train_contiguous_id_to_test_thing_dataset_id_dict: test dataset
    category id -> training contiguous id.
    """
    forward_mapping = get_train_contiguous_id_to_test_thing_dataset_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id,
    )
    # Flip key/value pairs of the forward mapping.
    return dict((test_id, train_id) for train_id, test_id in forward_mapping.items())
def calculate_iou(bb1, bb2):
    """
    Return the intersection-over-union of two axis-aligned bounding boxes.

    Boxes are (x1, y1, x2, y2) in inclusive screen coordinates: 0,0 is the
    top-left pixel and w-1,h-1 the bottom-right one, so every extent gets +1
    when converted to an area.
    """

    def _area(box):
        # Inclusive pixel coordinates: a degenerate box still covers one pixel.
        return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)

    # Corners of the intersection rectangle.
    inter_x1 = max(bb1[0], bb2[0])
    inter_y1 = max(bb1[1], bb2[1])
    inter_x2 = min(bb1[2], bb2[2])
    inter_y2 = min(bb1[3], bb2[3])

    # No overlap on either axis means zero intersection.
    if inter_x2 < inter_x1 or inter_y2 < inter_y1:
        return 0.0

    intersection_area = _area((inter_x1, inter_y1, inter_x2, inter_y2))
    union_area = _area(bb1) + _area(bb2) - intersection_area
    return intersection_area / float(union_area)
| 37,914
| 39.079281
| 126
|
py
|
pmb-nll
|
pmb-nll-main/src/core/evaluation_tools/__init__.py
| 0
| 0
| 0
|
py
|
|
pmb-nll
|
pmb-nll-main/src/core/visualization_tools/results_processing_tools.py
|
import glob
import itertools
import numpy as np
import os
import pickle
import torch
from collections import defaultdict
# Project imports
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
def get_clean_results_dict(config_names,
                           configs_list,
                           inference_configs_list):
    """Aggregate pickled probabilistic scoring results across methods and splits.

    For every (config, inference config) pair, loads the
    'probabilistic_scoring_res_averaged_*.pkl' file (or
    'probabilistic_scoring_res_odd_*.pkl' for the OOD split) written by a
    previous inference run and accumulates metrics into
    res_dict_clean[category][metric_name].

    Categories are 'True Positives', 'Duplicates', 'Localization Errors' and
    'False Positives'; the 'OpenIm OOD' split only contributes
    false-positive statistics.

    NOTE(review): assumes the pickled dicts follow the schema produced by
    the probabilistic inference scripts (keys such as
    'true_positives_cls_analysis') -- verify against the producer.
    """
    # Level 0 is coco validation set with no corruption, level 10 is open
    # images, level 11 is open images ood
    image_corruption_levels = [0, 1, 3, 5, 10, 11]
    test_dataset_coco = "coco_2017_custom_val"
    test_dataset_open_images = "openimages_val"
    test_dataset_open_images_odd = "openimages_odd_val"
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Initiate dataframe dict
    res_dict_clean = defaultdict(lambda: defaultdict(list))
    for config_name, config, inference_config_name in zip(
            config_names, configs_list, inference_configs_list):
        # Setup config
        # NOTE: args is mutated in place on every iteration; the order of
        # these assignments matters for setup_config below.
        args.config_file = config
        args.inference_config = inference_config_name
        args.test_dataset = test_dataset_coco
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
        cfg.defrost()
        # Read coco dataset results
        cfg.ACTUAL_TEST_DATASET = args.test_dataset
        for image_corruption_level in image_corruption_levels:
            # Build path to gt instances and inference output
            args.image_corruption_level = image_corruption_level
            # Rebind the loop variable to the human-readable label used as
            # the dict key from here on.
            if image_corruption_level == 0:
                image_corruption_level = 'Val'
            elif image_corruption_level == 10:
                image_corruption_level = 'OpenIm'
            elif image_corruption_level == 11:
                image_corruption_level = 'OpenIm OOD'
            else:
                image_corruption_level = 'C' + str(image_corruption_level)
            if 'OpenIm' not in image_corruption_level:
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        'probabilistic_scoring_res_averaged_*.pkl'))[0]
            else:
                # Open Images results are stored under corruption level 0.
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                prob_dict_name = 'probabilistic_scoring_res_averaged_*.pkl' if image_corruption_level == 'OpenIm' else 'probabilistic_scoring_res_odd_*.pkl'
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        prob_dict_name))[0]
            with open(dictionary_file_name, "rb") as pickle_file:
                res_dict = pickle.load(pickle_file)
            if image_corruption_level != 'OpenIm OOD':
                # True Positives Results
                res_dict_clean['True Positives']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['true_positives_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['True Positives']['Brier Score'].extend(
                    res_dict['true_positives_cls_analysis']['brier_score_mean'])
                res_dict_clean['True Positives']['Negative Log Likelihood (Regression)'].extend(
                    res_dict['true_positives_reg_analysis']['ignorance_score_mean'])
                res_dict_clean['True Positives']['Mean Squared Error'].extend(
                    res_dict['true_positives_reg_analysis']['mean_squared_error'])
                res_dict_clean['True Positives']['Energy Score'].extend(
                    res_dict['true_positives_reg_analysis']['energy_score_mean'])
                # Repeat the label/method name once per data point so the
                # columns stay aligned for dataframe construction.
                res_dict_clean['True Positives']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['true_positives_reg_analysis']['energy_score_mean'].shape[0])
                res_dict_clean['True Positives']['Method Name'].extend(
                    [config_name] * res_dict['true_positives_reg_analysis']['energy_score_mean'].shape[0])
                # Duplicates Results
                res_dict_clean['Duplicates']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['duplicates_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['Duplicates']['Brier Score'].extend(
                    res_dict['duplicates_cls_analysis']['brier_score_mean'])
                res_dict_clean['Duplicates']['Negative Log Likelihood (Regression)'].extend(
                    res_dict['duplicates_reg_analysis']['ignorance_score_mean'])
                res_dict_clean['Duplicates']['Mean Squared Error'].extend(
                    res_dict['duplicates_reg_analysis']['mean_squared_error'])
                res_dict_clean['Duplicates']['Energy Score'].extend(
                    res_dict['duplicates_reg_analysis']['energy_score_mean'])
                res_dict_clean['Duplicates']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['duplicates_reg_analysis']['energy_score_mean'].shape[0])
                res_dict_clean['Duplicates']['Method Name'].extend(
                    [config_name] * res_dict['duplicates_reg_analysis']['energy_score_mean'].shape[0])
                # Localization Error Results
                res_dict_clean['Localization Errors']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['localization_errors_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['Localization Errors']['Brier Score'].extend(
                    res_dict['localization_errors_cls_analysis']['brier_score_mean'])
                res_dict_clean['Localization Errors']['Negative Log Likelihood (Regression)'].extend(
                    res_dict['localization_errors_reg_analysis']['ignorance_score_mean'])
                res_dict_clean['Localization Errors']['Mean Squared Error'].extend(
                    res_dict['localization_errors_reg_analysis']['mean_squared_error'])
                res_dict_clean['Localization Errors']['Energy Score'].extend(
                    res_dict['localization_errors_reg_analysis']['energy_score_mean'])
                res_dict_clean['Localization Errors']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['localization_errors_reg_analysis']['energy_score_mean'].shape[0])
                res_dict_clean['Localization Errors']['Method Name'].extend(
                    [config_name] *
                    res_dict['localization_errors_reg_analysis']['energy_score_mean'].shape[0])
                # False Positives Results
                res_dict_clean['False Positives']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['false_positives_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['False Positives']['Brier Score'].extend(
                    res_dict['false_positives_cls_analysis']['brier_score_mean'])
                res_dict_clean['False Positives']['Entropy'].extend(
                    res_dict['false_positives_reg_analysis']['total_entropy_mean'])
                res_dict_clean['False Positives']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['false_positives_reg_analysis']['total_entropy_mean'].shape[0])
                res_dict_clean['False Positives']['Method Name'].extend(
                    [config_name] *
                    res_dict['false_positives_reg_analysis']['total_entropy_mean'].shape[0])
            else:
                # False Positives Results
                # The OOD pickle stores scalar summaries, hence append (one
                # value per method) instead of extend.
                res_dict_clean['False Positives']['Negative Log Likelihood (Classification)'].append(
                    res_dict['ignorance_score_mean'])
                res_dict_clean['False Positives']['Brier Score'].append(
                    res_dict['brier_score_mean'])
                res_dict_clean['False Positives']['Entropy'].append(
                    res_dict['total_entropy_mean'])
                res_dict_clean['False Positives']['Image Corruption Level'].append(
                    image_corruption_level)
                res_dict_clean['False Positives']['Method Name'].append(
                    config_name)
    return res_dict_clean
def get_mAP_results(config_names,
                    configs_list,
                    inference_configs_list):
    """Collect mAP numbers for every method across corruption levels.

    Reads the 'mAP_res.txt' file written by a previous evaluation run for
    each (config, inference config, corruption level) combination and
    returns a defaultdict(list) with aligned 'Method Name',
    'Image Corruption Level' and 'mAP' (in percent) columns.

    NOTE(review): the parsing below assumes mAP_res.txt contains a Python
    list literal whose first element is the mAP fraction -- verify against
    the writer of that file.
    """
    # Level 0 is coco validation set with no corruption, level 10 is open
    # images, level 11 is open images ood
    image_corruption_levels = [0, 1, 2, 3, 4, 5, 10]
    test_dataset_coco = "coco_2017_custom_val"
    test_dataset_open_images = "openimages_val"
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Initiate dataframe dict
    mAP_results = defaultdict(list)
    for config_name, config, inference_config_name in zip(
            config_names, configs_list, inference_configs_list):
        # Setup config
        # args is mutated in place each iteration before setup_config.
        args.config_file = config
        args.inference_config = inference_config_name
        args.test_dataset = test_dataset_coco
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
        cfg.defrost()
        # Read coco dataset results
        cfg.ACTUAL_TEST_DATASET = args.test_dataset
        for image_corruption_level in image_corruption_levels:
            # Build path to gt instances and inference output
            args.image_corruption_level = image_corruption_level
            # Rebind the loop variable to the human-readable label.
            if image_corruption_level == 0:
                image_corruption_level = 'Val'
            elif image_corruption_level == 10:
                image_corruption_level = 'OpenIm'
            else:
                image_corruption_level = 'C' + str(image_corruption_level)
            if 'OpenIm' not in image_corruption_level:
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
            else:
                # Open Images results live under corruption level 0.
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
            text_file_name = glob.glob(
                os.path.join(
                    inference_output_dir,
                    'mAP_res.txt'))[0]
            with open(text_file_name, "r") as f:
                # Strip the '[...]' list syntax and take the first entry.
                mAP = f.read().strip('][\n').split(', ')[0]
                mAP = float(mAP) * 100
            mAP_results['Method Name'].append(config_name)
            mAP_results['Image Corruption Level'].append(
                image_corruption_level)
            mAP_results['mAP'].append(mAP)
    return mAP_results
def get_matched_results_dicts(config_names,
                              configs_list,
                              inference_configs_list,
                              iou_min=0.1,
                              iou_correct=0.5):
    """Load matched detection results and compute per-detection uncertainty metrics.

    For each method, loads 'matched_results_{iou_min}_{iou_correct}_*.pth'
    (COCO val and Open Images) or the preprocessed OOD predictions, builds a
    multivariate Gaussian per detection from the predicted box mean and
    covariance, and accumulates entropy, NLL, MSE, energy distance, IoU with
    ground truth, classification entropy, and truncation/occlusion flags in
    res_dict_clean[method][corruption_level][metric].

    Requires CUDA: all loaded tensors are mapped to 'cuda'.

    NOTE(review): assumes the .pth files follow the schema written by the
    matching/preprocessing scripts (keys like 'true_positives',
    'predicted_box_means') -- verify against the producers.
    """
    # Level 0 is coco validation set with no corruption, level 10 is open
    # images, level 11 is open images ood
    image_corruption_levels = [0, 10, 11]
    test_dataset_coco = "coco_2017_custom_val"
    test_dataset_open_images = "openimages_val"
    test_dataset_open_images_odd = "openimages_odd_val"
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Initiate dataframe dict
    res_dict_clean = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for config_name, config, inference_config_name in zip(
            config_names, configs_list, inference_configs_list):
        # Setup config
        args.config_file = config
        args.inference_config = inference_config_name
        args.test_dataset = test_dataset_coco
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
        cfg.defrost()
        # Read coco dataset results
        cfg.ACTUAL_TEST_DATASET = args.test_dataset
        for image_corruption_level in image_corruption_levels:
            # Build path to gt instances and inference output
            args.image_corruption_level = image_corruption_level
            # Rebind the loop variable to the human-readable label.
            if image_corruption_level == 0:
                image_corruption_level = 'Val'
            elif image_corruption_level == 10:
                image_corruption_level = 'OpenIm'
            elif image_corruption_level == 11:
                image_corruption_level = 'OpenIm OOD'
            else:
                image_corruption_level = 'C' + str(image_corruption_level)
            if 'OpenIm' not in image_corruption_level:
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                # Get matched results by either generating them or loading from
                # file.
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        "matched_results_{}_{}_*.pth".format(
                            iou_min,
                            iou_correct)))[0]
                matched_results = torch.load(
                    dictionary_file_name, map_location='cuda')
            elif image_corruption_level == 'OpenIm':
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        "matched_results_{}_{}_*.pth".format(
                            iou_min,
                            iou_correct)))[0]
                matched_results = torch.load(
                    dictionary_file_name, map_location='cuda')
            else:
                # OOD split: there is no ground truth, only preprocessed
                # per-image predictions that must be flattened.
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        "preprocessed_predicted_instances_odd_*.pth"))[0]
                preprocessed_predicted_instances = torch.load(
                    dictionary_file_name, map_location='cuda')
                predicted_boxes = preprocessed_predicted_instances['predicted_boxes']
                predicted_cov_mats = preprocessed_predicted_instances['predicted_covar_mats']
                predicted_cls_probs = preprocessed_predicted_instances['predicted_cls_probs']
                # Flatten the per-image dicts of tensor lists into flat lists.
                predicted_boxes = list(itertools.chain.from_iterable(
                    [predicted_boxes[key] for key in predicted_boxes.keys()]))
                predicted_cov_mats = list(itertools.chain.from_iterable(
                    [predicted_cov_mats[key] for key in predicted_cov_mats.keys()]))
                predicted_cls_probs = list(itertools.chain.from_iterable(
                    [predicted_cls_probs[key] for key in predicted_cls_probs.keys()]))
                predicted_boxes = torch.stack(
                    predicted_boxes, 1).transpose(
                    0, 1)
                predicted_cov_mats = torch.stack(
                    predicted_cov_mats, 1).transpose(0, 1)
                predicted_cls_probs = torch.stack(
                    predicted_cls_probs,
                    1).transpose(
                    0,
                    1)
                matched_results = {
                    'predicted_box_means': predicted_boxes,
                    'predicted_box_covariances': predicted_cov_mats,
                    'predicted_cls_probs': predicted_cls_probs}
            if image_corruption_level != 'OpenIm OOD':
                # Concatenate the four match categories in a fixed order:
                # TP, localization errors, duplicates, FP.
                all_results_means = torch.cat(
                    (matched_results['true_positives']['predicted_box_means'],
                     matched_results['localization_errors']['predicted_box_means'],
                     matched_results['duplicates']['predicted_box_means'],
                     matched_results['false_positives']['predicted_box_means']))
                all_results_covs = torch.cat(
                    (matched_results['true_positives']['predicted_box_covariances'],
                     matched_results['localization_errors']['predicted_box_covariances'],
                     matched_results['duplicates']['predicted_box_covariances'],
                     matched_results['false_positives']['predicted_box_covariances']))
                # False positives have no ground truth; NaN placeholders make
                # the downstream metrics NaN for them.
                all_gt_means = torch.cat(
                    (matched_results['true_positives']['gt_box_means'],
                     matched_results['localization_errors']['gt_box_means'],
                     matched_results['duplicates']['gt_box_means'],
                     matched_results['false_positives']['predicted_box_means']*np.NaN))
                # Build the Gaussian on CPU (with 1e-2 jitter on the diagonal
                # for numerical stability), then move its cached attributes
                # back to GPU one by one.
                predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
                    all_results_means.to('cpu'),
                    all_results_covs.to('cpu') +
                    1e-2 *
                    torch.eye(all_results_covs.shape[2]).to('cpu'))
                predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
                    'cuda')
                predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
                    'cuda')
                predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
                    'cuda')
                all_entropy = predicted_multivariate_normal_dists.entropy()
                all_log_prob = -predicted_multivariate_normal_dists.log_prob(all_gt_means)
                # Energy Score.
                # Monte-Carlo estimate with 3 samples per detection.
                sample_set = predicted_multivariate_normal_dists.sample((3,)).to('cuda')
                sample_set_1 = sample_set[:-1]
                sample_set_2 = sample_set[1:]
                energy_score = torch.norm(
                    (sample_set_1 - all_gt_means),
                    dim=2).mean(0) - 0.5 * torch.norm(
                    (sample_set_1 - sample_set_2),
                    dim=2).mean(0)
                mse_loss = torch.nn.MSELoss(reduction='none')
                mse = mse_loss(all_gt_means, all_results_means).mean(1)
                res_dict_clean[config_name][image_corruption_level]['Entropy'].extend(
                    all_entropy.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['MSE'].extend(
                    mse.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['NLL'].extend(
                    all_log_prob.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['ED'].extend(
                    energy_score.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['IOU With GT'].extend(torch.cat(
                    (matched_results['true_positives']['iou_with_ground_truth'],
                     matched_results['localization_errors']['iou_with_ground_truth'][:, 0],
                     matched_results['duplicates']['iou_with_ground_truth'],
                     torch.zeros(
                         matched_results['false_positives']['predicted_box_means'].shape[0]).to('cuda')*np.NaN)).cpu().numpy())
                # Separate Gaussian over false positives only, for FP entropy.
                predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
                    matched_results['false_positives']['predicted_box_means'].to('cpu'),
                    matched_results['false_positives']['predicted_box_covariances'].to('cpu') +
                    1e-2 *
                    torch.eye(matched_results['false_positives']['predicted_box_covariances'].shape[2]).to('cpu'))
                predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
                    'cuda')
                predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
                    'cuda')
                predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
                    'cuda')
                FP_Entropy = predicted_multivariate_normal_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['FP_Entropy'].extend(
                    FP_Entropy.cpu().numpy())
                predicted_cat_dists_fp = matched_results['false_positives']['predicted_cls_probs']
                # 80 columns means sigmoid-style COCO scores: use a Bernoulli
                # on (1 - max score); otherwise a full Categorical.
                if predicted_cat_dists_fp.shape[1] == 80:
                    predicted_cat_dists_fp, _ = predicted_cat_dists_fp.max(dim=1)
                    predicted_cat_dists_fp = 1-predicted_cat_dists_fp
                    predicted_categorical_dists = torch.distributions.Bernoulli(
                        probs=predicted_cat_dists_fp)
                else:
                    predicted_categorical_dists = torch.distributions.Categorical(
                        probs=matched_results['false_positives']['predicted_cls_probs'])
                all_pred_ent = predicted_categorical_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['Cat_Entropy'].extend(
                    all_pred_ent.cpu().numpy())
                if image_corruption_level == 'OpenIm':
                    # Open Images carries truncation/occlusion annotations;
                    # FP entries get NaN placeholders.
                    res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(
                        torch.cat(
                            (matched_results['true_positives']['is_truncated'],
                             matched_results['localization_errors']['is_truncated'],
                             matched_results['duplicates']['is_truncated'],
                             torch.full((
                                 matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
                    res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(
                        torch.cat(
                            (matched_results['true_positives']['is_occluded'],
                             matched_results['localization_errors']['is_occluded'],
                             matched_results['duplicates']['is_occluded'],
                             torch.full((
                                 matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
                else:
                    # COCO has no such annotations; fill with placeholders.
                    # NOTE(review): the localization-error and duplicate
                    # 'Truncated' fills are not multiplied by NaN here,
                    # unlike every other placeholder -- confirm intended.
                    res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(
                        torch.cat(
                            (torch.full((
                                matched_results['true_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN,
                             torch.full((
                                 matched_results['localization_errors']['predicted_box_means'].shape[0],), -1,
                                 dtype=torch.float32).to('cuda'),
                             torch.full((
                                 matched_results['duplicates']['predicted_box_means'].shape[0],), -1,
                                 dtype=torch.float32).to('cuda'),
                             torch.full((
                                 matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
                    res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(
                        torch.cat(
                            (torch.full((
                                matched_results['true_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN,
                             torch.full((
                                 matched_results['localization_errors']['predicted_box_means'].shape[0],), -1,
                                 dtype=torch.float32).to('cuda')*np.NaN,
                             torch.full((
                                 matched_results['duplicates']['predicted_box_means'].shape[0],), -1,
                                 dtype=torch.float32).to('cuda')*np.NaN,
                             torch.full((
                                 matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
            else:
                # OOD split: every detection is treated as a false positive.
                predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
                    matched_results['predicted_box_means'].to('cpu'),
                    matched_results['predicted_box_covariances'].to('cpu') +
                    1e-2 *
                    torch.eye(matched_results['predicted_box_covariances'].shape[2]).to('cpu'))
                predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
                    'cuda')
                predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
                    'cuda')
                predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
                    'cuda')
                all_entropy = predicted_multivariate_normal_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['FP_Entropy'].extend(
                    all_entropy.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['IOU With GT'].extend(torch.zeros(
                    matched_results['predicted_box_means'].shape[0]).cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(torch.full((
                    matched_results['predicted_box_means'].shape[0],), -1, dtype=torch.float32).cpu().numpy()*np.NaN)
                res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(torch.full((
                    matched_results['predicted_box_means'].shape[0],), -1, dtype=torch.float32).cpu().numpy()*np.NaN)
                all_results_cat = matched_results['predicted_cls_probs']
                if all_results_cat.shape[1] == 80:
                    predicted_cat_dists_fp, _ = all_results_cat.max(dim=1)
                    predicted_cat_dists_fp = 1-predicted_cat_dists_fp
                    predicted_categorical_dists = torch.distributions.Bernoulli(
                        probs=predicted_cat_dists_fp)
                else:
                    predicted_categorical_dists = torch.distributions.Categorical(
                        probs=all_results_cat)
                all_pred_ent = predicted_categorical_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['Cat_Entropy'].extend(
                    all_pred_ent.cpu().numpy())
    return res_dict_clean
def mean_reject_outliers(x, outlierConstant=1.5):
    """Mean of *x* after rejecting IQR outliers (Tukey's fences).

    Values outside [Q1 - c*IQR, Q3 + c*IQR], with c = outlierConstant,
    are discarded before averaging.

    Fix: use NaN-aware percentiles. The original used np.percentile, which
    returns NaN when *x* contains NaN; the fence comparison then rejected
    every element and np.nanmean was applied to an empty array. Using
    np.nanpercentile keeps the fences finite so NaN entries are simply
    ignored, matching the intent signalled by the final np.nanmean.
    Behavior on NaN-free input is unchanged.

    Args:
        x: array-like of numbers (may contain NaN).
        outlierConstant: IQR multiplier for the fences (default 1.5).

    Returns:
        float: mean of the inliers (NaN entries excluded).
    """
    a = np.array(x)
    upper_quartile = np.nanpercentile(a, 75)
    lower_quartile = np.nanpercentile(a, 25)
    IQR = (upper_quartile - lower_quartile) * outlierConstant
    quartileSet = (lower_quartile - IQR, upper_quartile + IQR)
    # NaN compares False on both sides, so NaN entries are filtered out here.
    result = a[np.where((a >= quartileSet[0]) & (a <= quartileSet[1]))]
    return np.nanmean(result)
| 30,031
| 53.703097
| 161
|
py
|
pmb-nll
|
pmb-nll-main/src/core/visualization_tools/probabilistic_visualizer.py
|
import matplotlib as mpl
import numpy as np
from detectron2.utils.colormap import random_color
from detectron2.utils.visualizer import _SMALL_OBJECT_AREA_THRESH, ColorMode, Visualizer
from scipy.stats import chi2, norm
class ProbabilisticVisualizer(Visualizer):
    """
    Extends detectron2 Visualizer to draw corner covariance matrices.

    Adds overlay_covariance_instances / draw_ellipse for visualizing box
    corner uncertainty as confidence ellipses, and an overlay_instances
    override that exposes the box-drawing alpha.
    """

    def __init__(self, img_rgb, metadata, scale=1.0, instance_mode=ColorMode.IMAGE):
        # Pure pass-through to the detectron2 Visualizer constructor.
        super().__init__(img_rgb, metadata, scale=scale, instance_mode=instance_mode)

    def overlay_covariance_instances(
        self,
        *,
        boxes=None,
        covariance_matrices=None,
        labels=None,
        assigned_colors=None,
        alpha=0.5
    ):
        """
        Args:
            boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
                or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
                or a :class:`RotatedBoxes`,
                or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
                for the N objects in a single image,
            covariance_matrices (ndarray): numpy array containing the corner covariance matrices
            labels (list[str]): the text to be displayed for each instance.
            assigned_colors (list[matplotlib.colors]): a list of colors, where each color
                corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
                for full list of formats that the colors are accepted in.
            alpha: alpha value

        Returns:
            output (VisImage): image object with visualizations.
        """
        num_instances = None
        if boxes is not None:
            boxes = self._convert_boxes(boxes)
            num_instances = len(boxes)
        if labels is not None:
            assert len(labels) == num_instances
        if assigned_colors is None:
            assigned_colors = [
                random_color(rgb=True, maximum=1) for _ in range(num_instances)
            ]
        if num_instances == 0:
            return self.output

        # Display in largest to smallest order to reduce occlusion.
        areas = None
        if boxes is not None:
            areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)

        if areas is not None:
            sorted_idxs = np.argsort(-areas).tolist()
            # Re-order overlapped instances in descending order.
            boxes = boxes[sorted_idxs] if boxes is not None else None
            labels = [labels[k] for k in sorted_idxs] if labels is not None else None
            assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
            covariance_matrices = (
                covariance_matrices[sorted_idxs]
                if covariance_matrices is not None
                else None
            )

        for i in range(num_instances):
            color = assigned_colors[i]
            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
            if boxes is not None:
                self.draw_box(boxes[i], edge_color=lighter_color, alpha=alpha)
            if covariance_matrices is not None:
                # Draw the uncertainty ellipses at the two box corners.
                self.draw_ellipse(
                    boxes[i],
                    covariance_matrices[i],
                    edge_color=lighter_color,
                    alpha=alpha,
                )

            if labels is not None:
                # first get a box
                if boxes is not None:
                    x0, y0, x1, y1 = boxes[i]
                    # if drawing boxes, put text on the box corner.
                    text_pos = (x0, y0)
                    horiz_align = "left"
                else:
                    # drawing the box confidence for keypoints isn't very
                    # useful.
                    continue
                # for small objects, draw text at the side to avoid occlusion
                instance_area = (y1 - y0) * (x1 - x0)
                if (
                    instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
                    or y1 - y0 < 40 * self.output.scale
                ):
                    if y1 >= self.output.height - 5:
                        text_pos = (x1, y0)
                    else:
                        text_pos = (x0, y1)

                height_ratio = (y1 - y0) / np.sqrt(
                    self.output.height * self.output.width
                )
                font_size = (
                    np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
                    * 0.75
                    * self._default_font_size
                )
                # NOTE(review): assumes labels look like "name: score";
                # parse failure silently keeps the default text position.
                try:
                    score = float(labels[i].split(":")[-1])
                    if score > 0.5:
                        text_pos = (x0, y0)
                    else:
                        text_pos = (x0, y0 - font_size * 1.5)
                except:
                    pass
                self.draw_text(
                    labels[i],
                    text_pos,
                    color=lighter_color,
                    horizontal_alignment=horiz_align,
                    font_size=font_size,
                    alpha=alpha,
                )

        return self.output

    def draw_ellipse(self, box_coord, cov, alpha=0.5, edge_color="g", line_style="-"):
        """
        Args:
            box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
                are the coordinates of the image's top left corner. x1 and y1 are the
                coordinates of the image's bottom right corner.
            cov (nd array): 4x4 corner covariance matrix.
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            edge_color: color of the outline of the box. Refer to `matplotlib.colors`
                for full list of formats that are accepted.
            line_style (string): the string to use to create the outline of the boxes.

        Returns:
            output (VisImage): image object with box drawn.
        """
        x0, y0, x1, y1 = box_coord
        linewidth = max(self._default_font_size / 4, 1)
        # Top-left corner ellipse from the upper-left 2x2 covariance block.
        width, height, rotation = self.cov_ellipse(cov[0:2, 0:2])
        # Clamp negative extents (numerical noise in the eigendecomposition).
        width[width < 0] = 0
        height[height < 0] = 0
        if not (np.isnan(width) or np.isnan(height) or np.isnan(rotation)):
            width = width.astype(np.int32)
            height = height.astype(np.int32)
            rotation = rotation.astype(np.int32) + 180
            self.output.ax.add_patch(
                mpl.patches.Ellipse(
                    (x0, y0),
                    width,
                    height,
                    angle=rotation,
                    fill=False,
                    edgecolor=edge_color,
                    linewidth=linewidth * self.output.scale,
                    alpha=alpha,
                    linestyle=line_style,
                )
            )
        # Bottom-right corner ellipse from the lower-right 2x2 block.
        width, height, rotation = self.cov_ellipse((cov[2:4, 2:4]))
        width[width < 0] = 0
        height[height < 0] = 0
        if not (np.isnan(width) or np.isnan(height) or np.isnan(rotation)):
            width = width.astype(np.int32)
            height = height.astype(np.int32)
            rotation = rotation.astype(np.int32) + 180
            self.output.ax.add_patch(
                mpl.patches.Ellipse(
                    (x1, y1),
                    width,
                    height,
                    angle=rotation,
                    fill=False,
                    edgecolor=edge_color,
                    linewidth=linewidth * self.output.scale,
                    alpha=alpha,
                    linestyle=line_style,
                )
            )
        return self.output

    def overlay_instances(
        self,
        *,
        boxes=None,
        labels=None,
        masks=None,
        keypoints=None,
        assigned_colors=None,
        alpha=0.5
    ):
        """
        Modified from super class to give access to alpha for box plotting.

        Returns:
            output (VisImage): image object with visualizations.
        """
        num_instances = None
        if boxes is not None:
            boxes = self._convert_boxes(boxes)
            num_instances = len(boxes)
        if masks is not None:
            masks = self._convert_masks(masks)
            if num_instances:
                assert len(masks) == num_instances
            else:
                num_instances = len(masks)
        if keypoints is not None:
            if num_instances:
                assert len(keypoints) == num_instances
            else:
                num_instances = len(keypoints)
            keypoints = self._convert_keypoints(keypoints)
        if labels is not None:
            assert len(labels) == num_instances
        if assigned_colors is None:
            assigned_colors = [
                random_color(rgb=True, maximum=1) for _ in range(num_instances)
            ]
        if num_instances == 0:
            return self.output
        if boxes is not None and boxes.shape[1] == 5:
            # Rotated boxes take a separate code path in the base class.
            return self.overlay_rotated_instances(
                boxes=boxes, labels=labels, assigned_colors=assigned_colors
            )

        # Display in largest to smallest order to reduce occlusion.
        areas = None
        if boxes is not None:
            areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
        elif masks is not None:
            areas = np.asarray([x.area() for x in masks])

        if areas is not None:
            sorted_idxs = np.argsort(-areas).tolist()
            # Re-order overlapped instances in descending order.
            boxes = boxes[sorted_idxs] if boxes is not None else None
            labels = [labels[k] for k in sorted_idxs] if labels is not None else None
            masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
            assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
            keypoints = keypoints[sorted_idxs] if keypoints is not None else None

        for i in range(num_instances):
            color = assigned_colors[i]
            if boxes is not None:
                self.draw_box(boxes[i], edge_color=color, alpha=alpha)

            if masks is not None:
                for segment in masks[i].polygons:
                    self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)

            if labels is not None:
                # first get a box
                if boxes is not None:
                    x0, y0, x1, y1 = boxes[i]
                    # if drawing boxes, put text on the box corner.
                    text_pos = (x0, y0)
                    horiz_align = "left"
                elif masks is not None:
                    x0, y0, x1, y1 = masks[i].bbox()

                    # draw text in the center (defined by median) when box is not drawn
                    # median is less sensitive to outliers.
                    text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
                    horiz_align = "center"
                else:
                    # drawing the box confidence for keypoints isn't very
                    # useful.
                    continue
                # for small objects, draw text at the side to avoid occlusion
                instance_area = (y1 - y0) * (x1 - x0)
                if (
                    instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
                    or y1 - y0 < 40 * self.output.scale
                ):
                    if y1 >= self.output.height - 5:
                        text_pos = (x1, y0)
                    else:
                        text_pos = (x0, y1)

                height_ratio = (y1 - y0) / np.sqrt(
                    self.output.height * self.output.width
                )
                lighter_color = self._change_color_brightness(
                    color, brightness_factor=0.7
                )
                font_size = (
                    np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
                    * 0.75
                    * self._default_font_size
                )
                self.draw_text(
                    labels[i],
                    text_pos,
                    color=lighter_color,
                    horizontal_alignment=horiz_align,
                    font_size=font_size,
                )

        # draw keypoints
        if keypoints is not None:
            for keypoints_per_instance in keypoints:
                self.draw_and_connect_keypoints(keypoints_per_instance)

        return self.output

    @staticmethod
    def cov_ellipse(cov, q=None, nsig=2):
        """
        Parameters
        ----------
        cov : (2, 2) array
            Covariance matrix.
        q : float, optional
            Confidence level, should be in (0, 1).
        nsig : int, optional
            Confidence level in unit of standard deviations.
            E.g. 1 stands for 68.3% and 2 stands for 95.4%.

        Returns
        -------
        width, height, rotation :
            The lengths of two axises and the rotation angle in degree
            for the ellipse.
        """
        if q is not None:
            q = np.asarray(q)
        elif nsig is not None:
            # Convert n-sigma to a two-sided confidence level.
            q = 2 * norm.cdf(nsig) - 1
        else:
            raise ValueError("One of `q` and `nsig` should be specified.")
        # Chi-square quantile with 2 dof scales the eigenvalues.
        r2 = chi2.ppf(q, 2)

        val, vec = np.linalg.eigh(cov)
        # Unpack along the eigenvalue axis; each extent is a length-1 array
        # when q is scalar.
        width, height = 2 * np.sqrt(val[:, None] * r2)
        rotation = np.degrees(np.arctan2(*vec[::-1, 0]))

        return width, height, rotation
| 13,659
| 36.734807
| 98
|
py
|
pmb-nll
|
pmb-nll-main/src/core/visualization_tools/__init__.py
| 0
| 0
| 0
|
py
|
|
pmb-nll
|
pmb-nll-main/src/probabilistic_inference/image_corruptions.py
|
"""
Code for image corruption based on: https://github.com/hendrycks/robustness/tree/master/ImageNet-C/imagenet_c
Code is modified by authors of this paper to support arbitrary image sizes.
"""
import ctypes
import cv2
import numpy as np
import skimage as sk
from io import BytesIO
from PIL import Image as PILImage
from pkg_resources import resource_filename
from scipy.ndimage import zoom as scizoom
from scipy.ndimage.interpolation import map_coordinates
from skimage.filters import gaussian
from wand.image import Image as WandImage
from wand.api import library as wandlibrary
def disk(radius, alias_blur=0.1, dtype=np.float32):
if radius <= 8:
L = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
# Tell Python about the C method
wandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand
ctypes.c_double, # radius
ctypes.c_double, # sigma
ctypes.c_double) # angle
# Extend wand.image.Image class to include method signature
class MotionImage(WandImage):
    """WandImage subclass exposing ImageMagick's motion-blur primitive."""

    def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
        # Delegates to the C binding whose argtypes were registered above.
        wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
# modification of
# https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(mapsize=256, wibbledecay=3):
    """
    Generate a heightmap using diamond-square algorithm.
    Return square 2d array, side length 'mapsize', of floats in range 0-1
    (the result is min-subtracted and max-normalized before returning).
    'mapsize' must be a power of two.
    """
    assert (mapsize & (mapsize - 1) == 0)
    # np.float_ was removed in NumPy 2.0; np.float64 is the identical dtype.
    maparray = np.empty((mapsize, mapsize), dtype=np.float64)
    maparray[0, 0] = 0
    stepsize = mapsize
    wibble = 100

    def wibbledmean(array):
        # mean of four neighbors plus decaying random perturbation
        return array / 4 + wibble * \
            np.random.uniform(-wibble, wibble, array.shape)

    def fillsquares():
        """For each square of points stepsize apart,
        calculate middle value as mean of points + wibble"""
        cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
        squareaccum += np.roll(squareaccum, shift=-1, axis=1)
        maparray[stepsize // 2:mapsize:stepsize,
                 stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)

    def filldiamonds():
        """For each diamond of points stepsize apart,
        calculate middle value as mean of points + wibble"""
        mapsize = maparray.shape[0]
        drgrid = maparray[stepsize //
                          2:mapsize:stepsize, stepsize //
                          2:mapsize:stepsize]
        ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
        lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
        ltsum = ldrsum + lulsum
        maparray[0:mapsize:stepsize, stepsize //
                 2:mapsize:stepsize] = wibbledmean(ltsum)
        tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
        tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
        ttsum = tdrsum + tulsum
        maparray[stepsize // 2:mapsize:stepsize,
                 0:mapsize:stepsize] = wibbledmean(ttsum)

    while stepsize >= 2:
        fillsquares()
        filldiamonds()
        stepsize //= 2
        wibble /= wibbledecay

    # normalize to [0, 1]
    maparray -= maparray.min()
    return maparray / maparray.max()
def clipped_zoom(img, zoom_factor):
    """Zoom into the center of `img` while keeping the original H x W size."""
    height = img.shape[0]
    width = img.shape[1]
    # size of the centered crop that, once zoomed, covers the full frame
    crop_h = int(np.ceil(height / float(zoom_factor)))
    crop_w = int(np.ceil(width / float(zoom_factor)))
    off_y = (height - crop_h) // 2
    off_x = (width - crop_w) // 2
    zoomed = scizoom(img[off_y:off_y + crop_h, off_x:off_x + crop_w],
                     (zoom_factor, zoom_factor, 1), order=1)
    # ceil rounding may leave a few surplus pixels; trim them symmetrically
    extra_y = (zoomed.shape[0] - height) // 2
    extra_x = (zoomed.shape[1] - width) // 2
    return zoomed[extra_y:extra_y + height, extra_x:extra_x + width]
# /////////////// End Corruption Helpers ///////////////
# /////////////// Corruptions ///////////////
def gaussian_noise(x, severity=1):
    """Additive white Gaussian noise; severity selects the noise std-dev."""
    sigma = [.08, .12, 0.18, 0.26, 0.38][severity - 1]

    scaled = np.array(x) / 255.
    noisy = scaled + np.random.normal(size=scaled.shape, scale=sigma)
    return np.clip(noisy, 0, 1) * 255
def shot_noise(x, severity=1):
    """Poisson (shot) noise; smaller rate scale means stronger noise."""
    rate = [60, 25, 12, 5, 3][severity - 1]

    scaled = np.array(x) / 255.
    sampled = np.random.poisson(scaled * rate) / float(rate)
    return np.clip(sampled, 0, 1) * 255
def impulse_noise(x, severity=1):
    """Salt-and-pepper (impulse) noise; severity sets the corrupted fraction."""
    amount = [.03, .06, .09, 0.17, 0.27][severity - 1]

    noisy = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=amount)
    return np.clip(noisy, 0, 1) * 255
def speckle_noise(x, severity=1):
    """Multiplicative (speckle) noise: x + x * N(0, sigma)."""
    sigma = [.15, .2, 0.35, 0.45, 0.6][severity - 1]

    scaled = np.array(x) / 255.
    noisy = scaled + scaled * np.random.normal(size=scaled.shape, scale=sigma)
    return np.clip(noisy, 0, 1) * 255
def gaussian_blur(x, severity=1):
    """Isotropic Gaussian blur; severity selects sigma."""
    sigma = [1, 2, 3, 4, 6][severity - 1]

    blurred = gaussian(np.array(x) / 255., sigma=sigma, multichannel=True)
    return np.clip(blurred, 0, 1) * 255
def glass_blur(x, severity=1):
    """Glass blur: Gaussian blur, random local pixel swaps, then blur again."""
    # sigma, max_delta, iterations
    c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3),
         (1.1, 3, 2), (1.5, 4, 2)][severity - 1]
    # pre-blur, quantized back to uint8 so the swap loop works on bytes
    x = np.uint8(
        gaussian(
            np.array(x) /
            255.,
            sigma=c[0],
            multichannel=True) *
        255)
    h_max = x.shape[0]
    w_max = x.shape[1]
    # locally shuffle pixels
    for i in range(c[2]):
        for h in range(h_max - c[1], c[1], -1):
            for w in range(w_max - c[1], c[1], -1):
                # pick a random neighbor within +/- max_delta of (h, w)
                dx, dy = np.random.randint(-c[1], c[1], size=(2,))
                h_prime, w_prime = h + dy, w + dx
                # swap
                x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
    # final blur smooths the swap artifacts into the "glass" look
    return np.clip(
        gaussian(
            x / 255.,
            sigma=c[0],
            multichannel=True),
        0,
        1) * 255
def defocus_blur(x, severity=1):
    """Defocus blur: convolve each channel with an anti-aliased disk kernel."""
    radius, alias = [(3, 0.1), (4, 0.5), (6, 0.5),
                     (8, 0.5), (10, 0.5)][severity - 1]

    scaled = np.array(x) / 255.
    kernel = disk(radius=radius, alias_blur=alias)

    # filter each of the three channels independently, then restack to HWC
    filtered = [cv2.filter2D(scaled[:, :, ch], -1, kernel) for ch in range(3)]
    stacked = np.array(filtered).transpose((1, 2, 0))

    return np.clip(stacked, 0, 1) * 255
def motion_blur(x, severity=1):
    """Motion blur via ImageMagick at a random angle in [-45, 45] degrees.

    `x` is a PIL image (it must support .save); returns a numpy array.
    """
    c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]

    output = BytesIO()
    x.save(output, format='PNG')
    x = MotionImage(blob=output.getvalue())

    x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))

    # np.fromstring is deprecated and was removed for binary input in
    # NumPy 2.0; np.frombuffer is the supported, equivalent call.
    x = cv2.imdecode(np.frombuffer(x.make_blob(), np.uint8),
                     cv2.IMREAD_UNCHANGED)

    if len(x.shape) != 2:
        return np.clip(x[..., [2, 1, 0]], 0, 255)  # BGR to RGB
    else:  # greyscale to RGB
        return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)
def zoom_blur(x, severity=1):
    """Average several centered zooms of the image to simulate zoom blur."""
    zoom_factors = [np.arange(1, 1.11, 0.01),
                    np.arange(1, 1.16, 0.01),
                    np.arange(1, 1.21, 0.02),
                    np.arange(1, 1.26, 0.02),
                    np.arange(1, 1.31, 0.03)][severity - 1]

    x = (np.array(x) / 255.).astype(np.float32)
    accum = np.zeros_like(x)
    for factor in zoom_factors:
        accum += clipped_zoom(x, factor)

    # blend the original frame with the accumulated zooms
    x = (x + accum) / (len(zoom_factors) + 1)
    return np.clip(x, 0, 1) * 255
def fog(x, severity=1):
    """Overlay plasma-fractal fog; severity picks (intensity, wibble decay)."""
    intensity, decay = [(1.5, 2), (2., 2), (2.5, 1.7),
                        (2.5, 1.5), (3., 1.4)][severity - 1]

    x = np.array(x) / 255.
    max_val = x.max()
    # resize the fractal heightmap to the image size (cv2 takes (w, h))
    fractal = cv2.resize(
        plasma_fractal(wibbledecay=decay),
        (x.shape[1], x.shape[0]))
    x += intensity * fractal[..., np.newaxis]
    # renormalize so the brightest pixel stays at its pre-fog level
    return np.clip(x * max_val / (max_val + intensity), 0, 1) * 255
def frost(x, severity=1):
    """Blend a randomly chosen frost texture over the image.

    `x` is a PIL image (x.size is used); returns a numpy array in [0, 255].
    """
    # (image weight, frost weight) per severity level
    c = [(1, 0.4),
         (0.8, 0.6),
         (0.7, 0.7),
         (0.65, 0.7),
         (0.6, 0.75)][severity - 1]
    # NOTE(review): idx is drawn from range(5) but six textures are listed,
    # so 'frost6.jpg' is never selected -- confirm whether this is intended.
    idx = np.random.randint(5)
    filename = [resource_filename(__name__, 'frost/frost1.png'),
                resource_filename(__name__, 'frost/frost2.png'),
                resource_filename(__name__, 'frost/frost3.png'),
                resource_filename(__name__, 'frost/frost4.jpg'),
                resource_filename(__name__, 'frost/frost5.jpg'),
                resource_filename(__name__, 'frost/frost6.jpg')][idx]
    frost_im = cv2.imread(filename)
    # resize the texture to the input size (PIL's .size is (w, h))
    frost_im = cv2.resize(frost_im, x.size)
    # convert to rgb
    frost_im = frost_im[..., [2, 1, 0]]
    return np.clip(c[0] * np.array(x) + c[1] * frost_im, 0, 255)
def snow(x, severity=1):
    """Composite a motion-blurred snow layer over the image.

    `x` is an RGB array-like in [0, 255]; returns a float array in [0, 255].
    """
    # (loc, scale, zoom, threshold, blur radius, blur sigma, image weight)
    c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8),
         (0.2, 0.3, 2, 0.5, 12, 4, 0.7),
         (0.55, 0.3, 4, 0.9, 12, 8, 0.7),
         (0.55, 0.3, 4.5, 0.85, 12, 8, 0.65),
         (0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][severity - 1]
    x = np.array(x, dtype=np.float32) / 255.
    snow_layer = np.random.normal(
        size=x.shape[:2], loc=c[0], scale=c[1])  # [:2] for monochrome

    # zoom in and keep only the brightest speckles as snowflakes
    snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])
    snow_layer[snow_layer < c[3]] = 0

    snow_layer = PILImage.fromarray(
        (np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8),
        mode='L')
    output = BytesIO()
    snow_layer.save(output, format='PNG')
    snow_layer = MotionImage(blob=output.getvalue())

    # streak the flakes diagonally downward
    snow_layer.motion_blur(
        radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45))

    # np.fromstring is deprecated and was removed for binary input in
    # NumPy 2.0; np.frombuffer is the supported, equivalent call.
    snow_layer = cv2.imdecode(np.frombuffer(snow_layer.make_blob(), np.uint8),
                              cv2.IMREAD_UNCHANGED) / 255.
    snow_layer = snow_layer[..., np.newaxis]

    # brighten the scene toward its greyscale version before adding snow
    x = c[6] * x + (1 - c[6]) * np.maximum(x,
                                           cv2.cvtColor(x,
                                                        cv2.COLOR_RGB2GRAY).reshape(x.shape[0],
                                                                                    x.shape[1],
                                                                                    1) * 1.5 + 0.5)
    # add the snow layer twice: as-is and rotated 180 degrees
    return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
def spatter(x, severity=1):
    """Spatter the image with water (severities 1-3) or mud (4-5).

    `x` is an RGB array-like in [0, 255]; returns a float array in [0, 255].
    """
    # (loc, scale, blur sigma, threshold, strength, mode flag: 0=water 1=mud)
    c = [(0.65, 0.3, 4, 0.69, 0.6, 0),
         (0.65, 0.3, 3, 0.68, 0.6, 0),
         (0.65, 0.3, 2, 0.68, 0.5, 0),
         (0.65, 0.3, 1, 0.65, 1.5, 1),
         (0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1]
    x = np.array(x, dtype=np.float32) / 255.

    # random blob field, smoothed and thresholded into droplet regions
    liquid_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])

    liquid_layer = gaussian(liquid_layer, sigma=c[2])
    liquid_layer[liquid_layer < c[3]] = 0
    if c[5] == 0:
        # water branch: build an edge-weighted alpha mask from the droplets
        liquid_layer = (liquid_layer * 255).astype(np.uint8)
        dist = 255 - cv2.Canny(liquid_layer, 50, 150)
        dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
        _, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
        dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
        dist = cv2.equalizeHist(dist)
        ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
        dist = cv2.filter2D(dist, cv2.CV_8U, ker)
        dist = cv2.blur(dist, (3, 3)).astype(np.float32)

        m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
        m /= np.max(m, axis=(0, 1))
        m *= c[4]

        # water is pale turqouise
        color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]),
                                238 / 255. * np.ones_like(m[..., :1]),
                                238 / 255. * np.ones_like(m[..., :1])), axis=2)

        color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)
        x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)

        return cv2.cvtColor(
            np.clip(
                x + m * color,
                0,
                1),
            cv2.COLOR_BGRA2BGR) * 255
    else:
        # mud branch: hard droplet mask, darken the image where mud lands
        m = np.where(liquid_layer > c[3], 1, 0)
        m = gaussian(m.astype(np.float32), sigma=c[4])
        m[m < 0.8] = 0

        # mud brown
        color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]),
                                42 / 255. * np.ones_like(x[..., :1]),
                                20 / 255. * np.ones_like(x[..., :1])), axis=2)

        color *= m[..., np.newaxis]
        x *= (1 - m[..., np.newaxis])

        return np.clip(x + color, 0, 1) * 255
def contrast(x, severity=1):
    """Reduce contrast by pulling pixels toward the per-channel mean."""
    factor = [0.4, .3, .2, .1, .05][severity - 1]

    scaled = np.array(x) / 255.
    means = np.mean(scaled, axis=(0, 1), keepdims=True)
    return np.clip((scaled - means) * factor + means, 0, 1) * 255
def brightness(x, severity=1):
    """Raise brightness by shifting the HSV value channel upward."""
    shift = [.1, .2, .3, .4, .5][severity - 1]

    hsv = sk.color.rgb2hsv(np.array(x) / 255.)
    hsv[:, :, 2] = np.clip(hsv[:, :, 2] + shift, 0, 1)
    rgb = sk.color.hsv2rgb(hsv)

    return np.clip(rgb, 0, 1) * 255
def saturate(x, severity=1):
    """Scale and shift HSV saturation; severity selects (gain, bias)."""
    gain, bias = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][severity - 1]

    hsv = sk.color.rgb2hsv(np.array(x) / 255.)
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * gain + bias, 0, 1)
    rgb = sk.color.hsv2rgb(hsv)

    return np.clip(rgb, 0, 1) * 255
def jpeg_compression(x, severity=1):
    """Round-trip a PIL image through low-quality JPEG encoding."""
    quality = [25, 18, 15, 10, 7][severity - 1]

    buffer = BytesIO()
    x.save(buffer, 'JPEG', quality=quality)
    return np.array(PILImage.open(buffer))
def pixelate(x, severity=1):
    """Pixelate a PIL image by box-downsampling then box-upsampling."""
    h_max = x.size[1]
    w_max = x.size[0]
    c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
    # PILImage.BOX was removed in Pillow 10 (Resampling enum exists since
    # 9.1); the getattr fallback keeps older Pillow versions working.
    box = getattr(PILImage, 'Resampling', PILImage).BOX
    x = x.resize((int(w_max * c), int(h_max * c)), box)
    x = x.resize((w_max, h_max), box)
    return np.array(x)
# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5
def elastic_transform(image, severity=1):
    """Elastic deformation: a random affine warp plus smooth random jitter.

    `image` is a PIL image (image.size is read); returns a numpy array
    in [0, 255].
    """
    h_max = image.size[1]
    w_max = image.size[0]
    elastic_shape = 244
    # (displacement magnitude, smoothing sigma, affine jitter) per severity
    c = [(w_max * 2, h_max * 0.7, elastic_shape * 0.1),
         (w_max * 2, h_max * 0.08, elastic_shape * 0.2),
         (w_max * 0.05, h_max, elastic_shape * 0.02),
         (w_max * 0.07, h_max * 0.01, elastic_shape * 0.02),
         (w_max * 0.12, h_max * 0.01, elastic_shape * 0.02)][severity - 1]
    image = np.array(image, dtype=np.float32) / 255.
    shape = image.shape
    shape_size = shape[:2]
    # random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    # a centered triangle and its randomly jittered copy define the affine
    pts1 = np.float32([center_square + square_size,
                       [center_square[0] + square_size,
                        center_square[1] - square_size],
                       center_square - square_size])
    pts2 = pts1 + np.random.uniform(-c[2],
                                    c[2],
                                    size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(
        image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
    # smooth random per-pixel displacement fields for x and y
    dx = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),
                   c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
    dy = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),
                   c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
    dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
    x, y, z = np.meshgrid(
        np.arange(
            shape[1]), np.arange(
            shape[0]), np.arange(
            shape[2]))
    # displaced sampling coordinates, flattened for map_coordinates
    indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + \
        dx, (-1, 1)), np.reshape(z, (-1, 1))
    return np.clip(
        map_coordinates(
            image,
            indices,
            order=1,
            mode='reflect').reshape(shape),
        0,
        1) * 255
# /////////////// End Corruptions ///////////////
# All corruption callables in the canonical ImageNet-C ordering; the last
# four entries are the "extra" (validation) corruptions.
corruption_tuple = (
    gaussian_noise,
    shot_noise,
    impulse_noise,
    defocus_blur,
    glass_blur,
    motion_blur,
    zoom_blur,
    snow,
    frost,
    fog,
    brightness,
    contrast,
    elastic_transform,
    pixelate,
    jpeg_compression,
    speckle_noise,
    gaussian_blur,
    spatter,
    saturate)

# name -> callable lookup, keyed by each function's __name__
corruption_dict = {
    corr_func.__name__: corr_func for corr_func in corruption_tuple}
| 16,125
| 30.55773
| 109
|
py
|
pmb-nll
|
pmb-nll-main/src/probabilistic_inference/probabilistic_retinanet_predictor.py
|
import numpy as np
import torch
import math
# Detectron Imports
from detectron2.layers import batched_nms, cat
from detectron2.structures import Boxes, Instances, pairwise_iou
# Project Imports
from probabilistic_inference import inference_utils
from probabilistic_inference.inference_core import ProbabilisticPredictor
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
class RetinaNetProbabilisticPredictor(ProbabilisticPredictor):
    """Probabilistic inference for RetinaNet: anchor-wise inference plus
    post-processing variants (standard NMS, top-k, output statistics,
    MC-dropout/ensemble fusion, and Bayesian OD)."""

    def __init__(self, cfg):
        super().__init__(cfg)

        # Create transform
        self.sample_box2box_transform = inference_utils.SampleBox2BoxTransform(
            self.cfg.MODEL.RPN.BBOX_REG_WEIGHTS)

    def retinanet_probabilistic_inference(
            self,
            input_im,
            outputs=None,
            ensemble_inference=False,
            outputs_list=None):
        """
        General RetinaNet probabilistic anchor-wise inference. Preliminary inference step for many post-processing
        based inference methods such as standard_nms, output_statistics, and bayes_od.
        Args:
            input_im (list): an input im list generated from dataset handler.
            outputs (list): outputs from model.forward. Will be computed internally if not provided.
            ensemble_inference (bool): True if ensembles are used for inference. If set to true, outputs_list must be externally provided.
            outputs_list (list): List of model() outputs, usually generated from ensembles of models.
        Returns:
            all_predicted_boxes,
            all_predicted_boxes_covariance (Tensor): Nx4x4 vectors used
            all_predicted_prob (Tensor): Nx1 scores which represent max of all_pred_prob_vectors. For usage in NMS and mAP computation.
            all_classes_idxs (Tensor): Nx1 Class ids to be used for NMS.
            all_predicted_prob_vectors (Tensor): NxK tensor where K is the number of classes.
            ppp: PPP intensity from the model outputs, or [] when absent.
        """
        is_epistemic = ((self.mc_dropout_enabled and self.num_mc_dropout_runs > 1)
                        or ensemble_inference) and outputs is None
        if is_epistemic:
            if self.mc_dropout_enabled and self.num_mc_dropout_runs > 1:
                outputs_list = self.model(
                    input_im,
                    return_anchorwise_output=True,
                    num_mc_dropout_runs=self.num_mc_dropout_runs)

                n_fms = len(self.model.in_features)
                outputs_list = [{key: value[i * n_fms:(i + 1) * n_fms] if value is not None else value for key,
                                 value in outputs_list.items()} for i in range(self.num_mc_dropout_runs)]

            outputs = {'anchors': outputs_list[0]['anchors']}

            # Compute box classification and classification variance means
            box_cls = [output['box_cls'] for output in outputs_list]
            box_cls_mean = box_cls[0]
            for i in range(len(box_cls) - 1):
                box_cls_mean = [box_cls_mean[j] + box_cls[i][j]
                                for j in range(len(box_cls_mean))]
            box_cls_mean = [
                box_cls_f_map /
                len(box_cls) for box_cls_f_map in box_cls_mean]
            outputs.update({'box_cls': box_cls_mean})

            if outputs_list[0]['box_cls_var'] is not None:
                box_cls_var = [output['box_cls_var']
                               for output in outputs_list]
                box_cls_var_mean = box_cls_var[0]
                for i in range(len(box_cls_var) - 1):
                    box_cls_var_mean = [
                        box_cls_var_mean[j] +
                        box_cls_var[i][j] for j in range(
                            len(box_cls_var_mean))]
                box_cls_var_mean = [
                    box_cls_var_f_map /
                    len(box_cls_var) for box_cls_var_f_map in box_cls_var_mean]
            else:
                box_cls_var_mean = None
            outputs.update({'box_cls_var': box_cls_var_mean})

            # Compute box regression epistemic variance and mean, and aleatoric
            # variance mean
            box_delta_list = [output['box_delta']
                              for output in outputs_list]
            box_delta_mean = box_delta_list[0]
            for i in range(len(box_delta_list) - 1):
                box_delta_mean = [
                    box_delta_mean[j] +
                    box_delta_list[i][j] for j in range(
                        len(box_delta_mean))]
            box_delta_mean = [
                box_delta_f_map /
                len(box_delta_list) for box_delta_f_map in box_delta_mean]
            outputs.update({'box_delta': box_delta_mean})

            if outputs_list[0]['box_reg_var'] is not None:
                box_reg_var = [output['box_reg_var']
                               for output in outputs_list]
                box_reg_var_mean = box_reg_var[0]
                for i in range(len(box_reg_var) - 1):
                    box_reg_var_mean = [
                        box_reg_var_mean[j] +
                        box_reg_var[i][j] for j in range(
                            len(box_reg_var_mean))]
                box_reg_var_mean = [
                    box_delta_f_map /
                    len(box_reg_var) for box_delta_f_map in box_reg_var_mean]
            else:
                box_reg_var_mean = None
            outputs.update({'box_reg_var': box_reg_var_mean})

        elif outputs is None:
            outputs = self.model(input_im, return_anchorwise_output=True)

        all_anchors = []
        all_predicted_deltas = []
        all_predicted_box_reg_var = []
        all_predicted_boxes_cholesky = []
        all_predicted_prob = []
        all_classes_idxs = []
        all_predicted_prob_vectors = []
        all_predicted_boxes_epistemic_covar = []

        for i, anchors in enumerate(outputs['anchors']):
            box_cls = outputs['box_cls'][i][0]
            box_delta = outputs['box_delta'][i][0]

            # If classification aleatoric uncertainty available, perform
            # monte-carlo sampling to generate logits.
            if outputs['box_cls_var'] is not None:
                box_cls_var = outputs['box_cls_var'][i][0]
                box_cls_dists = torch.distributions.normal.Normal(
                    box_cls, scale=torch.sqrt(torch.exp(box_cls_var)))
                box_cls = box_cls_dists.rsample(
                    (self.model.cls_var_num_samples,))
                box_cls = torch.mean(box_cls.sigmoid(), 0)
            else:
                box_cls = box_cls.sigmoid()

            # Keep top k top scoring indices only.
            num_topk = min(self.model.test_topk_candidates, box_delta.size(0))

            predicted_prob, classes_idxs = torch.max(box_cls, 1)
            predicted_prob, topk_idxs = predicted_prob.topk(num_topk)

            # filter out the proposals with low confidence score
            keep_idxs = predicted_prob > self.model.test_score_thresh
            predicted_prob = predicted_prob[keep_idxs]
            topk_idxs = topk_idxs[keep_idxs]

            anchor_idxs = topk_idxs
            classes_idxs = classes_idxs[topk_idxs]

            box_delta = box_delta[anchor_idxs]
            anchors = anchors[anchor_idxs]

            cholesky_decomp = None
            # BUGFIX: box_reg_var is appended unconditionally below, so it
            # must be initialized here or it is unbound (NameError) when the
            # model produces no regression variance.
            box_reg_var = None
            if outputs['box_reg_var'] is not None:
                box_reg_var = outputs['box_reg_var'][i][0][anchor_idxs]
                box_reg_var = clamp_log_variance(box_reg_var)
                # Construct cholesky decomposition using diagonal vars
                cholesky_decomp = covariance_output_to_cholesky(box_reg_var)

            # In case dropout is enabled, we need to compute aleatoric
            # covariance matrix and add it here:
            box_reg_epistemic_covar = None
            if is_epistemic:
                # Compute epistemic box covariance matrix
                box_delta_list_i = [
                    self.model.box2box_transform.apply_deltas(
                        box_delta_i[i][0][anchor_idxs],
                        anchors.tensor) for box_delta_i in box_delta_list]
                _, box_reg_epistemic_covar = inference_utils.compute_mean_covariance_torch(
                    box_delta_list_i)

            all_predicted_deltas.append(box_delta)
            all_predicted_boxes_cholesky.append(cholesky_decomp)
            all_predicted_box_reg_var.append(box_reg_var)
            all_anchors.append(anchors.tensor)
            all_predicted_prob.append(predicted_prob)
            all_predicted_prob_vectors.append(box_cls[anchor_idxs])
            all_classes_idxs.append(classes_idxs)
            all_predicted_boxes_epistemic_covar.append(box_reg_epistemic_covar)

        box_delta = cat(all_predicted_deltas)
        anchors = cat(all_anchors)

        if isinstance(all_predicted_boxes_cholesky[0], torch.Tensor):
            # Generate multivariate samples to be used for monte-carlo simulation. We can afford much more samples
            # here since the matrix dimensions are much smaller and therefore
            # have much less memory footprint. Keep 100 or less to maintain
            # reasonable runtime speed.
            cholesky_decomp = cat(all_predicted_boxes_cholesky)
            box_reg_var = cat(all_predicted_box_reg_var)

            if self.use_mc_sampling:
                if self.cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE == 'gaussian':
                    multivariate_normal_samples = torch.distributions.MultivariateNormal(
                        box_delta, scale_tril=cholesky_decomp)
                elif self.cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE == 'laplacian':
                    multivariate_normal_samples = torch.distributions.Laplace(box_delta, scale=cholesky_decomp.diagonal(dim1=-2,dim2=-1)/math.sqrt(2.0))

                # Define monte-carlo samples
                distributions_samples = multivariate_normal_samples.rsample(
                    (1000,))
                distributions_samples = torch.transpose(
                    torch.transpose(distributions_samples, 0, 1), 1, 2)
                samples_anchors = torch.repeat_interleave(
                    anchors.unsqueeze(2), 1000, dim=2)

                # Transform samples from deltas to boxes
                t_dist_samples = self.sample_box2box_transform.apply_samples_deltas(
                    distributions_samples, samples_anchors)

                # Compute samples mean and covariance matrices.
                all_predicted_boxes, all_predicted_boxes_covariance = inference_utils.compute_mean_covariance_torch(
                    t_dist_samples)
                if isinstance(
                        all_predicted_boxes_epistemic_covar[0],
                        torch.Tensor):
                    epistemic_covar_mats = cat(
                        all_predicted_boxes_epistemic_covar)
                    all_predicted_boxes_covariance += epistemic_covar_mats
            else:
                all_predicted_boxes_covariance = torch.matmul(cholesky_decomp, torch.transpose(cholesky_decomp, -1, -2))
                all_predicted_boxes = self.model.box2box_transform.apply_deltas(box_delta, anchors)
        else:
            # This handles the case where no aleatoric uncertainty is available
            if is_epistemic:
                all_predicted_boxes_covariance = cat(
                    all_predicted_boxes_epistemic_covar)
            else:
                all_predicted_boxes_covariance = []

            # predict boxes
            all_predicted_boxes = self.model.box2box_transform.apply_deltas(
                box_delta, anchors)

        if 'ppp' in outputs:
            ppp = outputs['ppp']
        else:
            ppp = []

        return all_predicted_boxes, all_predicted_boxes_covariance, cat(
            all_predicted_prob), cat(all_classes_idxs), cat(all_predicted_prob_vectors), ppp

    def post_processing_standard_nms(self, input_im):
        """
        This function produces results using standard non-maximum suppression. The function takes into
        account any probabilistic modeling method when computing the results. It can combine aleatoric uncertainty
        from heteroscedastic regression and epistemic uncertainty from monte-carlo dropout for both classification and
        regression results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.retinanet_probabilistic_inference(input_im)

        return inference_utils.general_standard_nms_postprocessing(
            input_im, outputs, self.model.test_nms_thresh, self.model.max_detections_per_image)

    def post_processing_topk_detections(self, input_im):
        """
        This function produces the top-k detections without non-maximum suppression. The function takes into
        account any probabilistic modeling method when computing the results. It can combine aleatoric uncertainty
        from heteroscedastic regression and epistemic uncertainty from monte-carlo dropout for both classification and
        regression results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.retinanet_probabilistic_inference(input_im)

        return inference_utils.general_topk_detection_postprocessing(input_im, outputs)

    def post_processing_output_statistics(self, input_im):
        """
        This function produces box covariance matrices using anchor statistics. Uses the fact that multiple anchors are
        regressed to the same spatial location for clustering and extraction of box covariance matrix.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.retinanet_probabilistic_inference(input_im)

        return inference_utils.general_output_statistics_postprocessing(
            input_im,
            outputs,
            self.model.test_nms_thresh,
            self.model.max_detections_per_image,
            self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD)

    def post_processing_mc_dropout_ensembles(self, input_im):
        """
        This function produces results using multiple runs of MC dropout, through fusion before or after
        the non-maximum suppression step.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            return self.post_processing_standard_nms(input_im)
        else:
            outputs_dict = self.model(
                input_im,
                return_anchorwise_output=False,
                num_mc_dropout_runs=self.num_mc_dropout_runs)

            n_fms = len(self.model.in_features)
            outputs_list = [{key: value[i * n_fms:(i + 1) * n_fms] if value is not None else value for key,
                             value in outputs_dict.items()} for i in range(self.num_mc_dropout_runs)]

            # Merge results:
            results = [
                inference_utils.general_standard_nms_postprocessing(
                    input_im,
                    self.retinanet_probabilistic_inference(
                        input_im,
                        outputs=outputs),
                    self.model.test_nms_thresh,
                    self.model.max_detections_per_image) for outputs in outputs_list]

            # Append per-ensemble outputs after NMS has been performed.
            ensemble_pred_box_list = [
                result.pred_boxes.tensor for result in results]
            ensemble_pred_prob_vectors_list = [
                result.pred_cls_probs for result in results]
            ensembles_class_idxs_list = [
                result.pred_classes for result in results]
            ensembles_pred_box_covariance_list = [
                result.pred_boxes_covariance for result in results]

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.model.test_nms_thresh,
                self.model.max_detections_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)

    def post_processing_ensembles(self, input_im, model_dict):
        """
        This function produces results using multiple runs of independently trained models, through fusion before or after
        the non-maximum suppression step.

        Args:
            input_im (list): an input im list generated from dataset handler.
            model_dict (dict): dictionary containing list of models comprising the ensemble.
        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            outputs_list = []

            for model in model_dict:
                outputs = model(input_im, return_anchorwise_output=True)
                outputs_list.append(outputs)

            outputs = self.retinanet_probabilistic_inference(
                input_im, ensemble_inference=True, outputs_list=outputs_list)

            return inference_utils.general_standard_nms_postprocessing(
                input_im, outputs, self.model.test_nms_thresh, self.model.max_detections_per_image)
        else:
            outputs_list = []

            for model in model_dict:
                self.model = model
                outputs_list.append(
                    self.post_processing_standard_nms(input_im))

            # Merge results:
            ensemble_pred_box_list = []
            ensemble_pred_prob_vectors_list = []
            ensembles_class_idxs_list = []
            ensembles_pred_box_covariance_list = []
            for results in outputs_list:
                # Append per-ensemble outputs after NMS has been performed.
                ensemble_pred_box_list.append(results.pred_boxes.tensor)
                ensemble_pred_prob_vectors_list.append(results.pred_cls_probs)
                ensembles_class_idxs_list.append(results.pred_classes)
                ensembles_pred_box_covariance_list.append(
                    results.pred_boxes_covariance)

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.model.test_nms_thresh,
                self.model.max_detections_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)

    def post_processing_bayes_od(self, input_im):
        """
        This function produces results using forms of bayesian inference instead of NMS for both category and box results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        box_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.BOX_MERGE_MODE
        cls_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.CLS_MERGE_MODE

        outputs = self.retinanet_probabilistic_inference(input_im)

        # BUGFIX: retinanet_probabilistic_inference returns a 6-tuple whose
        # last element is the PPP intensity; unpacking only 5 names here
        # raised a ValueError. The PPP term is not used by Bayes-OD.
        predicted_boxes, predicted_boxes_covariance, predicted_prob, classes_idxs, predicted_prob_vectors, _ppp = outputs

        keep = batched_nms(
            predicted_boxes,
            predicted_prob,
            classes_idxs,
            self.model.test_nms_thresh)

        keep = keep[: self.model.max_detections_per_image]

        match_quality_matrix = pairwise_iou(
            Boxes(predicted_boxes), Boxes(predicted_boxes))

        box_clusters_inds = match_quality_matrix[keep, :]
        box_clusters_inds = box_clusters_inds > self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD

        # Compute mean and covariance for every cluster.
        predicted_prob_vectors_list = []
        predicted_boxes_list = []
        predicted_boxes_covariance_list = []

        predicted_prob_vectors_centers = predicted_prob_vectors[keep]
        for box_cluster, predicted_prob_vectors_center in zip(
                box_clusters_inds, predicted_prob_vectors_centers):

            cluster_categorical_params = predicted_prob_vectors[box_cluster]
            center_binary_score, center_cat_idx = torch.max(
                predicted_prob_vectors_center, 0)
            cluster_binary_scores, cat_idx = cluster_categorical_params.max(
                1)
            class_similarity_idx = cat_idx == center_cat_idx

            if cls_merge_mode == 'bayesian_inference':
                predicted_prob_vectors_list.append(
                    cluster_categorical_params.mean(0).unsqueeze(0))
            else:
                predicted_prob_vectors_list.append(
                    predicted_prob_vectors_center.unsqueeze(0))

            # Switch to numpy as torch.inverse is too slow.
            cluster_means = predicted_boxes[box_cluster,
                                            :][class_similarity_idx].cpu().numpy()

            cluster_covs = predicted_boxes_covariance[box_cluster, :][class_similarity_idx].cpu(
            ).numpy()

            predicted_box, predicted_box_covariance = inference_utils.bounding_box_bayesian_inference(
                cluster_means, cluster_covs, box_merge_mode)
            predicted_boxes_list.append(
                torch.from_numpy(np.squeeze(predicted_box)))
            predicted_boxes_covariance_list.append(
                torch.from_numpy(predicted_box_covariance))

        # Switch back to cuda for the remainder of the inference process.
        result = Instances(
            (input_im[0]['image'].shape[1],
             input_im[0]['image'].shape[2]))

        if len(predicted_boxes_list) > 0:
            if cls_merge_mode == 'bayesian_inference':
                predicted_prob_vectors = torch.cat(
                    predicted_prob_vectors_list, 0)
                predicted_prob, classes_idxs = torch.max(
                    predicted_prob_vectors, 1)
            elif cls_merge_mode == 'max_score':
                predicted_prob_vectors = predicted_prob_vectors[keep]
                predicted_prob = predicted_prob[keep]
                classes_idxs = classes_idxs[keep]

            result.pred_boxes = Boxes(
                torch.stack(
                    predicted_boxes_list,
                    0).to(self.model.device))
            result.scores = predicted_prob
            result.pred_classes = classes_idxs
            result.pred_cls_probs = predicted_prob_vectors
            result.pred_boxes_covariance = torch.stack(
                predicted_boxes_covariance_list, 0).to(self.model.device)
        else:
            result.pred_boxes = Boxes(predicted_boxes)
            result.scores = torch.zeros(
                predicted_boxes.shape[0]).to(
                self.model.device)
            result.pred_classes = classes_idxs
            result.pred_cls_probs = predicted_prob_vectors
            result.pred_boxes_covariance = torch.empty(
                (predicted_boxes.shape + (4,))).to(self.model.device)
        return result
| 23,910
| 44.894434
| 152
|
py
|
pmb-nll
|
pmb-nll-main/src/probabilistic_inference/probabilistic_rcnn_predictor.py
|
import numpy as np
import torch
# Detectron Imports
from detectron2.layers import batched_nms
from detectron2.structures import Boxes, Instances, pairwise_iou
# Project Imports
from probabilistic_inference import inference_utils
from probabilistic_inference.inference_core import ProbabilisticPredictor
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
class GeneralizedRcnnProbabilisticPredictor(ProbabilisticPredictor):
def __init__(self, cfg):
super().__init__(cfg)
# Define test score threshold
self.test_score_thres = self.model.roi_heads.box_predictor.test_score_thresh
self.test_nms_thresh = self.model.roi_heads.box_predictor.test_nms_thresh
self.test_topk_per_image = self.model.roi_heads.box_predictor.test_topk_per_image
# Create transform
self.sample_box2box_transform = inference_utils.SampleBox2BoxTransform(
self.cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
# Put proposal generator in eval mode if dropout enabled
if self.mc_dropout_enabled:
self.model.proposal_generator.eval()
    def generalized_rcnn_probabilistic_inference(self,
                                                 input_im,
                                                 outputs=None,
                                                 ensemble_inference=False,
                                                 outputs_list=None):
        """
        General RetinaNet probabilistic anchor-wise inference. Preliminary inference step for many post-processing
        based inference methods such as standard_nms, output_statistics, and bayes_od.
        Args:
            input_im (list): an input im list generated from dataset handler.
            outputs (list): outputs from model.forward(). will be computed internally if not provided.
            ensemble_inference (bool): True if ensembles are used for inference. If set to true, outputs_list must be externally provided.
            outputs_list (list): List of model() outputs, usually generated from ensembles of models.
        Returns:
            all_predicted_boxes,
            all_predicted_boxes_covariance (Tensor): Nx4x4 vectors used
            all_predicted_prob (Tensor): Nx1 scores which represent max of all_pred_prob_vectors. For usage in NMS and mAP computation.
            all_classes_idxs (Tensor): Nx1 Class ids to be used for NMS.
            all_predicted_prob_vectors (Tensor): NxK tensor where K is the number of classes.
            ppp: the model's 'ppp' output when present, otherwise an empty list.
        """
        # Epistemic path: multiple stochastic forward passes (MC dropout) or
        # externally supplied ensemble outputs, only when no precomputed
        # outputs were handed in.
        is_epistemic = ((self.mc_dropout_enabled and self.num_mc_dropout_runs > 1)
                        or ensemble_inference) and outputs is None
        if is_epistemic:
            if self.mc_dropout_enabled and self.num_mc_dropout_runs > 1:
                outputs_list = self.model(
                    input_im,
                    return_anchorwise_output=True,
                    num_mc_dropout_runs=self.num_mc_dropout_runs)
            # Collect each run's raw heads, then concatenate along the
            # detection dimension so downstream code sees one flat batch.
            proposals_list = [outputs['proposals']
                              for outputs in outputs_list]
            box_delta_list = [outputs['box_delta']
                              for outputs in outputs_list]
            box_cls_list = [outputs['box_cls'] for outputs in outputs_list]
            box_reg_var_list = [outputs['box_reg_var']
                                for outputs in outputs_list]
            box_cls_var_list = [outputs['box_cls_var']
                                for outputs in outputs_list]
            outputs = dict()
            proposals_all = proposals_list[0].proposal_boxes.tensor
            for i in torch.arange(1, len(outputs_list)):
                proposals_all = torch.cat(
                    (proposals_all, proposals_list[i].proposal_boxes.tensor), 0)
            # NOTE(review): the first run's proposal object is mutated in
            # place to carry the concatenated proposals.
            proposals_list[0].proposal_boxes.tensor = proposals_all
            outputs['proposals'] = proposals_list[0]
            box_delta = torch.cat(box_delta_list, 0)
            box_cls = torch.cat(box_cls_list, 0)
            outputs['box_delta'] = box_delta
            outputs['box_cls'] = box_cls
            if box_reg_var_list[0] is not None:
                box_reg_var = torch.cat(box_reg_var_list, 0)
            else:
                box_reg_var = None
            outputs['box_reg_var'] = box_reg_var
            if box_cls_var_list[0] is not None:
                box_cls_var = torch.cat(box_cls_var_list, 0)
            else:
                box_cls_var = None
            outputs['box_cls_var'] = box_cls_var
        elif outputs is None:
            # Single deterministic forward pass.
            outputs = self.model(input_im,
                                 return_anchorwise_output=True)
        proposals = outputs['proposals']
        box_cls = outputs['box_cls']
        box_delta = outputs['box_delta']
        # Turn raw classification logits into per-class probabilities,
        # depending on the classification-uncertainty formulation.
        if self.model.cls_var_loss == 'evidential':
            box_dir_alphas = inference_utils.get_dir_alphas(box_cls)
            # NOTE(review): self-assignment below is a no-op; likely leftover.
            box_dir_alphas = box_dir_alphas
            box_cls = box_dir_alphas / box_dir_alphas.sum(1, keepdim=True)
        else:
            if outputs['box_cls_var'] is not None:
                # Sample logits from the predicted gaussian over logits, then
                # average the softmax of the samples.
                box_cls_var = outputs['box_cls_var']
                box_cls_dists = torch.distributions.normal.Normal(
                    box_cls, scale=torch.sqrt(torch.exp(box_cls_var)))
                box_cls = box_cls_dists.rsample(
                    (self.model.cls_var_num_samples,))
                box_cls = torch.nn.functional.softmax(box_cls, dim=-1)
                box_cls = box_cls.mean(0)
            else:
                box_cls = torch.nn.functional.softmax(box_cls, dim=-1)
        # Remove background category
        scores = box_cls[:, :-1]
        num_bbox_reg_classes = box_delta.shape[1] // 4
        box_delta = box_delta.reshape(-1, 4)
        box_delta = box_delta.view(-1, num_bbox_reg_classes, 4)
        # Keep only (detection, class) pairs above the score threshold.
        filter_mask = scores > self.test_score_thres
        filter_inds = filter_mask.nonzero(as_tuple=False)
        if num_bbox_reg_classes == 1:
            box_delta = box_delta[filter_inds[:, 0], 0]
        else:
            box_delta = box_delta[filter_mask]
        scores = scores[filter_mask]
        proposal_boxes = proposals.proposal_boxes.tensor[filter_inds[:, 0]]
        if outputs['box_reg_var'] is not None:
            box_reg_var = outputs['box_reg_var']
            box_reg_var = box_reg_var.reshape(-1, self.model.bbox_cov_dims)
            box_reg_var = box_reg_var.view(-1,
                                           num_bbox_reg_classes,
                                           self.model.bbox_cov_dims)
            if num_bbox_reg_classes == 1:
                box_reg_var = box_reg_var[filter_inds[:, 0], 0]
            else:
                box_reg_var = box_reg_var[filter_mask]
            # Reconstruct cholesky decomposition of box covariance
            # matrix
            diag_vars = clamp_log_variance(box_reg_var)
            cholesky_decomp = covariance_output_to_cholesky(diag_vars)
            if self.use_mc_sampling:
                # Generate multivariate samples to be used for monte-carlo simulation. We can afford much more samples
                # here since the matrix dimensions are much smaller and therefore
                # have much less memory footprint. Keep 100 or less to maintain
                # reasonable runtime speed.
                if self.cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE == 'gaussian':
                    multivariate_normal_samples = torch.distributions.MultivariateNormal(
                        box_delta, scale_tril=cholesky_decomp)
                elif self.cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE == 'laplacian':
                    multivariate_normal_samples = torch.distributions.Laplace(box_delta, scale=cholesky_decomp.diagonal(dim1=-2,dim2=-1)/np.sqrt(2.0))
                # Define monte-carlo samples
                distributions_samples = multivariate_normal_samples.rsample(
                    (1000,))
                distributions_samples = torch.transpose(
                    torch.transpose(distributions_samples, 0, 1), 1, 2)
                samples_proposals = torch.repeat_interleave(
                    proposal_boxes.unsqueeze(2), 1000, dim=2)
                # Transform samples from deltas to boxes
                t_dist_samples = self.sample_box2box_transform.apply_samples_deltas(
                    distributions_samples, samples_proposals)
                # Compute samples mean and covariance matrices.
                boxes, boxes_covars = inference_utils.compute_mean_covariance_torch(
                    t_dist_samples)
            else:
                # Propagate the cholesky factor analytically: Sigma = L L^T.
                boxes = self.model.roi_heads.box_predictor.box2box_transform.apply_deltas(
                    box_delta, proposal_boxes)
                boxes_covars = torch.matmul(cholesky_decomp, torch.transpose(cholesky_decomp, -1, -2))
        else:
            # predict boxes
            boxes = self.model.roi_heads.box_predictor.box2box_transform.apply_deltas(
                box_delta, proposal_boxes)
            boxes_covars = []
        if 'ppp' in outputs:
            ppp = outputs['ppp']
        else:
            ppp = []
        return boxes, boxes_covars, scores, filter_inds[:,
                                                        1], box_cls[filter_inds[:, 0]], ppp
def post_processing_standard_nms(self, input_im):
"""
This function produces results using standard non-maximum suppression. The function takes into
account any probabilistic modeling method when computing the results.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
outputs = self.generalized_rcnn_probabilistic_inference(input_im)
return inference_utils.general_standard_nms_postprocessing(
input_im, outputs, self.test_nms_thresh, self.test_topk_per_image)
def post_processing_topk_detections(self, input_im):
"""
This function produces results using topk selection based on confidence scores.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
outputs = self.generalized_rcnn_probabilistic_inference(input_im)
return inference_utils.general_topk_detection_postprocessing(input_im, outputs)
def post_processing_output_statistics(self, input_im):
"""
This function produces results using anchor statistics.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
outputs = self.generalized_rcnn_probabilistic_inference(input_im)
return inference_utils.general_output_statistics_postprocessing(
input_im,
outputs,
self.test_nms_thresh,
self.test_topk_per_image,
self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD)
    def post_processing_mc_dropout_ensembles(self, input_im):
        """
        This function produces results using monte-carlo dropout ensembles.
        Args:
            input_im (list): an input im list generated from dataset handler.
        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            # In generalized rcnn models, association cannot be achieved on an anchor level when using
            # dropout as anchor order might shift. To overcome this problem, the anchor statistics function
            # is used to perform the association and to fuse covariance
            # results.
            return self.post_processing_output_statistics(input_im)
        else:
            # Run the requested number of stochastic forward passes; each
            # element of outputs_list is one dropout run.
            outputs_list = self.model(
                input_im,
                return_anchorwise_output=False,
                num_mc_dropout_runs=self.num_mc_dropout_runs)
            # Merge results:
            # Apply per-run probabilistic inference + NMS independently.
            results = [
                inference_utils.general_standard_nms_postprocessing(
                    input_im,
                    self.generalized_rcnn_probabilistic_inference(
                        input_im,
                        outputs=outputs),
                    self.test_nms_thresh,
                    self.test_topk_per_image) for outputs in outputs_list]
            # Append per-ensemble outputs after NMS has been performed.
            ensemble_pred_box_list = [
                result.pred_boxes.tensor for result in results]
            ensemble_pred_prob_vectors_list = [
                result.pred_cls_probs for result in results]
            ensembles_class_idxs_list = [
                result.pred_classes for result in results]
            ensembles_pred_box_covariance_list = [
                result.pred_boxes_covariance for result in results]
            # Fuse the per-run detections as a black-box ensemble.
            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)
    def post_processing_ensembles(self, input_im, model_dict):
        """
        Produce results from an ensemble of independently trained models.
        Args:
            input_im (list): an input im list generated from dataset handler.
            model_dict (iterable): the ensemble member models to run.
        Returns:
            result (Instances): fused object instances.
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            # Collect raw anchorwise outputs from every member, then let the
            # shared inference step concatenate and post-process them.
            outputs_list = []
            for model in model_dict:
                outputs = model(input_im, return_anchorwise_output=True)
                outputs_list.append(outputs)
            outputs = self.generalized_rcnn_probabilistic_inference(
                input_im, ensemble_inference=True, outputs_list=outputs_list)
            return inference_utils.general_output_statistics_postprocessing(
                input_im,
                outputs,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD)
        else:
            outputs_list = []
            for model in model_dict:
                # NOTE(review): this permanently rebinds self.model to each
                # ensemble member (the last one after the loop) — confirm
                # this side effect is intended.
                self.model = model
                outputs_list.append(
                    self.post_processing_standard_nms(input_im))
            # Merge results:
            ensemble_pred_box_list = []
            ensemble_pred_prob_vectors_list = []
            ensembles_class_idxs_list = []
            ensembles_pred_box_covariance_list = []
            for results in outputs_list:
                # Append per-ensemble outputs after NMS has been performed.
                ensemble_pred_box_list.append(results.pred_boxes.tensor)
                ensemble_pred_prob_vectors_list.append(results.pred_cls_probs)
                ensembles_class_idxs_list.append(results.pred_classes)
                ensembles_pred_box_covariance_list.append(
                    results.pred_boxes_covariance)
            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)
def post_processing_bayes_od(self, input_im):
"""
This function produces results using forms of bayesian inference instead of NMS for both category
and box results.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
box_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.BOX_MERGE_MODE
cls_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.CLS_MERGE_MODE
outputs = self.generalized_rcnn_probabilistic_inference(input_im)
predicted_boxes, predicted_boxes_covariance, predicted_prob, classes_idxs, predicted_prob_vectors = outputs
keep = batched_nms(
predicted_boxes,
predicted_prob,
classes_idxs,
self.test_nms_thresh)
keep = keep[: self.test_topk_per_image]
match_quality_matrix = pairwise_iou(
Boxes(predicted_boxes), Boxes(predicted_boxes))
box_clusters_inds = match_quality_matrix[keep, :]
box_clusters_inds = box_clusters_inds > self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD
# Compute mean and covariance for every cluster.
predicted_boxes_list = []
predicted_boxes_covariance_list = []
predicted_prob_vectors_list = []
predicted_prob_vectors_centers = predicted_prob_vectors[keep]
for box_cluster, predicted_prob_vectors_center in zip(
box_clusters_inds, predicted_prob_vectors_centers):
# Ignore background categories provided by detectron2 inference
cluster_categorical_params = predicted_prob_vectors[box_cluster]
_, center_cat_idx = torch.max(predicted_prob_vectors_center, 0)
_, cat_idx = cluster_categorical_params.max(1)
class_similarity_idx = cat_idx == center_cat_idx
if cls_merge_mode == 'bayesian_inference':
cluster_categorical_params = cluster_categorical_params[class_similarity_idx]
predicted_prob_vectors_list.append(
cluster_categorical_params.mean(0).unsqueeze(0))
else:
predicted_prob_vectors_list.append(
predicted_prob_vectors_center.unsqueeze(0))
# Switch to numpy as torch.inverse is too slow.
cluster_means = predicted_boxes[box_cluster,
:][class_similarity_idx].cpu().numpy()
cluster_covs = predicted_boxes_covariance[box_cluster, :][class_similarity_idx].cpu(
).numpy()
predicted_box, predicted_box_covariance = inference_utils.bounding_box_bayesian_inference(
cluster_means, cluster_covs, box_merge_mode)
predicted_boxes_list.append(
torch.from_numpy(np.squeeze(predicted_box)))
predicted_boxes_covariance_list.append(
torch.from_numpy(predicted_box_covariance))
# Switch back to cuda for the remainder of the inference process.
result = Instances(
(input_im[0]['image'].shape[1],
input_im[0]['image'].shape[2]))
if len(predicted_boxes_list) > 0:
if cls_merge_mode == 'bayesian_inference':
predicted_prob_vectors = torch.cat(
predicted_prob_vectors_list, 0)
predicted_prob, classes_idxs = torch.max(
predicted_prob_vectors[:, :-1], 1)
elif cls_merge_mode == 'max_score':
predicted_prob_vectors = predicted_prob_vectors[keep]
predicted_prob = predicted_prob[keep]
classes_idxs = classes_idxs[keep]
result.pred_boxes = Boxes(
torch.stack(
predicted_boxes_list,
0).to(self.model.device))
result.scores = predicted_prob
result.pred_classes = classes_idxs
result.pred_cls_probs = predicted_prob_vectors
result.pred_boxes_covariance = torch.stack(
predicted_boxes_covariance_list, 0).to(self.model.device)
else:
result.pred_boxes = Boxes(predicted_boxes)
result.scores = torch.zeros(
predicted_boxes.shape[0]).to(
self.model.device)
result.pred_classes = classes_idxs
result.pred_cls_probs = predicted_prob_vectors
result.pred_boxes_covariance = torch.empty(
(predicted_boxes.shape + (4,))).to(self.model.device)
return result
| 20,442
| 43.733042
| 150
|
py
|
pmb-nll
|
pmb-nll-main/src/probabilistic_inference/inference_utils.py
|
import os
import numpy as np
import torch
from detectron2.layers import batched_nms
# Detectron imports
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.structures import Boxes, BoxMode, Instances, pairwise_iou
from PIL import Image
# Project imports
from probabilistic_inference.image_corruptions import corruption_dict, corruption_tuple
from probabilistic_inference.probabilistic_detr_predictor import (
DetrProbabilisticPredictor,
)
from probabilistic_inference.probabilistic_rcnn_predictor import (
GeneralizedRcnnProbabilisticPredictor,
)
from probabilistic_inference.probabilistic_retinanet_predictor import (
RetinaNetProbabilisticPredictor,
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def build_predictor(cfg):
    """
    Builds probabilistic predictor according to architecture in config file.
    Args:
        cfg (CfgNode): detectron2 configuration node.
    Returns:
        Instance of the correct predictor.
    Raises:
        ValueError: if the configured meta-architecture is not supported.
    """
    # Lazy factories: only the selected architecture's class is instantiated.
    factories = {
        "ProbabilisticRetinaNet": lambda: RetinaNetProbabilisticPredictor(cfg),
        "ProbabilisticGeneralizedRCNN": lambda: GeneralizedRcnnProbabilisticPredictor(cfg),
        "ProbabilisticDetr": lambda: DetrProbabilisticPredictor(cfg),
    }
    meta_architecture = cfg.MODEL.META_ARCHITECTURE
    if meta_architecture not in factories:
        raise ValueError(
            "Invalid meta-architecture {}.".format(meta_architecture)
        )
    return factories[meta_architecture]()
def general_standard_nms_postprocessing(
    input_im, outputs, nms_threshold=0.5, max_detections_per_image=100
):
    """
    Post-process raw probabilistic detector outputs with standard NMS.
    Args:
        input_im (list): an input im list generated from dataset handler.
        outputs (list): output list form model specific inference function
        nms_threshold (float): non-maximum suppression threshold
        max_detections_per_image (int): maximum allowed number of detections per image.
    Returns:
        result (Instances): final results after nms
    """
    (
        predicted_boxes,
        predicted_boxes_covariance,
        predicted_prob,
        classes_idxs,
        predicted_prob_vectors,
        ppp,
    ) = outputs
    # Perform nms
    keep = batched_nms(predicted_boxes, predicted_prob, classes_idxs, nms_threshold)
    keep = keep[:max_detections_per_image]
    # Keep highest scoring results
    result = Instances((input_im[0]["image"].shape[1], input_im[0]["image"].shape[2]))
    result.pred_boxes = Boxes(predicted_boxes[keep])
    result.scores = predicted_prob[keep]
    result.pred_classes = classes_idxs[keep]
    result.pred_cls_probs = predicted_prob_vectors[keep]
    # Handle case where there is no ppp intensity function such as classical
    # inference.
    if isinstance(ppp, dict):
        # Broadcast each scalar ppp parameter across all kept detections.
        for k, v in ppp.items():
            result.set(
                "ppp_param_" + k,
                torch.tensor([v] * (len(result.pred_boxes))).to(device),
            )
    else:
        # No ppp parameters available: store NaN placeholder weights.
        result.pred_ppp_weights = np.nan * torch.ones(len(result.pred_boxes)).to(device)
    # Handle case where there is no covariance matrix such as classical
    # inference.
    if isinstance(predicted_boxes_covariance, torch.Tensor):
        result.pred_boxes_covariance = predicted_boxes_covariance[keep]
    else:
        result.pred_boxes_covariance = torch.zeros(
            predicted_boxes[keep].shape + (4,)
        ).to(device)
    return result
def general_topk_detection_postprocessing(
    input_im, outputs, max_detections_per_image=100
):
    """
    Keep the top-k highest scoring detections; no non-maximum suppression.
    Args:
        input_im (list): an input im list generated from dataset handler.
        outputs (list): output list form model specific inference function
        max_detections_per_image (int): maximum allowed number of detections per image.
    Returns:
        result (Instances): final results after top-k selection
    """
    (
        predicted_boxes,
        predicted_boxes_covariance,
        predicted_prob,
        classes_idxs,
        predicted_prob_vectors,
        ppp,
    ) = outputs
    # Guard against requesting more detections than available.
    num_keep = min(max_detections_per_image, len(predicted_prob))
    keep = torch.topk(predicted_prob, num_keep)[1]
    # Keep highest scoring results
    result = Instances((input_im[0]["image"].shape[1], input_im[0]["image"].shape[2]))
    result.pred_boxes = Boxes(predicted_boxes[keep])
    result.scores = predicted_prob[keep]
    result.pred_classes = classes_idxs[keep]
    result.pred_cls_probs = predicted_prob_vectors[keep]
    # Handle case where there is no ppp intensity function such as classical
    # inference.
    if isinstance(ppp, dict):
        # Broadcast each scalar ppp parameter across all kept detections.
        for k, v in ppp.items():
            result.set(
                "ppp_param_" + k,
                torch.tensor([v] * (len(result.pred_boxes))).to(device),
            )
    else:
        # No ppp parameters available: store NaN placeholder weights.
        result.pred_ppp_weights = np.nan * torch.ones(len(result.pred_boxes)).to(device)
    # Handle case where there is no covariance matrix such as classical
    # inference.
    if isinstance(predicted_boxes_covariance, torch.Tensor):
        result.pred_boxes_covariance = predicted_boxes_covariance[keep]
    else:
        result.pred_boxes_covariance = torch.zeros(
            predicted_boxes[keep].shape + (4,)
        ).to(device)
    return result
def general_output_statistics_postprocessing(
    input_im,
    outputs,
    nms_threshold=0.5,
    max_detections_per_image=100,
    affinity_threshold=0.7,
):
    """
    Cluster raw detections around NMS centers and fuse each cluster into a
    mean box with a covariance estimate.
    Args:
        input_im (list): an input im list generated from dataset handler.
        outputs (list): output list form model specific inference function
        nms_threshold (float): non-maximum suppression threshold between 0-1
        max_detections_per_image (int): maximum allowed number of detections per image.
        affinity_threshold (float): cluster affinity threshold between 0-1
    Returns:
        result (Instances): final results after nms
    """
    # ppp is unpacked for interface symmetry but unused here.
    (
        predicted_boxes,
        predicted_boxes_covariance,
        predicted_prob,
        classes_idxs,
        predicted_prob_vectors,
        ppp,
    ) = outputs
    # Get pairwise iou matrix
    match_quality_matrix = pairwise_iou(Boxes(predicted_boxes), Boxes(predicted_boxes))
    # Get cluster centers using standard nms. Much faster than sequential
    # clustering.
    keep = batched_nms(predicted_boxes, predicted_prob, classes_idxs, nms_threshold)
    keep = keep[:max_detections_per_image]
    clusters_inds = match_quality_matrix[keep, :]
    clusters_inds = clusters_inds > affinity_threshold
    # Compute mean and covariance for every cluster.
    predicted_prob_vectors_list = []
    predicted_boxes_list = []
    predicted_boxes_covariance_list = []
    for cluster_idxs, center_idx in zip(clusters_inds, keep):
        if cluster_idxs.sum(0) >= 2:
            # Make sure to only select cluster members of same class as center
            cluster_center_classes_idx = classes_idxs[center_idx]
            cluster_classes_idxs = classes_idxs[cluster_idxs]
            class_similarity_idxs = cluster_classes_idxs == cluster_center_classes_idx
            # Grab cluster
            box_cluster = predicted_boxes[cluster_idxs, :][class_similarity_idxs, :]
            cluster_mean = box_cluster.mean(0)
            # Sample covariance of the cluster members (epistemic spread).
            residuals = (box_cluster - cluster_mean).unsqueeze(2)
            cluster_covariance = torch.sum(
                torch.matmul(residuals, torch.transpose(residuals, 2, 1)), 0
            ) / max((box_cluster.shape[0] - 1), 1.0)
            # Assume final result as mean and covariance of gaussian mixture of cluster members if
            # covariance is provided by neural network.
            if predicted_boxes_covariance is not None:
                if len(predicted_boxes_covariance) > 0:
                    cluster_covariance = (
                        cluster_covariance
                        + predicted_boxes_covariance[cluster_idxs, :][
                            class_similarity_idxs, :
                        ].mean(0)
                    )
            # Compute average over cluster probabilities
            cluster_probs_vector = predicted_prob_vectors[cluster_idxs, :][
                class_similarity_idxs, :
            ].mean(0)
        else:
            # Singleton cluster: pass the center detection through.
            cluster_mean = predicted_boxes[center_idx]
            cluster_probs_vector = predicted_prob_vectors[center_idx]
            # Small isotropic fallback keeps downstream inverses well
            # conditioned when no covariance is available.
            cluster_covariance = 1e-4 * torch.eye(4, 4).to(device)
            if predicted_boxes_covariance is not None:
                if len(predicted_boxes_covariance) > 0:
                    cluster_covariance = predicted_boxes_covariance[center_idx]
        predicted_boxes_list.append(cluster_mean)
        predicted_boxes_covariance_list.append(cluster_covariance)
        predicted_prob_vectors_list.append(cluster_probs_vector)
    result = Instances((input_im[0]["image"].shape[1], input_im[0]["image"].shape[2]))
    if len(predicted_boxes_list) > 0:
        # We do not average the probability vectors for this post processing method. Averaging results in
        # very low mAP due to mixing with low scoring detection instances.
        result.pred_boxes = Boxes(torch.stack(predicted_boxes_list, 0))
        predicted_prob_vectors = torch.stack(predicted_prob_vectors_list, 0)
        predicted_prob, classes_idxs = torch.max(predicted_prob_vectors, 1)
        result.scores = predicted_prob
        result.pred_classes = classes_idxs
        result.pred_cls_probs = predicted_prob_vectors
        result.pred_boxes_covariance = torch.stack(predicted_boxes_covariance_list, 0)
    else:
        # No clusters formed: emit empty/zeroed fields of matching shapes.
        result.pred_boxes = Boxes(predicted_boxes)
        result.scores = torch.zeros(predicted_boxes.shape[0]).to(device)
        result.pred_classes = classes_idxs
        result.pred_cls_probs = predicted_prob_vectors
        result.pred_boxes_covariance = torch.empty((predicted_boxes.shape + (4,))).to(
            device
        )
    return result
def general_black_box_ensembles_post_processing(
    input_im,
    ensemble_pred_box_list,
    ensembles_class_idxs_list,
    ensemble_pred_prob_vectors_list,
    ensembles_pred_box_covariance_list,
    nms_threshold=0.5,
    max_detections_per_image=100,
    affinity_threshold=0.7,
    is_generalized_rcnn=False,
    merging_method="mixture_of_gaussians",
):
    """
    Fuse per-member detections from black-box ensembles into one result set.
    Args:
        input_im (list): an input im list generated from dataset handler.
        ensemble_pred_box_list (list): predicted box list
        ensembles_class_idxs_list (list): predicted classes list
        ensemble_pred_prob_vectors_list (list): predicted probability vector list
        ensembles_pred_box_covariance_list (list): predicted covariance matrices
        nms_threshold (float): non-maximum suppression threshold between 0-1
        max_detections_per_image (int): Number of maximum allowable detections per image.
        affinity_threshold (float): cluster affinity threshold between 0-1
        is_generalized_rcnn (bool): used to handle category selection by removing background class.
        merging_method (str): default is gaussian mixture model. use 'bayesian_inference' to perform gaussian inference
        similar to bayesod.
    Returns:
        result (Instances): final results after nms
    """
    # Flatten all ensemble members into one per-detection batch.
    predicted_boxes = torch.cat(ensemble_pred_box_list, 0)
    predicted_boxes_covariance = torch.cat(ensembles_pred_box_covariance_list, 0)
    predicted_prob_vectors = torch.cat(ensemble_pred_prob_vectors_list, 0)
    predicted_class_idxs = torch.cat(ensembles_class_idxs_list, 0)
    # Compute iou between all output boxes and each other output box.
    match_quality_matrix = pairwise_iou(Boxes(predicted_boxes), Boxes(predicted_boxes))
    # Perform basic sequential clustering.
    clusters = []
    for i in range(match_quality_matrix.shape[0]):
        # Check if current box is already a member of any previous cluster.
        if i != 0:
            all_clusters = torch.cat(clusters, 0)
            if (all_clusters == i).any():
                continue
        # Only add if boxes have the same category.
        cluster_membership_test = (match_quality_matrix[i, :] >= affinity_threshold) & (
            predicted_class_idxs == predicted_class_idxs[i]
        )
        # torch.where returns a one-element tuple; extend appends its single
        # index tensor as one new cluster.
        inds = torch.where(cluster_membership_test)
        clusters.extend(inds)
    # Compute mean and covariance for every cluster.
    predicted_boxes_list = []
    predicted_boxes_covariance_list = []
    predicted_prob_vectors_list = []
    # Compute cluster mean and covariance matrices.
    for cluster in clusters:
        box_cluster = predicted_boxes[cluster]
        box_cluster_covariance = predicted_boxes_covariance[cluster]
        if box_cluster.shape[0] >= 2:
            if merging_method == "mixture_of_gaussians":
                cluster_mean = box_cluster.mean(0)
                # Compute epistemic covariance
                residuals = (box_cluster - cluster_mean).unsqueeze(2)
                predicted_covariance = torch.sum(
                    torch.matmul(residuals, torch.transpose(residuals, 2, 1)), 0
                ) / (box_cluster.shape[0] - 1)
                # Add epistemic covariance
                predicted_covariance = (
                    predicted_covariance + box_cluster_covariance.mean(0)
                )
                predicted_boxes_list.append(cluster_mean)
                predicted_boxes_covariance_list.append(predicted_covariance)
                predicted_prob_vectors_list.append(
                    predicted_prob_vectors[cluster].mean(0)
                )
            else:
                # BayesOD-style precision-weighted gaussian fusion (numpy).
                cluster_mean, predicted_covariance = bounding_box_bayesian_inference(
                    box_cluster.cpu().numpy(),
                    box_cluster_covariance.cpu().numpy(),
                    box_merge_mode="bayesian_inference",
                )
                cluster_mean = torch.as_tensor(cluster_mean).to(device)
                predicted_covariance = torch.as_tensor(predicted_covariance).to(device)
                predicted_boxes_list.append(cluster_mean)
                predicted_boxes_covariance_list.append(predicted_covariance)
                predicted_prob_vectors_list.append(
                    predicted_prob_vectors[cluster].mean(0)
                )
        else:
            # Singleton cluster: pass the lone detection through unchanged.
            predicted_boxes_list.append(predicted_boxes[cluster].mean(0))
            predicted_boxes_covariance_list.append(
                predicted_boxes_covariance[cluster].mean(0)
            )
            predicted_prob_vectors_list.append(predicted_prob_vectors[cluster].mean(0))
    result = Instances((input_im[0]["image"].shape[1], input_im[0]["image"].shape[2]))
    if len(predicted_boxes_list) > 0:
        predicted_prob_vectors = torch.stack(predicted_prob_vectors_list, 0)
        # Remove background class if generalized rcnn
        if is_generalized_rcnn:
            predicted_prob_vectors_no_bkg = predicted_prob_vectors[:, :-1]
        else:
            predicted_prob_vectors_no_bkg = predicted_prob_vectors
        predicted_prob, classes_idxs = torch.max(predicted_prob_vectors_no_bkg, 1)
        predicted_boxes = torch.stack(predicted_boxes_list, 0)
        # We want to keep the maximum allowed boxes per image to be consistent
        # with the rest of the methods. However, just sorting by score or uncertainty will lead to a lot of
        # redundant detections so we have to use one more NMS step.
        keep = batched_nms(predicted_boxes, predicted_prob, classes_idxs, nms_threshold)
        keep = keep[:max_detections_per_image]
        result.pred_boxes = Boxes(predicted_boxes[keep])
        result.scores = predicted_prob[keep]
        result.pred_classes = classes_idxs[keep]
        result.pred_cls_probs = predicted_prob_vectors[keep]
        result.pred_boxes_covariance = torch.stack(predicted_boxes_covariance_list, 0)[
            keep
        ]
    else:
        # No clusters formed: emit empty/zeroed fields of matching shapes.
        result.pred_boxes = Boxes(predicted_boxes)
        result.scores = torch.zeros(predicted_boxes.shape[0]).to(device)
        result.pred_classes = predicted_class_idxs
        result.pred_cls_probs = predicted_prob_vectors
        result.pred_boxes_covariance = torch.empty((predicted_boxes.shape + (4,))).to(
            device
        )
    return result
def bounding_box_bayesian_inference(cluster_means, cluster_covs, box_merge_mode):
    """
    Fuse a cluster of box means and covariance matrices into a single box.
    Args:
        cluster_means (nd array): M x 4 cluster box means.
        cluster_covs (nd array): M x 4 x 4 cluster box covariance matrices.
        box_merge_mode (str): 'bayesian_inference' for precision-weighted
            gaussian fusion, or 'covariance_intersection' for fast covariance
            intersection.
    Returns:
        final_mean (nd array): cluster fused mean.
        final_cov (nd array): cluster fused covariance matrix.
    Raises:
        ValueError: if box_merge_mode is not a supported merging mode.
    """
    # Both fusion rules operate in precision (inverse covariance) space.
    cluster_precs = np.linalg.inv(cluster_covs)
    if box_merge_mode == "bayesian_inference":
        # Product-of-gaussians fusion: precision-weighted mean.
        final_cov = np.linalg.inv(cluster_precs.sum(0))
        final_mean = np.matmul(cluster_precs, np.expand_dims(cluster_means, 2)).sum(0)
        final_mean = np.squeeze(np.matmul(final_cov, final_mean))
    elif box_merge_mode == "covariance_intersection":
        # Fast covariance intersection: determinant-based weights omega_i.
        cluster_difference_precs = cluster_precs.sum(0) - cluster_precs
        cluster_precs_det = np.linalg.det(cluster_precs)
        cluster_total_prec_det = np.linalg.det(cluster_precs.sum(0))
        cluster_difference_precs_det = np.linalg.det(cluster_difference_precs)
        omegas = (
            cluster_total_prec_det - cluster_difference_precs_det + cluster_precs_det
        ) / (
            cluster_precs.shape[0] * cluster_total_prec_det
            + (cluster_precs_det - cluster_difference_precs_det).sum(0)
        )
        weighted_cluster_precs = np.expand_dims(omegas, (1, 2)) * cluster_precs
        final_cov = np.linalg.inv(weighted_cluster_precs.sum(0))
        final_mean = np.squeeze(
            np.matmul(
                final_cov,
                np.matmul(weighted_cluster_precs, np.expand_dims(cluster_means, 2)).sum(
                    0
                ),
            )
        )
    else:
        # Previously an unknown mode fell through to an UnboundLocalError on
        # the return; fail with an explicit, catchable error instead.
        raise ValueError("Invalid box_merge_mode {}.".format(box_merge_mode))
    return final_mean, final_cov
def compute_mean_covariance_torch(input_samples):
    """
    Estimate the per-anchor sample mean and covariance of monte-carlo outputs.
    Args:
        input_samples: either an N x k x M tensor holding M stochastic runs,
            or a list of M tensors each of shape N x k.
    Returns:
        predicted_mean (Tensor): N x k tensor of per-anchor means.
        predicted_covariance (Tensor): N x k x k tensor of sample covariances.
    """
    if not isinstance(input_samples, torch.Tensor):
        # Stack the M runs along a trailing sample dimension: N x k x M.
        input_samples = torch.stack(input_samples, 2)
    num_samples = input_samples.shape[2]
    # Per-anchor mean over the sample dimension, kept for broadcasting.
    predicted_mean = input_samples.mean(2, keepdim=True)
    # Arrange residuals as N x M x k x 1 column vectors.
    centered = (input_samples - predicted_mean).permute(0, 2, 1).unsqueeze(3)
    # Unbiased estimate: sum of outer products over M samples / (M - 1).
    predicted_covariance = torch.matmul(
        centered, centered.transpose(3, 2)).sum(1) / (num_samples - 1)
    return predicted_mean.squeeze(2), predicted_covariance
def probabilistic_detector_postprocess(results, output_height, output_width):
    """
    Resize the output instances and scales estimated covariance matrices.
    The input images are often resized when entering an object detector.
    As a result, we often need the outputs of the detector in a different
    resolution from its inputs.
    Args:
        results (Dict): the raw outputs from the probabilistic detector.
        `results.image_size` contains the input image resolution the detector sees.
        This object might be modified in-place.
        output_height: the desired output resolution.
        output_width: the desired output resolution.
    Returns:
        results (Dict): dictionary updated with rescaled boxes and covariance matrices.
    """
    scale_x, scale_y = (
        output_width / results.image_size[1],
        output_height / results.image_size[0],
    )
    # Re-wrap the fields in an Instances object sized for the target output.
    results = Instances((output_height, output_width), **results.get_fields())
    output_boxes = results.pred_boxes
    # Scale bounding boxes
    output_boxes.scale(scale_x, scale_y)
    output_boxes.clip(results.image_size)
    # Drop boxes that became empty after clipping.
    results = results[output_boxes.nonempty()]
    # Scale covariance matrices
    if results.has("pred_boxes_covariance"):
        # Add small value to make sure covariance matrix is well conditioned
        output_boxes_covariance = results.pred_boxes_covariance + 1e-4 * torch.eye(
            results.pred_boxes_covariance.shape[2]
        ).to(device)
        # Propagate the linear rescaling: Sigma' = S Sigma S^T with diagonal S.
        scale_mat = (
            torch.diag_embed(torch.as_tensor((scale_x, scale_y, scale_x, scale_y)))
            .to(device)
            .unsqueeze(0)
        )
        scale_mat = torch.repeat_interleave(
            scale_mat, output_boxes_covariance.shape[0], 0
        )
        output_boxes_covariance = torch.matmul(
            torch.matmul(scale_mat, output_boxes_covariance),
            torch.transpose(scale_mat, 2, 1),
        )
        results.pred_boxes_covariance = output_boxes_covariance
    return results
def covar_xyxy_to_xywh(output_boxes_covariance):
    """
    Convert covariance matrices from top-left/bottom-right corner form
    (x1, y1, x2, y2) to top-left corner + width/height form (x1, y1, w, h).

    The conversion is linear, so covariances transform as J C J^T with J the
    Jacobian of the coordinate change.

    Args:
        output_boxes_covariance: Input covariance matrices (Nxkxk).

    Returns:
        output_boxes_covariance (Nxkxk): Transformed covariance matrices.
    """
    jacobian = (
        torch.as_tensor(
            [[1.0, 0, 0, 0], [0, 1.0, 0, 0], [-1.0, 0, 1.0, 0], [0, -1.0, 0, 1.0]]
        )
        .to(device)
        .unsqueeze(0)
    )
    jacobian = torch.repeat_interleave(jacobian, output_boxes_covariance.shape[0], 0)
    return torch.matmul(
        torch.matmul(jacobian, output_boxes_covariance),
        torch.transpose(jacobian, 2, 1),
    )
def instances_to_json(instances, img_id, cat_mapping_dict=None):
    """
    Dump an "Instances" object to a COCO-format json that's used for evaluation.

    Args:
        instances (Instances): detectron2 instances
        img_id (int): the image id
        cat_mapping_dict (dict): maps raw category ids from the network to
            dataset ids. Very important if performing inference on a
            different dataset than that used for training.

    Returns:
        list[dict]: list of json annotations in COCO format.
    """
    if len(instances) == 0:
        return []

    # Boxes in COCO (x, y, w, h) convention.
    boxes = BoxMode.convert(
        instances.pred_boxes.tensor.cpu().numpy(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
    ).tolist()
    scores = instances.scores.cpu().tolist()
    classes = instances.pred_classes.cpu().tolist()

    # Collect PPP parameters from fields named "ppp_param*" (prefix stripped).
    ppp = {
        k[10:]: v[0].detach().cpu().numpy().tolist()
        for k, v in instances.get_fields().items()
        if "ppp_param" in k
    }

    # Map raw class ids to dataset ids; unmapped classes become -1 (dropped below).
    classes = [
        cat_mapping_dict[c] if c in cat_mapping_dict.keys() else -1 for c in classes
    ]
    pred_cls_probs = instances.pred_cls_probs.cpu().tolist()

    if instances.has("pred_boxes_covariance"):
        pred_boxes_covariance = (
            covar_xyxy_to_xywh(instances.pred_boxes_covariance).cpu().tolist()
        )
    else:
        pred_boxes_covariance = []

    results = []
    for idx in range(len(instances)):
        if classes[idx] == -1:
            continue
        results.append(
            {
                "image_id": img_id,
                "category_id": classes[idx],
                "bbox": boxes[idx],
                "score": scores[idx],
                "cls_prob": pred_cls_probs[idx],
                "bbox_covar": pred_boxes_covariance[idx],
                "ppp": ppp,
                "image_size": list(instances[idx].image_size),
            }
        )
    return results
class SampleBox2BoxTransform(Box2BoxTransform):
    """
    Extension of Box2BoxTransform to support transforming across batch sizes.
    """

    def apply_samples_deltas(self, deltas, boxes):
        """
        Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.

        Args:
            deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
                deltas[i] represents k potentially different class-specific
                box transformations for the single box boxes[i].
            boxes (Tensor): boxes to transform, of shape (N, 4)
        """
        boxes = boxes.to(deltas.dtype)

        # Box geometry in center/size form.
        box_w = boxes[:, 2, :] - boxes[:, 0, :]
        box_h = boxes[:, 3, :] - boxes[:, 1, :]
        center_x = boxes[:, 0, :] + 0.5 * box_w
        center_y = boxes[:, 1, :] + 0.5 * box_h

        wx, wy, ww, wh = self.weights
        delta_x = deltas[:, 0::4, :] / wx
        delta_y = deltas[:, 1::4, :] / wy
        # Clamp size deltas to prevent sending too large values into torch.exp().
        delta_w = torch.clamp(deltas[:, 2::4, :] / ww, max=self.scale_clamp)
        delta_h = torch.clamp(deltas[:, 3::4, :] / wh, max=self.scale_clamp)

        new_cx = delta_x * box_w[:, None] + center_x[:, None]
        new_cy = delta_y * box_h[:, None] + center_y[:, None]
        new_w = torch.exp(delta_w) * box_w[:, None]
        new_h = torch.exp(delta_h) * box_h[:, None]

        # Back to corner form.
        pred_boxes = torch.zeros_like(deltas)
        pred_boxes[:, 0::4, :] = new_cx - 0.5 * new_w  # x1
        pred_boxes[:, 1::4, :] = new_cy - 0.5 * new_h  # y1
        pred_boxes[:, 2::4, :] = new_cx + 0.5 * new_w  # x2
        pred_boxes[:, 3::4, :] = new_cy + 0.5 * new_h  # y2
        return pred_boxes
def corrupt(x, severity=1, corruption_name=None, corruption_number=None):
    """
    :param x: image to corrupt; a 224x224x3 numpy array in [0, 255]
    :param severity: strength with which to corrupt x; an integer in [0, 5]
    :param corruption_name: specifies which corruption function to call;
    must be one of 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
                    'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
                    'brightness', 'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression',
                    'speckle_noise', 'gaussian_blur', 'spatter', 'saturate';
                    the last four are validation functions
    :param corruption_number: the position of the corruption_name in the above list;
    an integer in [0, 18]; useful for easy looping; 15, 16, 17, 18 are validation corruption numbers
    :return: the image x corrupted by a corruption function at the given severity; same shape as input
    """
    # Resolve the corruption callable by name or by index.
    if corruption_name is not None:
        corruption_fn = corruption_dict[corruption_name]
    elif corruption_number is not None:
        corruption_fn = corruption_tuple[corruption_number]
    else:
        raise ValueError("Either corruption_name or corruption_number must be passed")

    x_corrupted = corruption_fn(Image.fromarray(x), severity)
    if x_corrupted.shape != x.shape:
        raise AssertionError("Output image not same size as input image!")
    return np.uint8(x_corrupted)
def get_dir_alphas(pred_class_logits):
    """
    Compute Dirichlet concentration parameters (alphas) from class logits.

    Args:
        pred_class_logits (Tensor): class logits.

    Returns:
        Tensor: ``relu(logits) + 1``, elementwise >= 1.
    """
    # Use the out-of-place relu: the previous in-place ``torch.relu_`` silently
    # overwrote the caller's logits tensor as a side effect.
    return torch.relu(pred_class_logits) + 1.0
def get_inference_output_dir(
    output_dir_name, test_dataset_name, inference_config_name, image_corruption_level
):
    """
    Build the directory path where inference results are stored for a given
    dataset, inference config and image corruption level.

    The inference config's file name (minus its 5-character extension,
    e.g. ".yaml") becomes one path component.
    """
    config_stem = os.path.split(inference_config_name)[-1][:-5]
    corruption_dir = "corruption_level_" + str(image_corruption_level)
    return os.path.join(
        output_dir_name, "inference", test_dataset_name, config_stem, corruption_dir
    )
| 27,584
| 36.995868
| 120
|
py
|
pmb-nll
|
pmb-nll-main/src/probabilistic_inference/__init__.py
| 0
| 0
| 0
|
py
|
|
pmb-nll
|
pmb-nll-main/src/probabilistic_inference/inference_core.py
|
import cv2
import os
from abc import ABC, abstractmethod
# Detectron Imports
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model
from core.visualization_tools.probabilistic_visualizer import ProbabilisticVisualizer
# Project Imports
from probabilistic_inference import inference_utils
class ProbabilisticPredictor(ABC):
    """
    Abstract base class for probabilistic object detection predictors.

    Builds the detection model (or an ensemble of models) from the config,
    loads checkpoints, and dispatches inference to one of several
    uncertainty-aware post-processing strategies.
    """

    def __init__(self, cfg):
        """
        Args:
            cfg: detectron2-style config node.
        """
        # Create common attributes.
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model_list = []

        # Parse config
        self.inference_mode = self.cfg.PROBABILISTIC_INFERENCE.INFERENCE_MODE
        self.mc_dropout_enabled = self.cfg.PROBABILISTIC_INFERENCE.MC_DROPOUT.ENABLE
        self.num_mc_dropout_runs = self.cfg.PROBABILISTIC_INFERENCE.MC_DROPOUT.NUM_RUNS
        self.use_mc_sampling = cfg.PROBABILISTIC_INFERENCE.USE_MC_SAMPLING

        # MC-Dropout needs the model in train mode so dropout layers stay active.
        if self.mc_dropout_enabled:
            self.model.train()
        else:
            self.model.eval()

        if self.inference_mode == 'ensembles':
            # Build one model per random seed and load its last checkpoint.
            ensemble_random_seeds = self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.RANDOM_SEED_NUMS
            for random_seed in ensemble_random_seeds:
                model = build_model(self.cfg)
                model.eval()
                checkpoint_dir = os.path.join(
                    os.path.split(self.cfg.OUTPUT_DIR)[0],
                    'random_seed_' + str(random_seed))
                # Load last checkpoint.
                DetectionCheckpointer(
                    model,
                    save_dir=checkpoint_dir).resume_or_load(
                    cfg.MODEL.WEIGHTS,
                    resume=True)
                self.model_list.append(model)
        else:
            # Or load the single model's last checkpoint.
            DetectionCheckpointer(
                self.model,
                save_dir=cfg.OUTPUT_DIR).resume_or_load(
                cfg.MODEL.WEIGHTS,
                resume=True)

    def __call__(self, input_im):
        """
        Run inference with the configured post-processing mode, then rescale
        the detections to the requested output resolution.

        Args:
            input_im (list): dataset-handler style input list (one image).

        Returns:
            Instances: post-processed detections.
        """
        if self.inference_mode == 'standard_nms':
            results = self.post_processing_standard_nms(input_im)
        elif self.inference_mode == 'mc_dropout_ensembles':
            results = self.post_processing_mc_dropout_ensembles(input_im)
        elif self.inference_mode == 'output_statistics':
            results = self.post_processing_output_statistics(input_im)
        elif self.inference_mode == 'ensembles':
            results = self.post_processing_ensembles(input_im, self.model_list)
        elif self.inference_mode == 'bayes_od':
            results = self.post_processing_bayes_od(input_im)
        elif self.inference_mode == 'topk_detections':
            results = self.post_processing_topk_detections(input_im)
        else:
            raise ValueError(
                'Invalid inference mode {}.'.format(
                    self.inference_mode))

        # Perform post processing on detector output.
        height = input_im[0].get("height", results.image_size[0])
        width = input_im[0].get("width", results.image_size[1])
        return inference_utils.probabilistic_detector_postprocess(
            results, height, width)

    def visualize_inference(
        self,
        inputs,
        results,
        gt=None,
        min_allowed_score=-1,
        class_map=None,
        gt_class_map=None,
        num_samples=0,
    ):
        """
        A function used to visualize final network predictions.
        It shows the original image and up to 100 predicted object bounding
        boxes on the original image. Valuable for debugging inference methods.

        Args:
            inputs (list): a list that contains input to the model.
            results (List[Instances]): a list of #images elements.
            gt (dict, optional): ground-truth boxes/classes to overlay.
            min_allowed_score (float): only predictions above this score are drawn.
            class_map (dict, optional): maps predicted class ids to names.
            gt_class_map (dict, optional): maps gt class ids to ``class_map`` keys.
            num_samples (int): number of sampled-box visualizations written to disk.
        """
        max_boxes = 100

        required_width = inputs[0]["width"]
        required_height = inputs[0]["height"]

        img = inputs[0]["image"].cpu().numpy()
        assert img.shape[0] == 3, "Images should have 3 channels."
        if self.model.input_format == "RGB":
            img = img[::-1, :, :]
        img = img.transpose(1, 2, 0)
        img = cv2.resize(img, (required_width, required_height))

        predicted_boxes = results.pred_boxes.tensor.cpu().numpy()
        predicted_covar_mats = results.pred_boxes_covariance.cpu().numpy()
        scores = results.scores.cpu().numpy()

        if class_map:
            labels = np.array(
                [
                    f"{class_map[cls]}: {round(score, 2)}"
                    for score, cls in zip(
                        scores.tolist(), results.pred_classes.numpy().tolist()
                    )
                ]
            )
        else:
            labels = np.array([f"{s:.2f}" for s in scores])

        if gt is not None:
            gt_boxes = gt["gt_boxes"].cpu().numpy()
            gt_labels = [
                class_map[gt_class_map[int(cls.squeeze())]]
                if class_map and gt_class_map
                else int(cls.squeeze())
                for cls in gt["gt_cat_idxs"].cpu().numpy()
            ]
            v_gt = ProbabilisticVisualizer(img, None)
            v_img = v_gt.overlay_instances(
                boxes=gt_boxes,
                labels=gt_labels,
                assigned_colors=["g"] * len(gt_labels),
            )
            gt_img = v_img.get_image()
            gt_vis_name = f"GT. Image id {inputs[0]['image_id']}"
            cv2.imshow(gt_vis_name, gt_img)
        else:
            v_gt = None

        v_pred = ProbabilisticVisualizer(img, None) if v_gt is None else v_gt
        keep = scores > min_allowed_score
        v_pred = v_pred.overlay_covariance_instances(
            boxes=predicted_boxes[keep][0:max_boxes],
            covariance_matrices=predicted_covar_mats[keep][0:max_boxes],
            labels=labels[keep][0:max_boxes],
            assigned_colors=None,
            alpha=0.8,
        )
        prop_img = v_pred.get_image()
        vis_name = (
            f"{max_boxes} Highest Scoring Results. Image id {inputs[0]['image_id']}"
        )
        cv2.imshow(vis_name, prop_img)

        if num_samples > 0:
            # Filter once; the loop below indexes all three arrays with j.
            means = predicted_boxes[keep]
            covs = predicted_covar_mats[keep]
            ss = scores[keep]
            for i in range(num_samples):
                sampled_boxes = []
                for j in range(len(means)):
                    if ss[j] < 0.1:
                        # BUG FIX: previously this sampled from the *unfiltered*
                        # ``scores[j]``, whose index does not line up with the
                        # filtered ``means``/``covs`` arrays indexed by j.
                        n = np.random.poisson(ss[j])
                    else:
                        n = 1 if ss[j] > np.random.rand() else 0
                    for _ in range(n):
                        sampled_boxes.append(
                            np.random.multivariate_normal(
                                mean=means[j],
                                cov=covs[j],
                            )
                        )
                sampled_boxes = np.array(sampled_boxes)
                v_pred_sample = ProbabilisticVisualizer(img, None)
                v_pred_sample = v_pred_sample.overlay_instances(
                    boxes=sampled_boxes,
                    assigned_colors=["red"] * len(sampled_boxes),
                    alpha=1.0,
                )
                prop_img = v_pred_sample.get_image()
                vis_name = f"sample_{i}_image_id_{inputs[0]['image_id']}.png"
                cv2.imwrite(vis_name, prop_img)
        cv2.waitKey()

    @abstractmethod
    def post_processing_standard_nms(self, input_im):
        """Produce results using standard non-maximum suppression."""
        pass

    @abstractmethod
    def post_processing_output_statistics(self, input_im):
        """Produce results from per-cluster output statistics."""
        pass

    @abstractmethod
    def post_processing_mc_dropout_ensembles(self, input_im):
        """Produce results by aggregating MC-Dropout forward passes."""
        pass

    @abstractmethod
    def post_processing_ensembles(self, input_im, model_list):
        """Produce results by aggregating an ensemble of models."""
        pass

    @abstractmethod
    def post_processing_bayes_od(self, input_im):
        """Produce results using BayesOD fusion."""
        pass

    @abstractmethod
    def post_processing_topk_detections(self, input_im):
        """Produce results by keeping the top-k scoring detections."""
        pass
| 9,017
| 35.959016
| 166
|
py
|
pmb-nll
|
pmb-nll-main/src/probabilistic_inference/probabilistic_detr_predictor.py
|
import numpy as np
import torch
import torch.nn.functional as F
# DETR imports
from detr.util.box_ops import box_cxcywh_to_xyxy
# Detectron Imports
from detectron2.structures import Boxes
# Project Imports
from probabilistic_inference import inference_utils
from probabilistic_inference.inference_core import ProbabilisticPredictor
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
class DetrProbabilisticPredictor(ProbabilisticPredictor):
    """Probabilistic inference wrapper for the DETR architecture."""
    def __init__(self, cfg):
        """Build the DETR model; see ProbabilisticPredictor for cfg handling."""
        super().__init__(cfg)
        # These are mock variables to be compatible with probabilistic detectron library. No NMS is performed for DETR.
        # Only needed for ensemble methods
        self.test_nms_thresh = 0.5
        self.test_topk_per_image = self.model.detr.num_queries
    def detr_probabilistic_inference(self,
                                     input_im):
        """
        Run DETR and convert raw outputs to image-space boxes, covariance
        matrices, class probabilities and PPP parameters.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            tuple: (boxes, box_covariances, probs, class_idxs, prob_vectors, ppp).
        """
        outputs = self.model(input_im,
                             return_raw_results=True,
                             is_mc_dropout=self.mc_dropout_enabled)
        image_width = input_im[0]['image'].shape[2]
        image_height = input_im[0]['image'].shape[1]
        # Handle logits and classes
        predicted_logits = outputs['pred_logits'][0]
        if 'pred_logits_var' in outputs.keys():
            # Logit variance present: marginalize by sampling logits from a
            # Normal and averaging the softmax probabilities.
            predicted_logits_var = outputs['pred_logits_var'][0]
            box_cls_dists = torch.distributions.normal.Normal(
                predicted_logits, scale=torch.sqrt(
                    torch.exp(predicted_logits_var)))
            predicted_logits = box_cls_dists.rsample(
                (self.model.cls_var_num_samples,))
            predicted_prob_vectors = F.softmax(predicted_logits, dim=-1)
            predicted_prob_vectors = predicted_prob_vectors.mean(0)
        else:
            predicted_prob_vectors = F.softmax(predicted_logits, dim=-1)
        # Last column is the "no object" class and is excluded from the max.
        predicted_prob, classes_idxs = predicted_prob_vectors[:, :-1].max(-1)
        # Handle boxes and covariance matrices
        predicted_boxes = outputs['pred_boxes'][0]
        # Rescale boxes to inference image size (not COCO original size)
        pred_boxes = Boxes(box_cxcywh_to_xyxy(predicted_boxes))
        pred_boxes.scale(scale_x=image_width, scale_y=image_height)
        predicted_boxes = pred_boxes.tensor
        # Rescale boxes to inference image size (not COCO original size)
        if 'pred_boxes_cov' in outputs.keys():
            # Recover full covariance C = L L^T from the predicted Cholesky factor.
            predicted_boxes_covariance = covariance_output_to_cholesky(
                outputs['pred_boxes_cov'][0])
            predicted_boxes_covariance = torch.matmul(
                predicted_boxes_covariance, predicted_boxes_covariance.transpose(
                    1, 2))
            # Linear map from (cx, cy, w, h) to (x1, y1, x2, y2) coordinates;
            # covariances transform as J C J^T.
            transform_mat = torch.tensor([[[1.0, 0.0, -0.5, 0.0],
                                           [0.0, 1.0, 0.0, -0.5],
                                           [1.0, 0.0, 0.5, 0.0],
                                           [0.0, 1.0, 0.0, 0.5]]]).to(self.model.device)
            predicted_boxes_covariance = torch.matmul(
                torch.matmul(
                    transform_mat,
                    predicted_boxes_covariance),
                transform_mat.transpose(
                    1,
                    2))
            # Scale from normalized coordinates to pixel coordinates.
            scale_mat = torch.diag_embed(
                torch.as_tensor(
                    (image_width,
                     image_height,
                     image_width,
                     image_height),
                    dtype=torch.float32)).to(
                self.model.device).unsqueeze(0)
            predicted_boxes_covariance = torch.matmul(
                torch.matmul(
                    scale_mat,
                    predicted_boxes_covariance),
                torch.transpose(scale_mat, 2, 1))
        else:
            predicted_boxes_covariance = []
        if 'ppp' in outputs:
            ppp = outputs['ppp']
        else:
            ppp = []
        return predicted_boxes, predicted_boxes_covariance, predicted_prob, classes_idxs, predicted_prob_vectors, ppp
    def post_processing_standard_nms(self, input_im):
        """
        This function produces results using standard non-maximum suppression. The function takes into
        account any probabilistic modeling method when computing the results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.detr_probabilistic_inference(input_im)
        return inference_utils.general_standard_nms_postprocessing(
            input_im, outputs)
    def post_processing_topk_detections(self, input_im):
        """
        This function produces results using topk selection based on confidence scores.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.detr_probabilistic_inference(input_im)
        return inference_utils.general_topk_detection_postprocessing(input_im, outputs)
    def post_processing_output_statistics(self, input_im):
        """
        Output statistics does not make much sense for DETR architecture. There is some redundancy due to forced 100
        detections per image, but cluster sizes would be too small for meaningful estimates. Might implement it later
        on.
        """
        raise NotImplementedError
        pass
    def post_processing_mc_dropout_ensembles(self, input_im):
        """Aggregate multiple MC-Dropout forward passes (post-NMS merge only)."""
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            raise NotImplementedError
        else:
            # Merge results:
            results = [
                inference_utils.general_standard_nms_postprocessing(
                    input_im,
                    self.detr_probabilistic_inference(input_im),
                    self.test_nms_thresh,
                    self.test_topk_per_image) for _ in range(
                    self.num_mc_dropout_runs)]
            # Append per-ensemble outputs after NMS has been performed.
            ensemble_pred_box_list = [
                result.pred_boxes.tensor for result in results]
            ensemble_pred_prob_vectors_list = [
                result.pred_cls_probs for result in results]
            ensembles_class_idxs_list = [
                result.pred_classes for result in results]
            ensembles_pred_box_covariance_list = [
                result.pred_boxes_covariance for result in results]
            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)
    def post_processing_ensembles(self, input_im, model_dict):
        """Aggregate an ensemble of independently trained models (post-NMS).

        NOTE(review): the parameter is named ``model_dict`` but receives
        ``self.model_list`` (a list) from the base class — verify naming.
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            raise NotImplementedError
        else:
            outputs_list = []

            # Swap in each ensemble member and run standard-NMS inference.
            for model in model_dict:
                self.model = model
                outputs_list.append(
                    self.post_processing_standard_nms(input_im))

            # Merge results:
            ensemble_pred_box_list = []
            ensemble_pred_prob_vectors_list = []
            ensembles_class_idxs_list = []
            ensembles_pred_box_covariance_list = []
            for results in outputs_list:
                # Append per-ensemble outputs after NMS has been performed.
                ensemble_pred_box_list.append(results.pred_boxes.tensor)
                ensemble_pred_prob_vectors_list.append(results.pred_cls_probs)
                ensembles_class_idxs_list.append(results.pred_classes)
                ensembles_pred_box_covariance_list.append(
                    results.pred_boxes_covariance)

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)
    def post_processing_bayes_od(self, input_im):
        """
        Since there is no NMS step in DETR, bayesod is not implemented. Although possible to add NMS
        and implement it later on.
        """
        raise NotImplementedError
        pass
| 9,040
| 40.095455
| 119
|
py
|
pmb-nll
|
pmb-nll-main/src/probabilistic_modeling/losses.py
|
from collections import defaultdict
from math import comb
from math import factorial
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from core.fastmurty.mhtdaClink import (allocateWorkvarsforDA,
deallocateWorkvarsforDA, mhtda, sparse)
from core.fastmurty.mhtdaClink import sparsifyByRow as sparsify
from scipy.optimize import linear_sum_assignment
from torch.distributions.multivariate_normal import MultivariateNormal
from probabilistic_modeling.modeling_utils import (
clamp_log_variance, covariance_output_to_cholesky)
def reshape_box_preds(preds, num_classes):
    """
    Tiny helper function to reshape box predictions from [numpreds,classes*boxdim] to [numpreds,classes,boxdim]
    """
    num_preds = preds.shape[0]
    if num_preds == 0:
        return preds
    if preds.dim() == 2:
        preds = preds.unsqueeze(1)
    if preds.shape[-1] > num_classes:
        # One box predicted per class: unfold the flat class*boxdim axis.
        return preds.reshape(num_preds, num_classes, -1)
    # Single shared box: replicate it across classes.
    return preds.repeat(1, num_classes, 1)
def run_murtys(cost_matrix: torch.tensor, nsolutions: int):
    """
    Run fastmurty given cost_matrix and number of assignments to search for.
    Returns associations and costs.
    Based on example_simplest.py in fastmurty.

    Args:
        cost_matrix (torch.Tensor): [nrows, ncolumns] assignment costs.
        nsolutions (int): number of best assignments to return.

    Returns:
        tuple: (out_associations, out_costs) where out_associations is an
            int32 array of shape (nsolutions, nrows + ncolumns, 2) holding the
            row-column pairs of each association (unused pairs are (-2, -2))
            and out_costs holds the cost of each returned association.
    """
    # make all costs negative for algo to work properly
    cost_matrix_max = cost_matrix.max()
    if cost_matrix_max >= 0:
        cost_matrix = cost_matrix - (cost_matrix_max + 1)
    cost_matrix = cost_matrix.detach().numpy()
    nrows, ncolumns = cost_matrix.shape

    # sparse cost matrices only include a certain number of elements
    # the rest are implicitly infinity
    # in this case, the sparse matrix includes all elements
    # The sparse and dense versions are compiled differently (see the Makefile).
    # The variable "sparse" in mhtdaClink needs to match the version compiled
    cost_matrix_to_use = sparsify(cost_matrix, ncolumns) if sparse else cost_matrix

    # mhtda is set up to potentially take multiple input hypotheses for both rows and columns
    # input hypotheses specify a subset of rows or columns.
    # In this case, we just want to use the whole matrix.
    # NOTE: np.bool8 (used previously) was removed in NumPy 2.0; np.bool_ is
    # the same dtype under its canonical name.
    row_priors = np.ones((1, nrows), dtype=np.bool_)
    col_priors = np.ones((1, ncolumns), dtype=np.bool_)
    # Each hypothesis has a relative weight too.
    # These values don't matter if there is only one hypothesis...
    row_prior_weights = np.zeros(1)
    col_prior_weights = np.zeros(1)

    # The mhtda function modifies preallocated outputs rather than
    # allocating new ones. This is slightly more efficient for repeated use
    # within a tracker.
    # The cost of each returned association:
    out_costs = np.zeros(nsolutions)
    # The row-column pairs in each association:
    # Generally there will be less than nrows+ncolumns pairs in an association.
    # The unused pairs are currently set to (-2, -2)
    out_associations = np.zeros((nsolutions, nrows + ncolumns, 2), dtype=np.int32)
    # variables needed within the algorithm (a C function sets this up):
    workvars = allocateWorkvarsforDA(nrows, ncolumns, nsolutions)

    # run!
    mhtda(
        cost_matrix_to_use,
        row_priors,
        row_prior_weights,
        col_priors,
        col_prior_weights,
        out_associations,
        out_costs,
        workvars,
    )
    deallocateWorkvarsforDA(workvars)
    return out_associations, out_costs
def compute_negative_log_likelihood(
    box_scores: torch.tensor,
    box_regs: torch.tensor,
    box_covars: torch.tensor,
    gt_box: torch.tensor,
    gt_class: torch.tensor,
    image_size: List[int],
    reg_distribution: torch.distributions.distribution.Distribution,
    associations: np.ndarray,
    device: torch.device,
    intensity_func=lambda x: 0.00000001,
    scores_have_bg_cls=False,
    target_delta=None,
    pred_delta=None,
    pred_delta_chol=None,
):
    """Compute the PMB negative log-likelihood for the given association hypotheses.

    Args:
        box_scores (torch.tensor): [num_preds, num_classes(+1)] class scores;
            when ``scores_have_bg_cls`` the last column is the background class.
        box_regs (torch.tensor): predicted box means, indexed as [pred, class, :].
        box_covars (torch.tensor): covariance parameters, indexed as
            [pred, class]; passed to ``reg_distribution``.
        gt_box (torch.tensor): [num_gt, box_dim] ground-truth boxes.
        gt_class (torch.tensor): [num_gt] ground-truth class indices.
        image_size (List[int]): image size forwarded to ``intensity_func``.
        reg_distribution: distribution constructor for box regression.
        associations (np.ndarray): int32 hypotheses of (pred, gt) index pairs.
        device (torch.device): device for the accumulated NLL terms.
        intensity_func: PPP intensity. NOTE(review): the default lambda takes
            one argument but every call site below passes several, so the
            default is effectively a placeholder — callers must supply one.
        scores_have_bg_cls (bool): whether the last score column is background.
        target_delta / pred_delta / pred_delta_chol: optional delta-space
            targets/predictions; when given, regression NLL is computed in
            delta space instead of box space.

    Returns:
        tuple: (nll, out_dict) — scalar NLL (logsumexp over all hypotheses
        plus the PPP integral) and a dict of per-hypothesis decompositions.
    """
    # NOTE(review): ``torch.tensor`` is a factory function, not a type, so
    # this check is always True and the input is always (re)wrapped — the
    # intent was presumably ``torch.Tensor``; verify.
    if type(image_size) is not torch.tensor:
        image_size = torch.tensor(image_size)
    img_size = image_size.unsqueeze(0).to(device)
    # Existence probability of each Bernoulli = 1 - background score.
    existance_prob = 1 - box_scores[:, -1]
    num_preds, num_classes = box_scores.shape
    if scores_have_bg_cls:
        num_classes -= 1  # do not count background class
    num_gt, _ = gt_box.shape
    # Per-hypothesis decomposition of the NLL, for logging/analysis.
    out_dict = defaultdict(list)
    out_dict.update(
        {
            "matched_bernoulli": [],
            "unmatched_bernoulli": [],
            "matched_ppp": [],
            "matched_bernoulli_reg": [],
            "matched_bernoulli_cls": [],
            "num_matched_bernoulli": [],
            "num_unmatched_bernoulli": [],
            "num_matched_ppp": [],
            "ppp_integral": None,
        }
    )
    nll = torch.zeros(len(associations), dtype=torch.float64, device=device)
    # Accumulate the log-likelihood of each association hypothesis.
    for a, association in enumerate(associations):
        log_matched_bernoulli = torch.tensor(0, dtype=torch.float64, device=device)
        log_unmatched_bernoulli = torch.tensor(0, dtype=torch.float64, device=device)
        log_poisson = torch.tensor(0, dtype=torch.float64, device=device)
        log_matched_regression = torch.tensor(0, dtype=torch.float64, device=device)
        log_matched_classification = torch.tensor(0, dtype=torch.float64, device=device)
        num_matched_bernoulli = 0
        num_unmatched_bernoulli = 0
        num_matched_ppp = 0
        log_matched_bernoulli_regs = []
        log_matched_bernoulli_cls = []
        log_unmatched_bernoullis = []
        log_matched_ppps = []
        for pair in association:
            pred = pair[0]
            gt = pair[1]
            if (
                0 <= pred < num_preds
            ) and gt >= 0:  # if bernoulli was assigned to a GT element
                num_matched_bernoulli += 1
                assigned_gt = gt
                k = pred
                gt_c = gt_class[assigned_gt]
                # NOTE(review): ``r`` is assigned here but never used in this
                # branch — verify whether it was meant to enter log_f.
                if scores_have_bg_cls:
                    r = existance_prob[k]
                else:
                    r = box_scores[k, gt_c]
                # NOTE(review): ``covar`` is reassigned identically in the
                # target_delta is None case below.
                covar = box_covars[k, gt_c]
                if target_delta is None:
                    # Box-space regression likelihood.
                    covar = box_covars[k, gt_c]
                    dist = reg_distribution(box_regs[k, gt_c, :], covar)
                    regression = dist.log_prob(gt_box[assigned_gt, :]).sum()
                    classification = torch.log(box_scores[k, gt_c])
                else:
                    # Delta-space regression likelihood.
                    covar = pred_delta_chol[k, gt_c]
                    dist = reg_distribution(pred_delta[k, gt_c, :], covar)
                    regression = dist.log_prob(target_delta[k, assigned_gt, :]).sum()
                    classification = torch.log(box_scores[k, gt_c])
                log_f = regression + classification
                # Save stats
                log_matched_bernoulli_regs.append(-regression.squeeze().item())
                log_matched_bernoulli_cls.append(-classification.squeeze().item())
                # Update total bernoulli component
                log_matched_bernoulli = log_matched_bernoulli + log_f.squeeze()
                log_matched_regression = log_matched_regression + regression.squeeze()
                log_matched_classification = (
                    log_matched_classification + classification.squeeze()
                )
            elif (
                0 <= pred < num_preds
            ) and gt == -1:  # if bernoulli was not assigned to a GT element
                num_unmatched_bernoulli += 1
                k = pred
                # Likelihood of the Bernoulli not existing.
                if scores_have_bg_cls:
                    log_f = torch.log(1 - existance_prob[k])
                else:
                    log_f = torch.log(1 - box_scores[k].max())
                log_unmatched_bernoulli = log_unmatched_bernoulli + log_f.squeeze()
                # Save stats
                log_unmatched_bernoullis.append(-log_f.squeeze().item())
            elif (pred >= num_preds) and (
                gt >= 0
            ):  # if poisson was assigned to a GT element
                num_matched_ppp += 1
                assigned_gt = gt
                gt_c = gt_class[assigned_gt].unsqueeze(0)
                # PPP intensity evaluated at the GT [box, class] vector.
                gt_vec = torch.cat([gt_box[assigned_gt, :], gt_c])
                log_f = intensity_func(gt_vec.unsqueeze(0), img_size).squeeze()
                log_poisson = log_poisson + log_f
                # Save stats
                log_matched_ppps.append(-log_f.item())
        association_sum = log_matched_bernoulli + log_unmatched_bernoulli + log_poisson
        out_dict["matched_bernoulli"].append(-log_matched_bernoulli.item())
        out_dict["matched_bernoulli_reg"].append(-log_matched_regression.item())
        out_dict["matched_bernoulli_cls"].append(-log_matched_classification.item())
        out_dict["num_matched_bernoulli"].append(num_matched_bernoulli)
        out_dict["unmatched_bernoulli"].append(-log_unmatched_bernoulli.item())
        out_dict["num_unmatched_bernoulli"].append(num_unmatched_bernoulli)
        out_dict["matched_ppp"].append(-log_poisson.item())
        out_dict["num_matched_ppp"].append(num_matched_ppp)
        out_dict["matched_bernoulli_regs"].append(log_matched_bernoulli_regs)
        out_dict["matched_bernoulli_clss"].append(log_matched_bernoulli_cls)
        out_dict["unmatched_bernoullis"].append(log_unmatched_bernoullis)
        out_dict["matched_ppps"].append(log_matched_ppps)
        nll[a] = association_sum
    # Marginalize over hypotheses in log space, then add the PPP integral.
    nll = torch.logsumexp(nll, -1)
    n_class = torch.tensor(num_classes).unsqueeze(0).to(device)
    ppp_regularizer = intensity_func(None, img_size, n_class, integrate=True).squeeze()
    nll = ppp_regularizer - nll
    out_dict["ppp_integral"] = ppp_regularizer.item()
    out_dict["total"] = [
        out_dict["matched_bernoulli"][i]
        + out_dict["unmatched_bernoulli"][i]
        + out_dict["matched_ppp"][i]
        + out_dict["ppp_integral"]
        for i in range(len(associations))
    ]
    return nll, out_dict
def negative_log_likelihood_matching(
    box_scores: torch.tensor,
    box_regs: torch.tensor,
    box_covars: torch.tensor,
    gt_box: torch.tensor,
    gt_class: torch.tensor,
    image_size: List[int],
    reg_distribution: torch.distributions.distribution.Distribution,
    device: torch.device,
    intensity_func=lambda x: 0.00000001,
    max_n_solutions: int = 5,
    scores_have_bg_cls=False,
    target_delta=None,
    distance_type="log_prob",
    covar_scaling = 1,
    use_target_delta_matching=True,
    pred_delta=None,
    pred_delta_chol=None,
):
    """Find the most likely association hypotheses between predictions and GT.

    Builds a (num_preds + num_gt) x num_gt cost matrix where the top
    ``num_preds`` rows are Bernoulli-vs-GT matching costs and the bottom
    ``num_gt`` rows (diagonal) are PPP-vs-GT costs, then runs fastmurty
    (``run_murtys``) to obtain up to ``max_n_solutions`` assignments.

    Returns:
        np.ndarray: int32 array of shape (nsolutions, num_preds + num_gt, 2)
        of (pred, gt) index pairs, as consumed by
        ``compute_negative_log_likelihood``. Unmatched entries carry -1/-2.
    """
    img_size = torch.tensor(image_size).unsqueeze(0).to(device)
    num_preds, num_classes = box_scores.shape
    if scores_have_bg_cls:
        num_classes -= 1  # do not count background class
    num_gt = gt_box.shape[0]
    # Existence probability of each Bernoulli = 1 - background score.
    existance_prob = 1 - box_scores[:, -1]
    # Init potential covar scaling for matching
    covar_scaling = torch.eye(box_covars.shape[-1]).to(box_covars.device)*covar_scaling
    # save indices of inf cost
    infinite_costs = []
    with torch.no_grad():
        # Degenerate case: no GT or no predictions — everything unmatched.
        if not(num_gt > 0 and num_preds > 0):
            associations = -np.ones((1, num_preds + num_gt, 2))
            if num_gt > 0:
                associations[0, -num_gt:, 1] = np.arange(num_gt)
            associations[0, :, 0] = np.arange(num_preds + num_gt)
            associations = associations.astype(np.int32)
            return associations
        # Assemble and fill cost matrix
        cost_matrix = torch.zeros((num_preds + num_gt, num_gt), dtype=torch.float64)
        if scores_have_bg_cls:
            r = existance_prob.unsqueeze(-1).repeat(1, num_gt)
        else:
            r = box_scores[:, gt_class]  # assume existance prob == class prob
        covar = box_covars[:, gt_class] if pred_delta_chol is None or not use_target_delta_matching else pred_delta_chol[:, gt_class]
        reg_means = box_regs if pred_delta is None or not use_target_delta_matching else pred_delta
        # Repeat gt to be [num_preds,num_gt,dim] if needed
        if len(gt_box.shape) < len(reg_means[:, gt_class].shape):
            gt_box = gt_box.unsqueeze(0).repeat(num_preds, 1, 1)
        if distance_type == "log_prob":
            # Covar is actually cholesky decomposed, hence only one multiplication with scaling
            scaled_covar = covar_scaling@covar
            dist = reg_distribution(reg_means[:, gt_class], scaled_covar)
            if target_delta is None or not use_target_delta_matching:
                log_p = dist.log_prob(gt_box)
            else:
                log_p = dist.log_prob(target_delta)
        elif distance_type == "euclidian_squared":
            # We use minus since its sign is reversed later (and cost should be minimized)
            if target_delta is None or not use_target_delta_matching:
                log_p = -(reg_means[:, gt_class] - gt_box).pow(2).sum(-1)
            else:
                log_p = -(reg_means[:, gt_class] - target_delta).pow(2).sum(-1)
        elif distance_type == "euclidian":
            # We use minus since its sign is reversed later (and cost should be minimized)
            if target_delta is None or not use_target_delta_matching:
                log_p = -(reg_means[:, gt_class] - gt_box).pow(2).sum(-1).sqrt()
            else:
                log_p = (
                    -(reg_means[:, gt_class] - target_delta).pow(2).sum(-1).sqrt()
                )
        else:
            raise NotImplementedError(
                f'Distance type for PMB-NLL matching "{distance_type}" not implemented.'
            )
        log_p = log_p.sum(-1) if len(log_p.shape) > 2 else log_p
        log_p = log_p + torch.log(
            box_scores[:, gt_class]
        )  # box regression + class scores conditioned on existance
        # Bernoulli match cost relative to leaving the Bernoulli unmatched.
        cost = -(log_p - torch.log(1 - r))
        cost_matrix[:num_preds] = cost
        # Non-finite entries are zeroed for now and repaired further below.
        if not torch.isfinite(cost).all():
            for k, l in torch.isfinite(cost).logical_not().nonzero():
                infinite_costs.append((k, l))
                cost_matrix[k, l] = 0
        # Build GT vector with [box, class]
        if target_delta is None or not use_target_delta_matching:
            gt_vec = torch.cat([gt_box[0, :, :], gt_class.unsqueeze(-1)], -1)
        else:
            gt_vec = torch.cat([target_delta[0, :, :], gt_class.unsqueeze(-1)], -1)
        # PPP cost
        cost = -intensity_func(gt_vec, img_size, dist_type=distance_type)
        if torch.isfinite(cost).all():
            cost_matrix[num_preds:] = torch.diag(cost)
        else:
            cost_matrix[num_preds:] = torch.diag(cost)
            for l in torch.isfinite(cost).logical_not().nonzero():
                infinite_costs.append((num_preds + l, l))
                cost_matrix[num_preds + l, l] = 0
        # Fill in "inf"
        # Off-diagonal PPP cells are forbidden (3x largest cost); formerly
        # non-finite cells are strongly discouraged (2x largest cost).
        if cost_matrix.numel() > 0:
            largest_cost = cost_matrix.max()
            for k in range(num_preds, num_preds + num_gt):  # loop over predictions
                for l in range(num_gt):  # loop over ground truths
                    if k != (l + num_preds):
                        cost_matrix[k, l] = largest_cost * 3
            for coord in infinite_costs:
                k, l = coord
                cost_matrix[k, l] = largest_cost * 2
        # Find nsolutions best solutions
        # Count the number of distinct associations with i matched GTs.
        nsolutions = 0
        for i in range(num_gt+1):
            if i > num_preds or nsolutions > max_n_solutions:
                break
            nsolutions += (factorial(num_preds)//factorial(num_preds-i))*comb(num_gt, i)
        nsolutions = min(
            max_n_solutions, nsolutions
        )  # comb gives maximum number unique associations
        try:
            associations, _ = run_murtys(cost_matrix, nsolutions)
        except AssertionError:
            # Fallback: a single hypothesis from the Hungarian algorithm.
            print(
                "[NLLOD] Murtys could not find solution! Using linear sum assignment."
            )
            row_ind, col_ind = linear_sum_assignment(cost_matrix.cpu().numpy())
            associations = -np.ones((1, num_preds + num_gt, 2))
            associations[0, :, 0] = np.arange(num_preds + num_gt)
            associations[0, row_ind, 1] = col_ind
            associations = associations.astype(np.int32)
    return associations
def negative_log_likelihood(
    pred_box_scores: List[torch.tensor],
    pred_box_regs: List[torch.tensor],
    pred_box_covars: List[torch.tensor],
    gt_boxes: List[torch.tensor],
    gt_classes: List[torch.tensor],
    image_sizes: List[List[int]],
    reg_distribution: torch.distributions.distribution.Distribution,
    intensity_func=lambda x: 0.00000001,
    max_n_solutions: int = 5,
    training: bool = True,
    scores_have_bg_cls: bool = True,
    target_deltas: torch.tensor = None,
    matching_distance: str = "log_prob",
    covar_scaling: float = 1.0,
    use_target_delta_matching=False,
    pred_deltas=None,
    pred_delta_chols=None,
):
    """
    Calculate the negative log-likelihood (NLL) for a PMB prediction over a batch.

    For each image, the predictions are matched against the ground truth via
    ``negative_log_likelihood_matching`` and the resulting associations are
    scored by ``compute_negative_log_likelihood``.

    Args:
        pred_box_scores: per-image class score tensors, [N, num_classes(+1)].
        pred_box_regs: per-image box regressions, [N, num_classes, boxdims].
        pred_box_covars: per-image box covariances, [N, num_classes, boxdims, boxdims].
        gt_boxes: per-image ground-truth boxes, [M, boxdims].
        gt_classes: per-image ground-truth class labels, [M].
        image_sizes: per-image (height, width) sizes.
        reg_distribution: factory producing the regression distribution.
        intensity_func: PPP intensity; either a callable, a dict with
            "matching"/"loss" entries, or a per-image list of either.
        max_n_solutions: cap on the number of Murty solutions considered.
        training: when True, each image's NLL is normalized by its number of
            predictions before averaging.
        scores_have_bg_cls: whether the last score column is the background class.
        target_deltas / pred_deltas / pred_delta_chols: optional per-image
            delta-space quantities used for matching / likelihood evaluation.
        matching_distance: distance used when matching ("log_prob", ...).
        covar_scaling: scaling applied to covariances during matching.
        use_target_delta_matching: match in delta space instead of box space.

    Returns:
        (mean NLL over the batch, per-image association arrays,
         per-image loss decompositions)
    """

    def _as_matching_loss_ppp(fn):
        # Accept either a bare intensity function or a dict with separate
        # "matching"/"loss" intensity functions; always return the dict form.
        return fn if isinstance(fn, dict) else {"matching": fn, "loss": fn}

    assert len(pred_box_scores) == len(pred_box_regs) == len(pred_box_covars)
    device = pred_box_scores[0].device
    nll_total_losses = torch.tensor(
        0, dtype=torch.float64, device=device, requires_grad=training
    )
    bs = len(pred_box_scores)
    total_associations = []
    total_decompositions = []
    for i in range(bs):  # loop over images
        # Fix: use isinstance instead of `type(x) == list` / `type(x) != dict`.
        ppp = _as_matching_loss_ppp(
            intensity_func[i] if isinstance(intensity_func, list) else intensity_func
        )
        # [N, num_classes] or [N, num_classes+1]
        box_scores = pred_box_scores[i]
        num_preds, num_classes = box_scores.shape
        if scores_have_bg_cls:
            num_classes -= 1  # do not count background class
        # [N, num_classes, boxdims]
        box_regs = pred_box_regs[i]
        # [N, num_classes, boxdims, boxdims]
        box_covars = pred_box_covars[i]
        # [M, boxdims]
        gt_box = gt_boxes[i]
        # [M, 1]
        gt_class = gt_classes[i]
        # Optional per-image delta-space inputs ([N, M, boxdims] when present).
        target_delta = None if target_deltas is None else target_deltas[i]
        pred_delta = None if pred_deltas is None else pred_deltas[i]
        pred_delta_chol = None if pred_delta_chols is None else pred_delta_chols[i]
        image_size = image_sizes[i]
        associations = negative_log_likelihood_matching(
            box_scores,
            box_regs,
            box_covars,
            gt_box,
            gt_class,
            image_size,
            reg_distribution,
            device,
            ppp["matching"],
            max_n_solutions,
            scores_have_bg_cls,
            target_delta,
            matching_distance,
            covar_scaling,
            use_target_delta_matching,
            pred_delta,
            pred_delta_chol,
        )
        nll, decomposition = compute_negative_log_likelihood(
            box_scores=box_scores,
            box_regs=box_regs,
            box_covars=box_covars,
            gt_box=gt_box,
            gt_class=gt_class,
            image_size=image_size,
            reg_distribution=reg_distribution,
            associations=associations,
            device=device,
            intensity_func=ppp["loss"],
            scores_have_bg_cls=scores_have_bg_cls,
            target_delta=target_delta,
            pred_delta=pred_delta,
            pred_delta_chol=pred_delta_chol,
        )
        if torch.isfinite(nll):
            # Normalize by the number of predictions when training so that
            # images with many detections do not dominate the batch loss.
            if training:
                number_preds = (
                    decomposition["num_matched_ppp"][0]
                    + decomposition["num_matched_bernoulli"][0]
                    + decomposition["num_unmatched_bernoulli"][0]
                )
                regularizer = max(1, number_preds)
                nll_total_losses = nll_total_losses + nll / regularizer
            else:
                nll_total_losses = nll_total_losses + nll
        else:
            # Exclude this image from the batch average (the max keeps the
            # final division well-defined even if every image is skipped).
            bs = max(1, bs - 1)
            print("WARNING: Infinite loss in NLL!")
            print(f"box scores: {box_scores}")
            print(f"box_regs: {box_regs}")
            print(f"box_covars: {box_covars}")
            print(f"gt_box: {gt_box}")
            print(f"gt_class: {gt_class}")
            print(f"associations: {associations}")
        total_associations.append(associations)
        total_decompositions.append(decomposition)
    return nll_total_losses / bs, total_associations, total_decompositions
| 21,481
| 37.846293
| 152
|
py
|
pmb-nll
|
pmb-nll-main/src/probabilistic_modeling/probabilistic_retinanet.py
|
import logging
import math
from typing import List, Tuple
import numpy as np
import torch
from core.visualization_tools.probabilistic_visualizer import ProbabilisticVisualizer
from detectron2.data.detection_utils import convert_image_to_rgb
# Detectron Imports
from detectron2.layers import ShapeSpec, batched_nms, cat, nonzero_tuple
from detectron2.modeling.anchor_generator import build_anchor_generator
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.retinanet import (
RetinaNet,
RetinaNetHead,
permute_to_N_HWA_K,
)
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.structures import Boxes, Instances
from detectron2.utils.events import get_event_storage
from fvcore.nn import sigmoid_focal_loss_jit, smooth_l1_loss
from matplotlib import cm
from probabilistic_inference import inference_utils
from torch import Tensor, distributions, nn
from probabilistic_modeling.losses import (
negative_log_likelihood,
negative_log_likelihood_matching,
)
# Project Imports
from probabilistic_modeling.modeling_utils import (
PoissonPointProcessIntensityFunction,
clamp_log_variance,
covariance_output_to_cholesky,
get_probabilistic_loss_weight,
unscented_transform,
PoissonPointUnion,
)
@META_ARCH_REGISTRY.register()
class ProbabilisticRetinaNet(RetinaNet):
"""
Probabilistic retinanet class.
"""
def __init__(self, cfg):
super().__init__(cfg)
# Parse configs
self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME
self.compute_cls_var = self.cls_var_loss != "none"
self.cls_var_num_samples = (
cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES
)
self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME
self.compute_bbox_cov = self.bbox_cov_loss != "none"
self.bbox_cov_num_samples = (
cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES
)
self.bbox_cov_dist_type = (
cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
)
self.bbox_cov_type = (
cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE
)
if self.bbox_cov_type == "diagonal":
# Diagonal covariance matrix has N elements
self.bbox_cov_dims = 4
else:
# Number of elements required to describe an NxN covariance matrix is
# computed as: (N * (N + 1)) / 2
self.bbox_cov_dims = 10
if self.bbox_cov_loss == "pmb_negative_log_likelihood":
self.ppp_constructor = lambda x: PoissonPointProcessIntensityFunction(
cfg, **x
)
self.ppp_intensity_function = PoissonPointProcessIntensityFunction(cfg, device=self.device)
self.nll_max_num_solutions = (
cfg.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS
)
self.matching_distance = cfg.MODEL.PROBABILISTIC_MODELING.MATCHING_DISTANCE
self.use_prediction_mixture = cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE
self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
self.use_dropout = self.dropout_rate != 0.0
self.current_step = 0
self.annealing_step = (
cfg.SOLVER.STEPS[1]
if cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP <= 0
else cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP
)
# Define custom probabilistic head
backbone_shape = self.backbone.output_shape()
feature_shapes = [backbone_shape[f] for f in self.head_in_features]
self.head = ProbabilisticRetinaNetHead(
cfg,
self.use_dropout,
self.dropout_rate,
self.compute_cls_var,
self.compute_bbox_cov,
self.bbox_cov_dims,
feature_shapes,
)
# Send to device
self.to(self.device)
def get_ppp_intensity_function(self):
return self.ppp_intensity_function
def forward(
self, batched_inputs, return_anchorwise_output=False, num_mc_dropout_runs=-1
):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances: Instances
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
return_anchorwise_output (bool): returns raw output for probabilistic inference
num_mc_dropout_runs (int): perform efficient monte-carlo dropout runs by running only the head and
not full neural network.
Returns:
dict[str: Tensor]:
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
# Update step
try:
self.current_step += get_event_storage().iter
except:
self.current_step += 1
# Preprocess image
images = self.preprocess_image(batched_inputs)
# Extract features and generate anchors
features = self.backbone(images.tensor)
features = [features[f] for f in self.head_in_features]
anchors = self.anchor_generator(features)
# MC_Dropout inference forward
if num_mc_dropout_runs > 1:
anchors = anchors * num_mc_dropout_runs
features = features * num_mc_dropout_runs
output_dict = self.produce_raw_output(anchors, features)
return output_dict
# Regular inference forward
if return_anchorwise_output:
return self.produce_raw_output(anchors, features)
# Training and validation forward
(
pred_logits,
pred_anchor_deltas,
pred_logits_vars,
pred_anchor_deltas_vars,
) = self.head(features)
# Transpose the Hi*Wi*A dimension to the middle:
pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits]
pred_anchor_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas]
if pred_logits_vars is not None:
pred_logits_vars = [
permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits_vars
]
if pred_anchor_deltas_vars is not None:
pred_anchor_deltas_vars = [
permute_to_N_HWA_K(x, self.bbox_cov_dims)
for x in pred_anchor_deltas_vars
]
if self.training:
assert (
"instances" in batched_inputs[0]
), "Instance annotations are missing in training!"
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
gt_classes, gt_boxes = self.label_anchors(anchors, gt_instances)
self.anchors = torch.cat(
[Boxes.cat(anchors).tensor for i in range(len(gt_instances))], 0
)
# Loss is computed based on what values are to be estimated by the neural
# network
losses = self.losses(
anchors,
gt_classes,
gt_boxes,
pred_logits,
pred_anchor_deltas,
pred_logits_vars,
pred_anchor_deltas_vars,
gt_instances,
images.image_sizes,
)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
results = self.inference(
anchors, pred_logits, pred_anchor_deltas, images.image_sizes
)
self.visualize_training(
batched_inputs,
results,
pred_logits,
pred_anchor_deltas,
pred_anchor_deltas_vars,
anchors,
)
return losses
else:
results = self.inference(
anchors, pred_logits, pred_anchor_deltas, images.image_sizes
)
processed_results = []
for results_per_image, input_per_image, image_size in zip(
results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image[0], height, width)
processed_results.append({"instances": r})
return processed_results
    def visualize_training(
        self,
        batched_inputs,
        results,
        pred_logits,
        pred_anchor_deltas,
        pred_anchor_deltas_vars,
        anchors,
    ):
        """
        A function used to visualize ground truth images and final network predictions.
        It shows ground truth bounding boxes on the original image and up to 20
        predicted object bounding boxes on the original image.

        Args:
            batched_inputs (list): a list that contains input to the model.
            results (List[Instances]): a list of #images elements; here a
                (instances, kept_idx) pair as returned by :meth:`inference`.
            pred_logits: per-level classification logits from the head.
            pred_anchor_deltas: per-level box regression deltas from the head.
            pred_anchor_deltas_vars: per-level box covariance parameters.
            anchors: per-level anchor Boxes.
        """
        from detectron2.utils.visualizer import Visualizer

        pred_instaces, kept_idx = results
        assert len(batched_inputs) == len(
            pred_instaces
        ), "Cannot visualize inputs and results of different sizes"
        storage = get_event_storage()
        max_boxes = 20

        image_index = 0  # only visualize a single image
        img = batched_inputs[image_index]["image"]
        img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)

        # Extract NMS kept predictions.
        # NOTE(review): kept_idx is indexed directly here rather than
        # per-image — presumably valid because only image 0 is visualized;
        # confirm against the inference() return structure.
        box_scores = torch.cat([logits.squeeze() for logits in pred_logits])[
            kept_idx
        ].sigmoid()
        # Append a pseudo "background" column (1 - top score) to the scores.
        box_scores = torch.cat(
            (box_scores, 1 - pred_instaces[image_index].scores.unsqueeze(-1)), dim=-1
        )
        anchor_deltas = torch.cat([delta.squeeze() for delta in pred_anchor_deltas])[
            kept_idx
        ]
        anchor_delta_vars = torch.cat(
            [var.squeeze() for var in pred_anchor_deltas_vars]
        )[kept_idx]
        anchor_boxes = torch.cat([box.tensor.squeeze() for box in anchors])[kept_idx]
        # Lower-triangular Cholesky factor of the delta-space covariance.
        cholesky_decomp = covariance_output_to_cholesky(anchor_delta_vars)

        ######## Get covariance for corner coordinates instead #########
        # Propagate delta-space uncertainty to box-corner space by
        # Monte-Carlo sampling through the box transform.
        multivariate_normal_samples = torch.distributions.MultivariateNormal(
            anchor_deltas, scale_tril=cholesky_decomp
        )

        # Define monte-carlo samples
        distributions_samples = multivariate_normal_samples.rsample((1000,))
        distributions_samples = torch.transpose(
            torch.transpose(distributions_samples, 0, 1), 1, 2
        )
        samples_proposals = torch.repeat_interleave(
            anchor_boxes.unsqueeze(2), 1000, dim=2
        )

        # Transform samples from deltas to boxes
        box_transform = inference_utils.SampleBox2BoxTransform(
            self.box2box_transform.weights
        )
        t_dist_samples = box_transform.apply_samples_deltas(
            distributions_samples, samples_proposals
        )

        # Compute samples mean and covariance matrices.
        _, boxes_covars = inference_utils.compute_mean_covariance_torch(t_dist_samples)

        # Scale if image has been reshaped during processing
        scale_x, scale_y = (
            img.shape[1] / pred_instaces[image_index].image_size[1],
            img.shape[0] / pred_instaces[image_index].image_size[0],
        )
        # Covariances scale with the square of the coordinate scaling.
        scaling = torch.tensor(np.stack([scale_x, scale_y, scale_x, scale_y]) ** 2).to(
            device=boxes_covars.device
        )
        boxes_covars = (boxes_covars * scaling).float()

        processed_results = detector_postprocess(
            pred_instaces[image_index], img.shape[0], img.shape[1]
        )
        predicted_boxes = processed_results.pred_boxes.tensor

        # Build the regression-distribution factory matching the configured
        # covariance distribution type.
        if self.bbox_cov_dist_type == "gaussian":
            reg_distribution = (
                lambda x, y: distributions.multivariate_normal.MultivariateNormal(x, y)
            )
        elif self.bbox_cov_dist_type == "laplacian":
            # Cholesky diagonal / sqrt(2) converts variance to Laplace scale.
            reg_distribution = lambda x, y: distributions.laplace.Laplace(
                loc=x, scale=(y.diagonal(dim1=-2, dim2=-1) / np.sqrt(2))
            )
        else:
            raise Exception(
                f"Bounding box uncertainty distribution {self.bbox_cov_dist_type} is not available."
            )

        # Match predictions to ground truth (best single association only).
        # NOTE(review): the repeat counts hard-code 80 classes (COCO) —
        # confirm this matches self.num_classes for other datasets.
        associations = negative_log_likelihood_matching(
            box_scores,
            box_regs=predicted_boxes.unsqueeze(1).repeat(1, 80, 1),
            box_covars=boxes_covars.unsqueeze(1).repeat(1, 80, 1, 1),
            gt_box=batched_inputs[image_index]["instances"].gt_boxes.tensor,
            gt_class=batched_inputs[image_index]["instances"].gt_classes,
            image_size=img.shape,
            reg_distribution=reg_distribution,
            device=boxes_covars.device,
            intensity_func=self.ppp_intensity_function,
            max_n_solutions=1,
        )

        ################# Draw results ####################
        # One distinct color per ground-truth object; matched predictions are
        # drawn in the color of their associated ground truth.
        color_map = cm.get_cmap("tab20")
        num_gt = batched_inputs[image_index]["instances"].gt_boxes.tensor.shape[0]
        gt_colors = [color_map(i) for i in range(num_gt)]
        v_gt = Visualizer(img, None)
        v_gt = v_gt.overlay_instances(
            boxes=batched_inputs[image_index]["instances"].gt_boxes,
            assigned_colors=gt_colors,
        )
        anno_img = v_gt.get_image()
        num_preds = len(boxes_covars)
        # Unmatched predictions are drawn in black.
        pred_colors = [(0.0, 0.0, 0.0, 1.0)] * num_preds
        for i in range(num_preds):
            matched_gt = associations[0, i, 1]
            if matched_gt >= 0:
                pred_colors[i] = color_map(matched_gt)

        # Label each prediction with "class: score".
        pred_labels = [
            f"{pred_class.item()}: {round(pred_score.item(),2)}"
            for pred_class, pred_score in zip(
                pred_instaces[image_index].pred_classes,
                pred_instaces[image_index].scores,
            )
        ]
        v_pred = ProbabilisticVisualizer(img, None)
        v_pred = v_pred.overlay_covariance_instances(
            boxes=predicted_boxes[:max_boxes].detach().cpu().numpy(),
            covariance_matrices=boxes_covars[:max_boxes].detach().cpu().numpy(),
            assigned_colors=pred_colors,
            labels=pred_labels[:max_boxes],
        )

        prop_img = v_pred.get_image()
        # Stack GT visualization on top of the prediction visualization and
        # convert HWC -> CHW for the event-storage image writer.
        vis_img = np.vstack((anno_img, prop_img))
        vis_img = vis_img.transpose(2, 0, 1)
        vis_name = (
            f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results"
        )
        storage.put_image(vis_name, vis_img)
def losses(
self,
anchors,
gt_classes,
gt_boxes,
pred_class_logits,
pred_anchor_deltas,
pred_class_logits_var=None,
pred_bbox_cov=None,
gt_instances=None,
image_sizes: List[Tuple[int, int]] = [],
):
"""
Args:
For `gt_classes` and `gt_anchors_deltas` parameters, see
:meth:`RetinaNet.get_ground_truth`.
Their shapes are (N, R) and (N, R, 4), respectively, where R is
the total number of anchors across levels, i.e. sum(Hi x Wi x A)
For `pred_class_logits`, `pred_anchor_deltas`, `pred_class_logits_var` and `pred_bbox_cov`, see
:meth:`RetinaNetHead.forward`.
Returns:
dict[str: Tensor]:
mapping from a named loss to a scalar tensor
storing the loss. Used during training only. The dict keys are:
"loss_cls" and "loss_box_reg"
"""
num_images = len(gt_classes)
gt_labels = torch.stack(gt_classes) # (N, R)
# Do NMS before reshaping stuff
if self.bbox_cov_loss == "pmb_negative_log_likelihood":
with torch.no_grad():
nms_results = self.inference(
anchors, pred_class_logits, pred_anchor_deltas, image_sizes
)
anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)
gt_anchor_deltas = [
self.box2box_transform.get_deltas(anchors, k) for k in gt_boxes
]
gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)
valid_mask = gt_labels >= 0
pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes)
num_pos_anchors = pos_mask.sum().item()
get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images)
self.loss_normalizer = self.loss_normalizer_momentum * self.loss_normalizer + (
1 - self.loss_normalizer_momentum
) * max(num_pos_anchors, 1)
# classification and regression loss
# Shapes:
# (N x R, K) for class_logits and class_logits_var.
# (N x R, 4), (N x R x 10) for pred_anchor_deltas and pred_class_bbox_cov respectively.
# Transform per-feature layer lists to a single tensor
pred_class_logits = cat(pred_class_logits, dim=1)
pred_anchor_deltas = cat(pred_anchor_deltas, dim=1)
if pred_class_logits_var is not None:
pred_class_logits_var = cat(pred_class_logits_var, dim=1)
if pred_bbox_cov is not None:
pred_bbox_cov = cat(pred_bbox_cov, dim=1)
gt_classes_target = torch.nn.functional.one_hot(
gt_labels[valid_mask], num_classes=self.num_classes + 1
)[:, :-1].to(
pred_class_logits[0].dtype
) # no loss for the last (background) class
# Classification losses
if self.compute_cls_var:
# Compute classification variance according to:
# "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
if self.cls_var_loss == "loss_attenuation":
num_samples = self.cls_var_num_samples
# Compute standard deviation
pred_class_logits_var = torch.sqrt(
torch.exp(pred_class_logits_var[valid_mask])
)
pred_class_logits = pred_class_logits[valid_mask]
# Produce normal samples using logits as the mean and the standard deviation computed above
# Scales with GPU memory. 12 GB ---> 3 Samples per anchor for
# COCO dataset.
univariate_normal_dists = distributions.normal.Normal(
pred_class_logits, scale=pred_class_logits_var
)
pred_class_stochastic_logits = univariate_normal_dists.rsample(
(num_samples,)
)
pred_class_stochastic_logits = pred_class_stochastic_logits.view(
(
pred_class_stochastic_logits.shape[1] * num_samples,
pred_class_stochastic_logits.shape[2],
-1,
)
)
pred_class_stochastic_logits = pred_class_stochastic_logits.squeeze(2)
# Produce copies of the target classes to match the number of
# stochastic samples.
gt_classes_target = torch.unsqueeze(gt_classes_target, 0)
gt_classes_target = torch.repeat_interleave(
gt_classes_target, num_samples, dim=0
).view(
(
gt_classes_target.shape[1] * num_samples,
gt_classes_target.shape[2],
-1,
)
)
gt_classes_target = gt_classes_target.squeeze(2)
# Produce copies of the target classes to form the stochastic
# focal loss.
loss_cls = (
sigmoid_focal_loss_jit(
pred_class_stochastic_logits,
gt_classes_target,
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
)
/ (num_samples * max(1, self.loss_normalizer))
)
else:
raise ValueError(
"Invalid classification loss name {}.".format(self.bbox_cov_loss)
)
else:
# Standard loss computation in case one wants to use this code
# without any probabilistic inference.
loss_cls = (
sigmoid_focal_loss_jit(
pred_class_logits[valid_mask],
gt_classes_target,
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
)
/ max(1, self.loss_normalizer)
)
# Compute Regression Loss
if self.bbox_cov_loss == "pmb_negative_log_likelihood":
og_pred_anchor_deltas = pred_anchor_deltas
pred_anchor_deltas = pred_anchor_deltas[pos_mask]
gt_anchors_deltas = gt_anchor_deltas[pos_mask]
if self.compute_bbox_cov:
# We have to clamp the output variance else probabilistic metrics
# go to infinity.
if self.bbox_cov_loss == "pmb_negative_log_likelihood":
og_pred_bbox_cov = pred_bbox_cov
pred_bbox_cov = clamp_log_variance(pred_bbox_cov[pos_mask])
if self.bbox_cov_loss == "negative_log_likelihood":
if self.bbox_cov_type == "diagonal":
# Compute regression variance according to:
# "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
# This implementation with smooth_l1_loss outperforms using
# torch.distribution.multivariate_normal. Losses might have different numerical values
# since we do not include constants in this implementation.
loss_box_reg = (
0.5
* torch.exp(-pred_bbox_cov)
* smooth_l1_loss(
pred_anchor_deltas,
gt_anchors_deltas,
beta=self.smooth_l1_beta,
)
)
loss_covariance_regularize = 0.5 * pred_bbox_cov
loss_box_reg += loss_covariance_regularize
# Sum over all elements
loss_box_reg = torch.sum(loss_box_reg) / max(
1, self.loss_normalizer
)
else:
# Multivariate negative log likelihood. Implemented with
# pytorch multivariate_normal.log_prob function. Custom implementations fail to finish training
# due to NAN loss.
# This is the Cholesky decomposition of the covariance matrix. We reconstruct it from 10 estimated
# parameters as a lower triangular matrix.
forecaster_cholesky = covariance_output_to_cholesky(pred_bbox_cov)
# Compute multivariate normal distribution using torch
# distribution functions.
multivariate_normal_dists = (
distributions.multivariate_normal.MultivariateNormal(
pred_anchor_deltas, scale_tril=forecaster_cholesky
)
)
loss_box_reg = -multivariate_normal_dists.log_prob(
gt_anchors_deltas
)
loss_box_reg = torch.sum(loss_box_reg) / max(
1, self.loss_normalizer
)
elif self.bbox_cov_loss == "second_moment_matching":
# Compute regression covariance using second moment matching.
loss_box_reg = smooth_l1_loss(
pred_anchor_deltas, gt_anchors_deltas, beta=self.smooth_l1_beta
)
# Compute errors
errors = pred_anchor_deltas - gt_anchors_deltas
if self.bbox_cov_type == "diagonal":
# Compute second moment matching term.
second_moment_matching_term = smooth_l1_loss(
torch.exp(pred_bbox_cov), errors ** 2, beta=self.smooth_l1_beta
)
loss_box_reg += second_moment_matching_term
loss_box_reg = torch.sum(loss_box_reg) / max(
1, self.loss_normalizer
)
else:
# Compute second moment matching term.
errors = torch.unsqueeze(errors, 2)
gt_error_covar = torch.matmul(errors, torch.transpose(errors, 2, 1))
# This is the cholesky decomposition of the covariance matrix. We reconstruct it from 10 estimated
# parameters as a lower triangular matrix.
forecaster_cholesky = covariance_output_to_cholesky(pred_bbox_cov)
predicted_covar = torch.matmul(
forecaster_cholesky, torch.transpose(forecaster_cholesky, 2, 1)
)
second_moment_matching_term = smooth_l1_loss(
predicted_covar,
gt_error_covar,
beta=self.smooth_l1_beta,
reduction="sum",
)
loss_box_reg = (
torch.sum(loss_box_reg) + second_moment_matching_term
) / max(1, self.loss_normalizer)
elif self.bbox_cov_loss == "energy_loss":
# Compute regression variance according to energy score loss.
forecaster_means = pred_anchor_deltas
# Compute forecaster cholesky. Takes care of diagonal case
# automatically.
forecaster_cholesky = covariance_output_to_cholesky(pred_bbox_cov)
# Define normal distribution samples. To compute energy score,
# we need i+1 samples.
# Define per-anchor Distributions
multivariate_normal_dists = (
distributions.multivariate_normal.MultivariateNormal(
forecaster_means, scale_tril=forecaster_cholesky
)
)
# Define Monte-Carlo Samples
distributions_samples = multivariate_normal_dists.rsample(
(self.bbox_cov_num_samples + 1,)
)
distributions_samples_1 = distributions_samples[
0 : self.bbox_cov_num_samples, :, :
]
distributions_samples_2 = distributions_samples[
1 : self.bbox_cov_num_samples + 1, :, :
]
# Compute energy score
gt_anchors_deltas_samples = torch.repeat_interleave(
gt_anchors_deltas.unsqueeze(0), self.bbox_cov_num_samples, dim=0
)
energy_score_first_term = (
2.0
* smooth_l1_loss(
distributions_samples_1,
gt_anchors_deltas_samples,
beta=self.smooth_l1_beta,
reduction="sum",
)
/ self.bbox_cov_num_samples
) # First term
energy_score_second_term = (
-smooth_l1_loss(
distributions_samples_1,
distributions_samples_2,
beta=self.smooth_l1_beta,
reduction="sum",
)
/ self.bbox_cov_num_samples
) # Second term
# Final Loss
loss_box_reg = (
energy_score_first_term + energy_score_second_term
) / max(1, self.loss_normalizer)
elif self.bbox_cov_loss == "pmb_negative_log_likelihood":
pred_class_scores = pred_class_logits.sigmoid()
losses = self.nll_od_loss_with_nms(
nms_results,
gt_instances,
anchors,
pred_class_scores,
og_pred_anchor_deltas,
og_pred_bbox_cov,
image_sizes,
)
loss_box_reg = losses["loss_box_reg"]
use_nll_loss = True
else:
raise ValueError(
"Invalid regression loss name {}.".format(self.bbox_cov_loss)
)
# Perform loss annealing. Essential for reliably training variance estimates using NLL in RetinaNet.
# For energy score and second moment matching, this is optional.
standard_regression_loss = (
smooth_l1_loss(
pred_anchor_deltas,
gt_anchors_deltas,
beta=self.smooth_l1_beta,
reduction="sum",
)
/ max(1, self.loss_normalizer)
)
probabilistic_loss_weight = get_probabilistic_loss_weight(
self.current_step, self.annealing_step
)
loss_box_reg = (
1.0 - probabilistic_loss_weight
) * standard_regression_loss + probabilistic_loss_weight * loss_box_reg
if self.bbox_cov_loss == "pmb_negative_log_likelihood":
loss_cls = (1.0 - probabilistic_loss_weight) * loss_cls
else:
# Standard regression loss in case no variance is needed to be
# estimated.
loss_box_reg = (
smooth_l1_loss(
pred_anchor_deltas,
gt_anchors_deltas,
beta=self.smooth_l1_beta,
reduction="sum",
)
/ max(1, self.loss_normalizer)
)
if use_nll_loss:
losses["loss_cls"] = loss_cls
losses["loss_box_reg"] = loss_box_reg
else:
losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}
return losses
def nll_od_loss_with_nms(
self,
nms_results,
gt_instances,
anchors,
scores,
deltas,
pred_covs,
image_shapes,
):
if "log_prob" in self.matching_distance and self.matching_distance != "log_prob":
covar_scaling = float(self.matching_distance.split("_")[-1])
matching_distance = "log_prob"
else:
covar_scaling = 1
matching_distance = self.matching_distance
self.ppp_intensity_function.update_distribution()
instances, kept_idx = nms_results
bs = len(instances)
boxes = [
self.box2box_transform.apply_deltas(delta, anchors) for delta in deltas
]
nll_pred_cov = [
pred_cov[kept].unsqueeze(1).repeat(1, self.num_classes, 1)
for pred_cov, kept in zip(pred_covs, kept_idx)
]
nll_pred_cov = [covariance_output_to_cholesky(cov) for cov in nll_pred_cov]
nll_scores = [score[kept] for score, kept in zip(scores, kept_idx)]
nll_pred_deltas = [
delta[kept].unsqueeze(1).repeat(1, self.num_classes, 1)
for delta, kept in zip(deltas, kept_idx)
]
gt_boxes = [instances.gt_boxes.tensor for instances in gt_instances]
nll_gt_classes = [instances.gt_classes for instances in gt_instances]
kept_proposals = [anchors[idx] for idx in kept_idx]
trans_func = lambda x,y: self.box2box_transform.apply_deltas(x,y)
box_means = []
box_chols = []
for i in range(bs):
box_mean, box_chol = unscented_transform(nll_pred_deltas[i], nll_pred_cov[i], kept_proposals[i], trans_func)
box_means.append(box_mean)
box_chols.append(box_chol)
if self.bbox_cov_dist_type == "gaussian":
regression_dist = (
lambda x, y: distributions.multivariate_normal.MultivariateNormal(
loc=x, scale_tril=y
)
)
elif self.bbox_cov_dist_type == "laplacian":
# Map cholesky decomp to laplacian scale
regression_dist = lambda x, y: distributions.laplace.Laplace(
loc=x, scale=y.diagonal(dim1=-2, dim2=-1) / np.sqrt(2)
)
else:
raise Exception(
f"Bounding box uncertainty distribution {self.bbox_cov_dist_type} is not available."
)
nll_scores = [
torch.cat(
(
nll_scores[i],
(
1
- nll_scores[i][
torch.arange(len(kept_idx[i])), instances[i].pred_classes
]
).unsqueeze(-1),
),
dim=-1,
)
for i in range(bs)
]
# Clamp for numerical stability
nll_scores = [scores.clamp(1e-6, 1 - 1e-6) for scores in nll_scores]
if self.use_prediction_mixture:
ppps = []
src_boxes_tot = []
src_box_chol_tot = []
src_boxes_deltas_tot = []
src_boxes_deltas_chol_tot = []
src_scores_tot = []
gt_box_deltas = []
for i in range(bs):
image_shape = image_shapes[i]
h,w = image_shape
scaling = torch.tensor([1/w,1/h],device=box_means[i].device).repeat(2)
pred_box_means = box_means[i]*scaling
pred_box_chols = torch.diag_embed(scaling)@box_chols[i]
pred_box_deltas = nll_pred_deltas[i]
pred_box_delta_chols = nll_pred_cov[i]
pred_cls_probs = nll_scores[i]
#max_conf = pred_cls_probs[..., :num_classes].max(dim=1)[0]
max_conf = 1 - pred_cls_probs[..., -1]
ppp_preds_idx = (
max_conf <= self.ppp_intensity_function.ppp_confidence_thres
)
props = kept_proposals[i][ppp_preds_idx.logical_not()]
# Get delta between each GT and proposal, batch-wise
tmp = torch.stack(
[
self.box2box_transform.get_deltas(
props,
gt_boxes[i][j].unsqueeze(0).repeat(len(props), 1),
)
for j in range(len(gt_boxes[i]))
]
)
gt_box_deltas.append(
tmp.permute(1, 0, 2)
) # [gt,pred,boxdim] -> [pred, gt, boxdim]
gt_boxes[i] = gt_boxes[i]*scaling
mixture_dict = {}
mixture_dict["weights"] = max_conf[ppp_preds_idx]
mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
mixture_dict["covs"] = pred_box_chols[ppp_preds_idx, 0]@pred_box_chols[ppp_preds_idx, 0].transpose(-1,-2)
mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :self.num_classes]
mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
if self.bbox_cov_dist_type == "gaussian":
mixture_dict[
"reg_dist"
] = distributions.multivariate_normal.MultivariateNormal
mixture_dict["reg_kwargs"] = {
"scale_tril": pred_box_chols[ppp_preds_idx, 0]
}
elif self.bbox_cov_dist_type == "laplacian":
mixture_dict["reg_dist"] = distributions.laplace.Laplace
mixture_dict["reg_kwargs"] = {
"scale": (
pred_box_chols[ppp_preds_idx, 0].diagonal(dim1=-2, dim2=-1)
/ np.sqrt(2)
)
}
loss_ppp = PoissonPointUnion()
loss_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
loss_ppp.add_ppp(self.ppp_intensity_function)
mixture_dict = {}
mixture_dict["weights"] = max_conf[ppp_preds_idx]
mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
scale_mat = torch.eye(pred_box_chols.shape[-1]).to(pred_box_chols.device)*covar_scaling
scaled_chol = scale_mat@pred_box_chols[ppp_preds_idx, 0]
mixture_dict["covs"] = (scaled_chol)@(scaled_chol.transpose(-1,-2))
mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :self.num_classes]
mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
if self.bbox_cov_dist_type == "gaussian":
mixture_dict[
"reg_dist"
] = distributions.multivariate_normal.MultivariateNormal
mixture_dict["reg_kwargs"] = {
"scale_tril": scaled_chol
}
elif self.bbox_cov_dist_type == "laplacian":
mixture_dict["reg_dist"] = distributions.laplace.Laplace
mixture_dict["reg_kwargs"] = {
"scale": (
(scaled_chol).diagonal(dim1=-2, dim2=-1)
/ np.sqrt(2)
)
}
match_ppp = PoissonPointUnion()
match_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
match_ppp.add_ppp(self.ppp_intensity_function)
ppps.append({"matching": match_ppp, "loss": loss_ppp})
src_boxes_tot.append(pred_box_means[ppp_preds_idx.logical_not()])
src_box_chol_tot.append(pred_box_chols[ppp_preds_idx.logical_not()])
src_scores_tot.append(pred_cls_probs[ppp_preds_idx.logical_not()])
src_boxes_deltas_tot.append(pred_box_deltas[ppp_preds_idx.logical_not()])
src_boxes_deltas_chol_tot.append(pred_box_delta_chols[ppp_preds_idx.logical_not()])
nll_pred_deltas = src_boxes_deltas_tot
nll_pred_delta_chols = src_boxes_deltas_chol_tot
nll_pred_boxes = src_boxes_tot
nll_pred_cov = src_box_chol_tot
nll_scores = src_scores_tot
use_target_delta_matching = False
elif self.ppp_intensity_function.ppp_intensity_type == "gaussian_mixture":
ppps = []
src_boxes_tot = []
src_box_chol_tot = []
src_boxes_deltas_tot = []
src_boxes_deltas_chol_tot = []
src_scores_tot = []
gt_box_deltas = []
for i in range(bs):
image_shape = image_shapes[i]
h,w = image_shape
scaling = torch.tensor([1/w,1/h],device=box_means[i].device).repeat(2)
pred_box_means = box_means[i]*scaling
pred_box_chols = torch.diag_embed(scaling)@box_chols[i]
pred_box_deltas = nll_pred_deltas[i]
pred_box_delta_chols = nll_pred_cov[i]
pred_cls_probs = nll_scores[i]
props = kept_proposals[i]
# Get delta between each GT and proposal, batch-wise
tmp = torch.stack(
[
self.box2box_transform.get_deltas(
props,
gt_boxes[i][j].unsqueeze(0).repeat(len(props), 1),
)
for j in range(len(gt_boxes[i]))
]
)
gt_box_deltas.append(
tmp.permute(1, 0, 2)
) # [gt,pred,boxdim] -> [pred, gt, boxdim]
gt_boxes[i] = gt_boxes[i]*scaling
src_boxes_tot.append(pred_box_means)
src_box_chol_tot.append(pred_box_chols)
src_scores_tot.append(pred_cls_probs)
src_boxes_deltas_tot.append(pred_box_deltas)
src_boxes_deltas_chol_tot.append(pred_box_delta_chols)
nll_pred_deltas = src_boxes_deltas_tot
nll_pred_delta_chols = src_boxes_deltas_chol_tot
nll_pred_boxes = src_boxes_tot
nll_pred_cov = src_box_chol_tot
nll_scores = src_scores_tot
use_target_delta_matching = False
ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
else:
gt_box_deltas = []
for i in range(len(gt_boxes)):
# Get delta between each GT and proposal, batch-wise
tmp = torch.stack(
[
self.box2box_transform.get_deltas(
kept_proposals[i],
gt_boxes[i][j].unsqueeze(0).repeat(len(kept_proposals[i]), 1),
)
for j in range(len(gt_boxes[i]))
]
)
gt_box_deltas.append(
tmp.permute(1, 0, 2)
) # [gt,pred,boxdim] -> [pred, gt, boxdim]
use_target_delta_matching = True
ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
nll_pred_delta_chols = nll_pred_cov
nll_pred_deltas = nll_pred_deltas
nll_pred_boxes = nll_pred_deltas
nll_pred_cov = nll_pred_cov
nll, associations, decompositions = negative_log_likelihood(
nll_scores,
nll_pred_boxes,
nll_pred_cov,
gt_boxes,
nll_gt_classes,
image_shapes,
regression_dist,
ppps,
self.nll_max_num_solutions,
target_deltas=gt_box_deltas,
matching_distance=matching_distance,
use_target_delta_matching=use_target_delta_matching,
pred_deltas=nll_pred_deltas,
pred_delta_chols=nll_pred_delta_chols,
)
# Save some stats
storage = get_event_storage()
num_classes = self.num_classes
mean_variance = np.mean(
[
cov.diagonal(dim1=-2,dim2=-1)
.pow(2)
.mean()
.item()
for cov in nll_pred_cov
if cov.shape[0] > 0
]
)
storage.put_scalar("nll/mean_covariance", mean_variance)
ppp_intens = np.sum([ppp["loss"].integrate(
torch.as_tensor(image_shapes).to(self.device), num_classes
)
.mean()
.item()
for ppp in ppps
])
storage.put_scalar("nll/ppp_intensity", ppp_intens)
reg_loss = np.mean(
[
np.clip(
decomp["matched_bernoulli_reg"][0]
/ (decomp["num_matched_bernoulli"][0] + 1e-6),
-1e25,
1e25,
)
for decomp in decompositions
]
)
cls_loss_match = np.mean(
[
np.clip(
decomp["matched_bernoulli_cls"][0]
/ (decomp["num_matched_bernoulli"][0] + 1e-6),
-1e25,
1e25,
)
for decomp in decompositions
]
)
cls_loss_no_match = np.mean(
[
np.clip(
decomp["unmatched_bernoulli"][0]
/ (decomp["num_unmatched_bernoulli"][0] + 1e-6),
-1e25,
1e25,
)
for decomp in decompositions
]
)
# Collect all losses
losses = dict()
losses["loss_box_reg"] = nll
# Add losses for logging, these do not propagate gradients
losses["loss_regression"] = torch.tensor(reg_loss).to(nll.device)
losses["loss_cls_matched"] = torch.tensor(cls_loss_match).to(nll.device)
losses["loss_cls_unmatched"] = torch.tensor(cls_loss_no_match).to(nll.device)
return losses
def produce_raw_output(self, anchors, features):
"""
Given anchors and features, produces raw pre-nms output to be used for custom fusion operations.
"""
# Perform inference run
(
pred_logits,
pred_anchor_deltas,
pred_logits_vars,
pred_anchor_deltas_vars,
) = self.head(features)
# Transpose the Hi*Wi*A dimension to the middle:
pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits]
pred_anchor_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas]
if pred_logits_vars is not None:
pred_logits_vars = [
permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits_vars
]
if pred_anchor_deltas_vars is not None:
pred_anchor_deltas_vars = [
permute_to_N_HWA_K(x, self.bbox_cov_dims)
for x in pred_anchor_deltas_vars
]
# Create raw output dictionary
raw_output = {"anchors": anchors}
# Shapes:
# (N x R, K) for class_logits and class_logits_var.
# (N x R, 4), (N x R x 10) for pred_anchor_deltas and pred_class_bbox_cov respectively.
raw_output.update(
{
"box_cls": pred_logits,
"box_delta": pred_anchor_deltas,
"box_cls_var": pred_logits_vars,
"box_reg_var": pred_anchor_deltas_vars,
}
)
if (
self.compute_bbox_cov
and self.bbox_cov_loss == "pmb_negative_log_likelihood"
):
ppp_output = self.ppp_intensity_function.get_weights()
raw_output.update({"ppp": ppp_output})
return raw_output
def inference(
self,
anchors: List[Boxes],
pred_logits: List[Tensor],
pred_anchor_deltas: List[Tensor],
image_sizes: List[Tuple[int, int]],
):
"""
Arguments:
anchors (list[Boxes]): A list of #feature level Boxes.
The Boxes contain anchors of this image on the specific feature level.
pred_logits, pred_anchor_deltas: list[Tensor], one per level. Each
has shape (N, Hi * Wi * Ai, K or 4)
image_sizes (List[(h, w)]): the input image sizes
Returns:
results (List[Instances]): a list of #images elements.
"""
results: List[Instances] = []
for img_idx, image_size in enumerate(image_sizes):
pred_logits_per_image = [x[img_idx] for x in pred_logits]
deltas_per_image = [x[img_idx] for x in pred_anchor_deltas]
results_per_image = self.inference_single_image(
anchors, pred_logits_per_image, deltas_per_image, image_size
)
results.append(results_per_image)
return [x[0] for x in results], [x[1] for x in results]
    def inference_single_image(
        self,
        anchors: List[Boxes],
        box_cls: List[Tensor],
        box_delta: List[Tensor],
        image_size: Tuple[int, int],
    ):
        """
        Single-image inference. Return bounding-box detection results by thresholding
        on scores and applying non-maximum suppression (NMS).
        Arguments:
            anchors (list[Boxes]): list of #feature levels. Each entry contains
                a Boxes object, which contains all the anchors in that feature level.
            box_cls (list[Tensor]): list of #feature levels. Each entry contains
                tensor of size (H x W x A, K)
            box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4.
            image_size (tuple(H, W)): a tuple of the image height and width.
        Returns:
            Same as `inference`, but for only one image; additionally returns the
            global (across-level) indices of the anchors behind the kept detections.
        """
        boxes_all = []
        scores_all = []
        class_idxs_all = []
        anchor_idxs_all = []

        # Iterate over every feature level
        for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta, anchors):
            # (HxWxAxK,)
            predicted_prob = box_cls_i.flatten().sigmoid()

            # Apply two filtering below to make NMS faster.
            # 1. Keep boxes with confidence score higher than threshold
            keep_idxs = predicted_prob > self.test_score_thresh
            predicted_prob = predicted_prob[keep_idxs]
            topk_idxs = nonzero_tuple(keep_idxs)[0]

            # 2. Keep top k top scoring boxes only
            num_topk = min(self.test_topk_candidates, topk_idxs.size(0))
            # torch.sort is actually faster than .topk (at least on GPUs)
            predicted_prob, idxs = predicted_prob.sort(descending=True)
            predicted_prob = predicted_prob[:num_topk]
            topk_idxs = topk_idxs[idxs[:num_topk]]

            # The flattened index interleaves anchor and class; recover the
            # per-level anchor index and the class index.
            anchor_idxs = topk_idxs // self.num_classes
            classes_idxs = topk_idxs % self.num_classes

            box_reg_i = box_reg_i[anchor_idxs]
            anchors_i = anchors_i[anchor_idxs]
            # predict boxes
            predicted_boxes = self.box2box_transform.apply_deltas(
                box_reg_i, anchors_i.tensor
            )

            boxes_all.append(predicted_boxes)
            scores_all.append(predicted_prob)
            class_idxs_all.append(classes_idxs)
            anchor_idxs_all.append(anchor_idxs)

        # Offset each level's anchor indices so that they index into the
        # concatenation of all levels' anchors (prefix sums of level sizes).
        num_anchors_per_feat_lvl = [anchor.tensor.shape[0] for anchor in anchors]
        accum_anchor_nums = np.cumsum(num_anchors_per_feat_lvl).tolist()
        accum_anchor_nums = [0] + accum_anchor_nums
        anchor_idxs_all = [
            anchor_idx + prev_num_feats
            for anchor_idx, prev_num_feats in zip(anchor_idxs_all, accum_anchor_nums)
        ]

        boxes_all, scores_all, class_idxs_all, anchor_idxs_all = [
            cat(x) for x in [boxes_all, scores_all, class_idxs_all, anchor_idxs_all]
        ]
        # Class-aware NMS across all levels, then cap the number of detections.
        keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.test_nms_thresh)
        keep = keep[: self.max_detections_per_image]

        result = Instances(image_size)
        result.pred_boxes = Boxes(boxes_all[keep])
        result.scores = scores_all[keep]
        result.pred_classes = class_idxs_all[keep]
        return result, anchor_idxs_all[keep]
class ProbabilisticRetinaNetHead(RetinaNetHead):
    """
    The head used in ProbabilisticRetinaNet for object class probability estimation, box regression, box covariance estimation.
    It has three subnets for the three tasks, with a common structure but separate parameters.
    """

    def __init__(
        self,
        cfg,
        use_dropout,
        dropout_rate,
        compute_cls_var,
        compute_bbox_cov,
        bbox_cov_dims,
        input_shape: List[ShapeSpec],
    ):
        """
        Args:
            cfg: detectron2-style config; RetinaNet settings are read from
                cfg.MODEL.RETINANET.
            use_dropout (bool): append a Dropout layer to each subnet tower.
            dropout_rate (float): dropout probability when use_dropout is set.
            compute_cls_var (bool): add an extra head predicting classification
                logit variances.
            compute_bbox_cov (bool): add an extra head predicting box
                covariance parameters.
            bbox_cov_dims (int): covariance parameters per anchor
                (4 for a diagonal, 10 for a full covariance).
            input_shape (List[ShapeSpec]): shapes of the input feature maps.
        """
        super().__init__(cfg, input_shape)

        # Extract config information
        # fmt: off
        in_channels = input_shape[0].channels
        num_classes = cfg.MODEL.RETINANET.NUM_CLASSES
        num_convs = cfg.MODEL.RETINANET.NUM_CONVS
        prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB
        num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors
        # fmt: on
        assert (
            len(set(num_anchors)) == 1
        ), "Using different number of anchors between levels is not currently supported!"
        num_anchors = num_anchors[0]

        self.compute_cls_var = compute_cls_var
        self.compute_bbox_cov = compute_bbox_cov
        self.bbox_cov_dims = bbox_cov_dims

        # For consistency all configs are grabbed from original RetinaNet
        self.use_dropout = use_dropout
        self.dropout_rate = dropout_rate

        # Conv towers shared by the score/variance heads (cls) and the
        # delta/covariance heads (bbox).
        cls_subnet = []
        bbox_subnet = []
        for _ in range(num_convs):
            cls_subnet.append(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            cls_subnet.append(nn.ReLU())
            bbox_subnet.append(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            bbox_subnet.append(nn.ReLU())

        if self.use_dropout:
            cls_subnet.append(nn.Dropout(p=self.dropout_rate))
            bbox_subnet.append(nn.Dropout(p=self.dropout_rate))

        self.cls_subnet = nn.Sequential(*cls_subnet)
        self.bbox_subnet = nn.Sequential(*bbox_subnet)
        self.cls_score = nn.Conv2d(
            in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1
        )
        self.bbox_pred = nn.Conv2d(
            in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1
        )

        # Standard init: small normal weights, zero biases.
        for modules in [
            self.cls_subnet,
            self.bbox_subnet,
            self.cls_score,
            self.bbox_pred,
        ]:
            for layer in modules.modules():
                if isinstance(layer, nn.Conv2d):
                    torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
                    torch.nn.init.constant_(layer.bias, 0)

        # Use prior in model initialization to improve stability
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        torch.nn.init.constant_(self.cls_score.bias, bias_value)

        # Create subnet for classification variance estimation.
        if self.compute_cls_var:
            self.cls_var = nn.Conv2d(
                in_channels,
                num_anchors * num_classes,
                kernel_size=3,
                stride=1,
                padding=1,
            )

            for layer in self.cls_var.modules():
                if isinstance(layer, nn.Conv2d):
                    torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
                    # Strongly negative bias so initial variances start small.
                    torch.nn.init.constant_(layer.bias, -10.0)

        # Create subnet for bounding box covariance estimation.
        if self.compute_bbox_cov:
            self.bbox_cov = nn.Conv2d(
                in_channels,
                num_anchors * self.bbox_cov_dims,
                kernel_size=3,
                stride=1,
                padding=1,
            )

            for layer in self.bbox_cov.modules():
                if isinstance(layer, nn.Conv2d):
                    # Much smaller std than the other heads for a stable start.
                    torch.nn.init.normal_(layer.weight, mean=0, std=0.0001)
                    torch.nn.init.constant_(layer.bias, 0)

    def forward(self, features):
        """
        Arguments:
            features (list[Tensor]): FPN feature map tensors in high to low resolution.
                Each tensor in the list correspond to different feature levels.
        Returns:
            logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
                The tensor predicts the classification probability
                at each spatial position for each of the A anchors and K object
                classes.
            logits_var (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
                The tensor predicts the variance of the logits modeled as a univariate
                Gaussian distribution at each spatial position for each of the A anchors and K object
                classes.
            bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).
                The tensor predicts 4-vector (dx,dy,dw,dh) box
                regression values for every anchor. These values are the
                relative offset between the anchor and the ground truth box.
            bbox_cov (list[Tensor]): #lvl tensors, each has shape (N, Ax4 or Ax10, Hi, Wi).
                The tensor predicts elements of the box
                covariance values for every anchor. The dimensions of the box covarianc
                depends on estimating a full covariance (10) or a diagonal covariance matrix (4).
        """
        logits = []
        bbox_reg = []

        logits_var = []
        bbox_cov = []
        for feature in features:
            logits.append(self.cls_score(self.cls_subnet(feature)))
            bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature)))
            if self.compute_cls_var:
                logits_var.append(self.cls_var(self.cls_subnet(feature)))
            if self.compute_bbox_cov:
                bbox_cov.append(self.bbox_cov(self.bbox_subnet(feature)))

        # Disabled variance outputs are returned as None (not empty lists).
        return_vector = [logits, bbox_reg]
        if self.compute_cls_var:
            return_vector.append(logits_var)
        else:
            return_vector.append(None)
        if self.compute_bbox_cov:
            return_vector.append(bbox_cov)
        else:
            return_vector.append(None)

        return return_vector
| 58,037
| 39.164706
| 127
|
py
|
pmb-nll
|
pmb-nll-main/src/probabilistic_modeling/modeling_utils.py
|
import copy
import math
import torch
from sklearn.mixture._gaussian_mixture import _compute_precision_cholesky
from torch import nn
from torch.distributions import Distribution
from torch.distributions.categorical import Categorical
from torch.distributions.independent import Independent
from torch.distributions.laplace import Laplace
from torch.distributions.mixture_same_family import MixtureSameFamily
from torch.distributions.multivariate_normal import MultivariateNormal
class ClassRegDist(Distribution):
    """
    Joint distribution over a box-regression vector and a class label.
    The event is the concatenation [regression..., class], so the event
    length is the regression dimensionality plus one. The regression term
    can be scored either with the wrapped distribution's log_prob or with
    (squared) euclidean distance (see set_dist_mode).
    """

    def __init__(
        self,
        loc,
        reg_dist,
        reg_kwargs,
        probs=None,
        logits=None,
        independent_reg_dist=False,
    ):
        regression = reg_dist(loc, **reg_kwargs)
        if independent_reg_dist:
            # Per-coordinate distributions (e.g. Laplace) must be summed
            # over the last dimension to give a joint log-prob.
            regression = Independent(regression, 1)
        self.reg_dist = regression
        self.cls_dist = Categorical(probs=probs, logits=logits)
        self.dist_type = "log_prob"
        super().__init__(
            loc.shape[:-1], torch.Size([loc.shape[-1] + 1]), validate_args=False
        )

    def log_prob(self, value):
        boxes, labels = value[..., :-1], value[..., -1]
        cls_term = self.cls_dist.log_prob(labels)
        if self.dist_type == "euclidian":
            reg_term = -(self.reg_dist.mean - boxes).pow(2).sum(-1).sqrt()
        elif self.dist_type == "euclidian_squared":
            reg_term = -(self.reg_dist.mean - boxes).pow(2).sum(-1)
        else:
            reg_term = self.reg_dist.log_prob(boxes)
        return cls_term + reg_term

    def set_dist_mode(self, dist_type):
        """Switch how the regression part is scored (see log_prob)."""
        self.dist_type = dist_type
def unscented_transform(means, chols, anchors, trans_func):
    """ Definition 1 in https://arxiv.org/abs/2104.01958

    Propagates Gaussians (given by means and Cholesky factors) through
    ``trans_func`` using the sigma-point (unscented) transform.

    Args:
        means: (..., n) Gaussian means; extra leading batch dims are flattened.
        chols: (..., n, n) lower-triangular Cholesky factors.
        anchors: (A, n) anchors; repeated so that len(flattened means) is a
            multiple of A, and passed through to ``trans_func``.
        trans_func: callable (points, anchors) -> transformed points, applied
            to all sigma points at once.

    Returns:
        (transformed means with the shape of ``means``,
         transformed Cholesky factors with the shape of ``chols``)
    """
    n = means.shape[-1]
    kappa = n - 3
    # Remember the caller's shapes so the outputs can be restored; flatten
    # any extra batch dimensions for the computation below.
    # (Fixes two bugs: `len(chols > 3)` built a boolean tensor instead of
    # checking rank, and the old shapes were undefined for flat inputs.)
    orig_means_shape = means.shape
    orig_chols_shape = chols.shape
    if means.dim() > 2:
        means = means.reshape(-1, n)
    if chols.dim() > 3:
        chols = chols.reshape(-1, n, n)
    N = len(means)

    # Standard UT weights: kappa/(n+kappa) for the centre point,
    # 1/(2(n+kappa)) for each of the 2n symmetric points.
    weights = torch.ones((1, 2 * n + 1, 1), device=means.device) / (2 * (n + kappa))
    weights[0, 0, 0] = kappa / (n + kappa)

    # Sigma points: the mean and the mean +/- scaled Cholesky columns.
    centre = means.unsqueeze(1)                       # [N, 1, n]
    offset = math.sqrt(n + kappa) * chols             # [N, n, n]
    sigma_points = torch.cat((centre, centre + offset, centre - offset), dim=1)

    repeated_anchors = (
        anchors.repeat_interleave(len(means) // len(anchors), dim=0)
        .unsqueeze(1)
        .repeat(1, 2 * n + 1, 1)
        .reshape(-1, n)
    )
    transformed = trans_func(sigma_points.reshape(-1, n), repeated_anchors)
    transformed = transformed.reshape(N, 2 * n + 1, n)

    transformed_means = (transformed * weights).sum(dim=1)
    # [N, 2n+1, n, 1]
    residuals = (transformed - transformed_means.unsqueeze(1)).unsqueeze(-1)
    # [N, n, n]
    transformed_covs = (
        weights.unsqueeze(-1) * residuals @ residuals.transpose(-1, -2)
    ).sum(dim=1)
    transformed_chols, info = torch.linalg.cholesky_ex(transformed_covs)
    if not (info == 0).all():
        # Some covariances were numerically non-PD: clamp the factor's
        # diagonal into a safe range and dump the offenders for debugging.
        transformed_chols = torch.diag_embed(
            torch.diagonal(transformed_chols, dim1=-2, dim2=-1).clamp(
                math.exp(-7), math.exp(10)
            )
        ) + torch.tril(transformed_chols, -1)
        print("***************************")
        bad = info != 0
        bad_anchors = anchors.repeat_interleave(len(means) // len(anchors), dim=0)[bad]
        for cov, res, trans_mean, mean, anchor, chol in zip(
            transformed_covs[bad],
            residuals[bad].squeeze(-1),
            transformed_means[bad],
            means[bad],
            bad_anchors,
            chols[bad],
        ):
            print(cov)
            print(res)
            print(trans_mean)
            print(mean)
            print(anchor)
            print(chol)
            print("+++++++++++++++++++++++++++++++++++")
        print("***************************")
    return (
        transformed_means.reshape(orig_means_shape),
        transformed_chols.reshape(orig_chols_shape),
    )
def covariance_output_to_cholesky(pred_bbox_cov):
    """
    Transforms output to covariance cholesky decomposition.
    Args:
        pred_bbox_cov (kx4 or kx10): log-variances in the first 4 entries
            and, optionally, the 6 below-diagonal factor entries.
    Returns:
        predicted_cov_cholesky (kx4x4): lower-triangular cholesky factor
        matrices.
    """
    if pred_bbox_cov.shape[0] == 0:
        # Nothing to transform; keep an empty tensor of the right shape.
        return pred_bbox_cov.reshape((0, 4, 4))
    # exp maps log-variance to variance; sqrt gives the factor's diagonal.
    cholesky = torch.diag_embed(torch.exp(pred_bbox_cov[..., :4]).sqrt())
    if pred_bbox_cov.shape[-1] > 4:
        rows, cols = torch.tril_indices(row=4, col=4, offset=-1)
        cholesky[..., rows, cols] = pred_bbox_cov[..., 4:]
    return cholesky
def clamp_log_variance(pred_bbox_cov, clamp_min=-7.0, clamp_max=10.0):
    """
    Tiny function that clamps variance for consistency across all methods.

    Only the first four channels (the log-variances) are clamped into
    [clamp_min, clamp_max]; any remaining channels pass through untouched.
    """
    clamped_log_vars = pred_bbox_cov[..., 0:4].clamp(clamp_min, clamp_max)
    return torch.cat((clamped_log_vars, pred_bbox_cov[..., 4:]), dim=-1)
def get_probabilistic_loss_weight(current_step, annealing_step):
    """
    Tiny function to get adaptive probabilistic loss weight for consistency
    across all methods.

    The weight ramps from 0 to 1 on an exponential (base-100) schedule over
    ``annealing_step`` steps and stays at 1 afterwards.
    """
    progress = min(1.0, current_step / annealing_step)
    return (100 ** progress - 1.0) / (100.0 - 1.0)
def freeze_non_probabilistic_weights(cfg, model):
    """
    Tiny function to only keep a small subset of weight non-frozen.

    Depending on cfg.MODEL.TRAIN_ONLY_PPP / TRAIN_ONLY_UNCERTAINTY_PREDS,
    freezes everything except the PPP intensity (and, in the second mode,
    the bbox covariance head). PPP parameters follow cfg.MODEL.TRAIN_PPP.
    """
    if cfg.MODEL.TRAIN_ONLY_PPP:
        print("[NLLOD]: Freezing all non-PPP weights")
        for name, param in model.named_parameters():
            is_ppp = "ppp_intensity_function" in name
            param.requires_grad = cfg.MODEL.TRAIN_PPP if is_ppp else False
        print("[NLLOD]: Froze all non-PPP weights")
    elif cfg.MODEL.TRAIN_ONLY_UNCERTAINTY_PREDS:
        print("[NLLOD]: Freezing all non-probabilistic weights")
        for name, param in model.named_parameters():
            if "ppp_intensity_function" in name:
                param.requires_grad = cfg.MODEL.TRAIN_PPP
            else:
                # Only the covariance head stays trainable.
                param.requires_grad = "bbox_cov" in name
        print("[NLLOD]: Froze all non-probabilistic weights")
    else:
        # Default: everything stays as-is except the PPP toggle.
        for name, param in model.named_parameters():
            if "ppp_intensity_function" in name:
                param.requires_grad = cfg.MODEL.TRAIN_PPP
class PoissonPointProcessBase(nn.Module):
    """
    Common base for PPP intensity modules. Tracks whether boxes fed to the
    intensity should be normalized by image size (off by default).
    """

    def __init__(self):
        super().__init__()
        self.normalize_bboxes = False

    def set_normalization_of_bboxes(self, normalize_bboxes):
        """Toggle normalization of input boxes by image size."""
        self.normalize_bboxes = normalize_bboxes
class PoissonPointUnion(PoissonPointProcessBase):
    """
    Union of several PPPs: intensities add, so log-intensities combine via
    logsumexp and integrals via summation.
    """

    def __init__(self):
        super().__init__()
        self.ppps = []

    def add_ppp(self, ppp):
        """Add a constituent PPP to the union."""
        self.ppps.append(ppp)

    def set_normalization_of_bboxes(self, normalize_bboxes):
        # Propagate the flag to every constituent.
        for member in self.ppps:
            member.normalize_bboxes = normalize_bboxes

    def integrate(self, image_sizes, num_classes):
        """Total rate: the sum of the constituents' integrals."""
        total = 0
        for member in self.ppps:
            total = total + member.integrate(image_sizes, num_classes)
        return total

    def forward(
        self,
        src,
        image_sizes=[],
        num_classes=-1,
        integrate=False,
        src_is_features=False,
        dist_type="log_prob",
    ):
        if integrate:
            return self.integrate(image_sizes, num_classes)
        member_logs = [
            member(src, image_sizes, num_classes, integrate, src_is_features, dist_type)[:, None]
            for member in self.ppps
        ]
        # log sum_i lambda_i(x) == logsumexp_i log lambda_i(x)
        return torch.logsumexp(torch.cat(member_logs, 1), 1)
class PoissonPointProcessUniform(PoissonPointProcessBase):
    """
    PPP whose intensity is uniform over box coordinates: a fixed rate
    spread evenly over the image, combined with a fixed class distribution.
    """

    def __init__(
        self,
        class_dist_log,
        ppp_rate,
        uniform_center_pos,
        device=torch.device("cpu"),
    ):
        """
        Args:
            class_dist_log: per-class log-probabilities (tensor or listlike).
            ppp_rate: expected number of points (the PPP rate).
            uniform_center_pos: kept for interface parity with the GMM
                variant; not used by the uniform intensity itself.
            device: device the parameters live on.
        """
        super().__init__()
        if not type(class_dist_log) == torch.Tensor:
            class_dist_log = torch.tensor(class_dist_log)
        self.class_dist_log = class_dist_log.to(device)
        self.ppp_rate = torch.tensor([ppp_rate]).to(device)
        self.uniform_center_pos = uniform_center_pos
        self.device = device

    def forward(
        self,
        src,
        image_sizes=[],
        num_classes=-1,
        integrate=False,
        src_is_features=False,
        dist_type="log_prob",
    ):
        """
        Log intensity for each point in ``src`` (boxes with a trailing class
        index).

        ``dist_type`` is accepted (and ignored) so this module can be called
        through PoissonPointUnion.forward, which passes it positionally —
        without it the call raises TypeError.
        """
        if integrate:
            return self.integrate(image_sizes, num_classes)
        assert len(image_sizes) == 1
        cls_log_probs = self.class_dist_log[src[..., -1].long()]
        # Uniform density over valid boxes: log(1/(W^2/2) * 1/(H^2/2)).
        box_log_probs = (-image_sizes[0].log() * 2 + math.log(2)).sum()
        return cls_log_probs + box_log_probs + self.ppp_rate.log()

    def integrate(self, image_sizes, num_classes):
        # The intensity integrates to the configured rate by construction.
        return self.ppp_rate
class PoissonPointProcessGMM(PoissonPointProcessBase):
    """
    PPP whose spatial intensity is a pre-fitted (sklearn) Gaussian mixture
    over normalized box coordinates, rescaled to the current image size and
    combined with a fixed class distribution and rate.
    """

    def __init__(
        self,
        gmm,
        class_dist_log,
        ppp_rate,
        uniform_center_pos,
        device=torch.device("cpu"),
    ):
        """
        Args:
            gmm: fitted sklearn GaussianMixture over normalized coordinates.
            class_dist_log: per-class log-probabilities (tensor or listlike).
            ppp_rate: expected number of points (the PPP rate).
            uniform_center_pos: if True, the mixture models only the (w, h)
                box sizes and centers are uniform over the image.
            device: device the parameters live on.
        """
        super().__init__()
        if not type(class_dist_log) == torch.Tensor:
            class_dist_log = torch.tensor(class_dist_log)
        self.class_dist_log = class_dist_log.to(device)
        self.gmm = gmm
        self.ppp_rate = torch.tensor([ppp_rate]).to(device)
        self.uniform_center_pos = uniform_center_pos
        self.device = device

    def forward(
        self,
        src,
        image_sizes=[],
        num_classes=-1,
        integrate=False,
        src_is_features=False,
        dist_type="log_prob",
    ):
        """
        Log intensity for each point in ``src`` (boxes with a trailing class
        index).

        ``dist_type`` is accepted (and ignored) so this module can be called
        through PoissonPointUnion.forward, which passes it positionally —
        without it the call raises TypeError.
        """
        if integrate:
            return self.integrate(image_sizes, num_classes)
        assert len(image_sizes) == 1
        img_size = image_sizes[0].flip(0).repeat(2)  # w,h,w,h
        scale = torch.diag_embed(img_size).cpu().numpy()
        # Work on a copy: the rescaling below mutates the sklearn mixture.
        gmm = copy.deepcopy(self.gmm)
        boxes = src[..., :-1]
        if self.uniform_center_pos:
            # Mixture over (w, h) only; centers are uniform over the image.
            gmm.means_ = gmm.means_ * img_size.cpu().numpy()[:2]
            gmm.covariances_ = scale[:2, :2] @ gmm.covariances_ @ scale[:2, :2].T
            gmm.precisions_cholesky_ = _compute_precision_cholesky(
                gmm.covariances_, gmm.covariance_type
            )
            img_area = img_size[0] * img_size[1]
            # N, 2 (w,h)
            box_sizes = torch.cat(
                (
                    (boxes[..., 2] - boxes[..., 0])[:, None],
                    (boxes[..., 3] - boxes[..., 1])[:, None],
                ),
                1,
            )
            box_log_probs = torch.tensor(gmm.score_samples(box_sizes.cpu().numpy())).to(
                box_sizes.device
            )
            # The uniform center position contributes a factor 1/(W*H).
            box_log_probs = box_log_probs - img_area.log()
        else:
            gmm.means_ = gmm.means_ * img_size.cpu().numpy()
            gmm.covariances_ = scale @ gmm.covariances_ @ scale.T
            gmm.precisions_cholesky_ = _compute_precision_cholesky(
                gmm.covariances_, gmm.covariance_type
            )
            box_log_probs = torch.tensor(gmm.score_samples(boxes.cpu().numpy())).to(
                boxes.device
            )
        cls_log_probs = self.class_dist_log[src[..., -1].long()]
        total_log_probs = cls_log_probs + box_log_probs + self.ppp_rate.log()
        return total_log_probs

    def integrate(self, image_sizes, num_classes):
        # The intensity integrates to the configured rate by construction.
        return self.ppp_rate
class ZeroDistribution(PoissonPointProcessBase):
    """Stand-in for an everywhere-zero intensity: log_prob is -inf."""

    def __init__(self, device=torch.device("cuda")) -> None:
        super().__init__()
        self.device = device
        self.component_distribution = None

    def log_prob(self, src, *args, **kwargs):
        # log(0) = -inf, one entry per input point, on src's device.
        zero_per_point = torch.zeros(len(src), device=src.device)
        return zero_per_point.log()
class PoissonPointProcessIntensityFunction(PoissonPointProcessBase):
"""
Class representing a Poisson Point Process RFS intensity function. Currently assuming DETR/RCNN/RetinaNet.
"""
    def __init__(
        self, cfg, log_intensity=None, ppp_feature_net=None, predictions=None, device="cuda"
    ) -> None:
        """
        Args:
            cfg: config node; PPP settings are read from
                cfg.MODEL.PROBABILISTIC_MODELING.PPP and
                cfg.PROBABILISTIC_INFERENCE.
            log_intensity: if given, forces a single-class uniform intensity
                initialized to this (fixed) log value.
            ppp_feature_net: optional network for a data-dependent intensity
                (not used yet; see forward_features).
            predictions: if given together with a PPP confidence threshold in
                cfg, builds the intensity as a mixture over these predictions.
            device: device the parameters live on.
        """
        super().__init__()
        self.device = device
        # Select the intensity parameterization from the arguments/config.
        if cfg.PROBABILISTIC_INFERENCE.PPP_CONFIDENCE_THRES and predictions is not None:
            self.ppp_intensity_type = "prediction_mixture"
        elif log_intensity is not None:
            self.ppp_intensity_type = "uniform"
            self.num_classes = 1
        else:
            self.ppp_intensity_type = (
                cfg.MODEL.PROBABILISTIC_MODELING.PPP.INTENSITY_TYPE
            )
            self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
        self.ppp_confidence_thres = cfg.PROBABILISTIC_INFERENCE.PPP_CONFIDENCE_THRES
        self.ppp_feature_net = ppp_feature_net
        if self.ppp_intensity_type == "uniform":
            # Learnable per-coordinate log intensity plus a class log weight.
            self.ppp_intensity_per_coord = nn.Parameter(
                torch.tensor(1.0).to(self.device), requires_grad=True
            )
            self.log_ppp_intensity_class = nn.Parameter(
                torch.tensor(1.0).to(self.device), requires_grad=True
            )
            if log_intensity is None:
                nn.init.constant_(
                    self.ppp_intensity_per_coord,
                    cfg.MODEL.PROBABILISTIC_MODELING.PPP.UNIFORM_INTENSITY,
                )
                nn.init.constant_(
                    self.log_ppp_intensity_class,
                    math.log(1 / cfg.MODEL.ROI_HEADS.NUM_CLASSES),
                )
            else:
                # Explicit intensity: fix the class component.
                nn.init.constant_(self.ppp_intensity_per_coord, log_intensity)
                nn.init.constant_(self.log_ppp_intensity_class, 0)
                self.log_ppp_intensity_class.requires_grad = False
        elif self.ppp_intensity_type == "gaussian_mixture":
            num_mixture_comps = cfg.MODEL.PROBABILISTIC_MODELING.PPP.NUM_GAUSS_MIXTURES
            cov_type = cfg.MODEL.PROBABILISTIC_MODELING.PPP.COV_TYPE
            if cov_type == "diagonal":
                cov_dims = 4
            elif cov_type == "full":
                cov_dims = 10
            else:
                # Unknown cov type: fall back to a diagonal parameterization.
                cov_dims = 4
            # NOTE(review): this init value is immediately overwritten by the
            # normal_ re-initialization two lines below.
            self.log_gmm_weights = nn.Parameter(
                (torch.ones(num_mixture_comps)*0.5).log().to(self.device),
                requires_grad=True,
            )
            nn.init.normal_(self.log_gmm_weights, mean=0, std=0.1)
            # Sample component means around the image center in normalized
            # coordinates, convert (cx, cy, w, h) -> (x1, y1, x2, y2) and
            # clamp into the unit square.
            means = torch.distributions.Normal(torch.tensor([0.5]).to(self.device), scale=torch.tensor([0.16]).to(self.device)).rsample((num_mixture_comps, 4,)).squeeze(-1)
            xywh_to_xyxy = torch.tensor([[1,0,-0.5,0],[0,1,0,-0.5],[1,0,0.5,0],[0,1,0,0.5]]).to(self.device)
            means = (xywh_to_xyxy@(means.unsqueeze(-1))).squeeze(-1)
            means = means.clamp(0,1)
            self.gmm_means = nn.Parameter(
                means, requires_grad=True
            )
            self.gmm_chols = nn.Parameter(
                torch.zeros(num_mixture_comps, cov_dims).to(self.device), requires_grad=True
            )
            nn.init.normal_(self.gmm_chols, std=1)
            # Near-uniform class probabilities with a little noise, stored as logits.
            cls_probs = torch.ones(num_mixture_comps, self.num_classes).to(self.device)/self.num_classes + torch.rand((num_mixture_comps, self.num_classes)).to(self.device)*0.1
            cls_logits = (cls_probs/(1-cls_probs)).log()
            self.class_logits = nn.Parameter(
                cls_logits, requires_grad=True
            )  # these are softmaxed later
            #self.mvn = MultivariateNormal(self.gmm_means, scale_tril=self.gmm_chols)
            reg_kwargs = {"scale_tril": covariance_output_to_cholesky(self.gmm_chols)}
            mixture_dict = {}
            mixture_dict["means"] = self.gmm_means
            mixture_dict["weights"] = self.log_gmm_weights.exp()
            mixture_dict["reg_dist"] = torch.distributions.multivariate_normal.MultivariateNormal
            mixture_dict["reg_kwargs"] = reg_kwargs
            mixture_dict["cls_probs"] = self.class_logits.softmax(dim=-1)
            mixture_dict["reg_dist_type"] = "gaussian"
            mixture_dict["covs"] = None
            self.mixture_from_predictions(mixture_dict)
        elif self.ppp_intensity_type == "prediction_mixture":
            if predictions is not None:
                self.mixture_from_predictions(predictions)
        elif self.ppp_intensity_type == "zero":
            self.dist = ZeroDistribution(self.device)
        else:
            raise NotImplementedError(
                f"PPP intensity type {cfg.MODEL.PROBABILISTIC_MODELING.PPP_INTENSITY_TYPE} not implemented."
            )
def mixture_from_predictions(self, mixture_dict):
reg_dist_str = mixture_dict["reg_dist_type"]
means = mixture_dict["means"]
covs = mixture_dict["covs"]
weights = mixture_dict["weights"]
cls_probs = mixture_dict["cls_probs"]
reg_kwargs = mixture_dict["reg_kwargs"]
independent_reg_dist = False
reg_dist = mixture_dict["reg_dist"]
if reg_dist_str == "laplacian":
independent_reg_dist = True
if not len(weights):
self.mixture_dist = ZeroDistribution(means.device)
self.ppp_rate = torch.tensor(0.0).to(means.device)
else:
self.mixture_dist = MixtureSameFamily(
Categorical(weights),
ClassRegDist(
means,
reg_dist,
reg_kwargs,
probs=cls_probs,
independent_reg_dist=independent_reg_dist,
),
validate_args=False,
)
self.ppp_rate = weights.sum()
def get_weights(self):
weights = dict()
if self.ppp_intensity_type == "uniform":
weights["ppp_intensity_per_coord"] = self.ppp_intensity_per_coord
weights["log_ppp_intensity_class"] = self.log_ppp_intensity_class
elif self.ppp_intensity_type == "gaussian_mixture":
return weights
weights["log_gmm_weights"] = self.log_gmm_weights
weights["gmm_means"] = self.gmm_means
weights["gmm_covs"] = self.gmm_covs
weights["class_weights"] = self.class_weights
weights["log_class_scaling"] = self.log_class_scaling
return weights
    def load_weights(self, weights):
        """
        Restores PPP parameters from a dict of saved values and rebuilds the
        intensity distribution.
        """
        if self.ppp_intensity_type == "uniform":
            self.ppp_intensity_per_coord = nn.Parameter(
                torch.as_tensor(weights["ppp_intensity_per_coord"])
            )
            self.log_ppp_intensity_class = nn.Parameter(
                torch.as_tensor(weights["log_ppp_intensity_class"])
            )
        elif self.ppp_intensity_type == "gaussian_mixture":
            # NOTE(review): apart from log_gmm_weights/gmm_means, the
            # attributes set here (gmm_covs, class_weights, log_class_scaling)
            # do not match what __init__ creates (gmm_chols, class_logits) or
            # what get_weights exports for this type — presumably a stale
            # serialization format; verify against the checkpoints before
            # relying on this branch.
            self.log_gmm_weights = nn.Parameter(
                torch.as_tensor(weights["log_gmm_weights"])
            )
            self.gmm_means = nn.Parameter(torch.as_tensor(weights["gmm_means"]))
            self.gmm_covs = nn.Parameter(torch.as_tensor(weights["gmm_covs"]))
            self.class_weights = nn.Parameter(torch.as_tensor(weights["class_weights"]))
            self.log_class_scaling = nn.Parameter(
                torch.as_tensor(weights["log_class_scaling"])
            )
        self.update_distribution()
def update_distribution(self):
if self.ppp_intensity_type == "gaussian_mixture":
mixture_dict = {}
mixture_dict["means"] = self.gmm_means
mixture_dict["weights"] = self.log_gmm_weights.exp()
mixture_dict["reg_dist"] = torch.distributions.multivariate_normal.MultivariateNormal
mixture_dict["reg_kwargs"] = {"scale_tril": covariance_output_to_cholesky(self.gmm_chols)}
mixture_dict["cls_probs"] = self.class_logits.softmax(dim=-1)
mixture_dict["reg_dist_type"] = "gaussian"
mixture_dict["covs"] = None
self.mixture_from_predictions(mixture_dict)
    def forward_features(self, src):
        # Placeholder: data-dependent PPP intensity is not implemented yet, so
        # this always prints a notice and returns None.  Everything after the
        # first ``return`` is apparently unreachable scaffolding left in place
        # for the planned feature-based implementation.
        print("[NLLOD] Data dependent PPP not available yet")
        return
        # --- unreachable below this line ---
        out = self.ppp_feature_net(src)
        if self.ppp_intensity_type == "gaussian_mixture":
            pass
            # translate output to gmm params
        return
    def forward(
        self,
        src,
        image_sizes=[],
        num_classes=-1,
        integrate=False,
        src_is_features=False,
        dist_type="log_prob"
    ):
        """Evaluate the log PPP intensity at ``src``, or dispatch to a helper.

        Args:
            src: tensor of targets, one row each -- box coordinates in the
                first four columns plus (for the mixture intensity types) a
                trailing class column.
            image_sizes: per-image sizes. NOTE(review): mutable default
                argument; the gaussian-mixture branch calls tensor ops on it,
                so callers presumably pass a tensor -- confirm.
            num_classes (int, optional): only used when ``integrate`` is True.
            integrate (bool): if True, return the integral of the intensity
                instead of point evaluations.
            src_is_features (bool): if True, route through the (unimplemented)
                feature branch.
            dist_type (str): likelihood mode forwarded to mixture components.

        Returns:
            Tensor of log intensity values (or the integral when requested).
        """
        if src_is_features:
            return self.forward_features(src)
        if integrate:
            return self.integrate(image_sizes, num_classes)
        if self.ppp_intensity_type == "uniform":
            # Returns log intensity func value
            coord_log_prob = self.ppp_intensity_per_coord
            if src.shape[-1] > 4:
                src = src[..., :4]
            # keep gradients through src; the +1 handles coordinates at zero
            out = (src + 1) / (src.detach() + 1) * coord_log_prob
            out = out.sum(-1)
            class_log_prob = self.log_ppp_intensity_class
            out = out + class_log_prob
        elif self.ppp_intensity_type == "gaussian_mixture":
            if self.normalize_bboxes:
                # H,W -> (flip) -> W,H -> (repeat) -> W,H,W,H
                box_scaling = 1/image_sizes.flip((-1)).repeat(1,2).float()
                class_scaling = torch.ones((len(image_sizes),1)).to(src.device)
                # [1, 5]
                scaling = torch.cat([box_scaling, class_scaling], dim=-1)
                # [num_gt, 5]
                scaling = scaling.repeat(src.shape[0],1)
                src = src*scaling
            else:
                scaling = torch.ones_like(src)
            if self.mixture_dist.component_distribution:
                self.mixture_dist.component_distribution.set_dist_mode(dist_type)
            # intensity = rate * density; the scaling term is the
            # change-of-variables correction for the normalisation above
            out = self.mixture_dist.log_prob(src)
            out = out + self.ppp_rate.log()
            out = out + scaling.log().sum(dim=-1)
        elif self.ppp_intensity_type == "prediction_mixture":
            if self.mixture_dist.component_distribution:
                self.mixture_dist.component_distribution.set_dist_mode(dist_type)
            out = self.mixture_dist.log_prob(src)
            out = out + self.ppp_rate.log()
        elif self.ppp_intensity_type == "zero":
            # NOTE(review): reads ``self.dist`` here, unlike the other branches
            # which use ``self.mixture_dist`` -- confirm the attribute exists.
            out = self.dist.log_prob(src)
        # NOTE(review): ``out`` is unbound if ``ppp_intensity_type`` matched no
        # branch above; an explicit error would be clearer.
        return out
def integrate(self, image_sizes, num_classes):
if self.ppp_intensity_type == "uniform":
# Evaluate the integral of the intensity funciton of all possible inputs
coord_log_prob = self.ppp_intensity_per_coord
class_log_prob = self.log_ppp_intensity_class
# Divide by 2 because x1 < x2 and y1 < y2
image_part = torch.log(
image_sizes[:, 0] ** 2 / 2 * image_sizes[:, 1] ** 2 / 2
) + (4 * coord_log_prob)
class_part = math.log(num_classes) + class_log_prob
out = (image_part + class_part).exp()
elif self.ppp_intensity_type == "gaussian_mixture":
out = self.ppp_rate
elif self.ppp_intensity_type == "prediction_mixture":
out = self.ppp_rate
elif self.ppp_intensity_type == "zero":
out = torch.zeros(len(image_sizes)).to(image_sizes.device)
else:
out = torch.zeros(len(image_sizes)).to(image_sizes.device)
return out
| 24,254
| 36.488408
| 242
|
py
|
pmb-nll
|
pmb-nll-main/src/probabilistic_modeling/__init__.py
| 0
| 0
| 0
|
py
|
|
pmb-nll
|
pmb-nll-main/src/probabilistic_modeling/probabilistic_generalized_rcnn.py
|
import logging
from typing import Dict, List, Optional, Tuple, Union
# Detectron imports
import fvcore.nn.weight_init as weight_init
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.layers import Conv2d, Linear, ShapeSpec, cat, get_norm
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.box_head import ROI_BOX_HEAD_REGISTRY
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
from detectron2.structures import Boxes, ImageList, Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from fvcore.nn import smooth_l1_loss
# Project imports
from probabilistic_inference.inference_utils import get_dir_alphas
from torch import distributions, nn
from torch.nn import functional as F
from probabilistic_modeling.losses import negative_log_likelihood, reshape_box_preds
from probabilistic_modeling.modeling_utils import (
PoissonPointProcessIntensityFunction,
clamp_log_variance,
covariance_output_to_cholesky,
get_probabilistic_loss_weight,
unscented_transform,
PoissonPointUnion,
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@META_ARCH_REGISTRY.register()
class ProbabilisticGeneralizedRCNN(GeneralizedRCNN):
    """
    Probabilistic GeneralizedRCNN: a GeneralizedRCNN whose box predictor is
    replaced with a probabilistic head that can additionally estimate
    classification variance and box-regression covariance, and (for the
    PMB NLL loss) a Poisson point process clutter intensity.
    """

    def __init__(self, cfg):
        super().__init__(cfg)

        # Parse configs
        self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME
        self.compute_cls_var = self.cls_var_loss != "none"
        self.cls_var_num_samples = (
            cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES
        )

        self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME
        self.compute_bbox_cov = self.bbox_cov_loss != "none"
        self.bbox_cov_num_samples = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES
        )
        self.bbox_cov_dist_type = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
        )
        self.bbox_cov_type = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE
        )
        if self.bbox_cov_type == "diagonal":
            # Diagonal covariance matrix has N elements
            self.bbox_cov_dims = 4
        else:
            # Number of elements required to describe an NxN covariance matrix is
            # computed as: (N * (N + 1)) / 2
            self.bbox_cov_dims = 10

        self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
        self.use_dropout = self.dropout_rate != 0.0

        self.num_mc_dropout_runs = -1

        if (
            self.compute_bbox_cov
            and self.bbox_cov_loss == "pmb_negative_log_likelihood"
        ):
            ppp_constructor = lambda x: PoissonPointProcessIntensityFunction(
                cfg, **x
            )
        else:
            # ROBUSTNESS FIX: previously ``ppp_constructor`` was only bound
            # inside the branch above, so every other loss configuration
            # crashed with a NameError when constructing the box predictor
            # below.  The predictor explicitly accepts None and then skips
            # PPP construction.
            ppp_constructor = None
        self.nll_max_num_solutions = (
            cfg.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS
        )

        self.current_step = 0

        # Define custom probabilistic head
        self.roi_heads.box_predictor = ProbabilisticFastRCNNOutputLayers(
            cfg,
            input_shape=self.roi_heads.box_head.output_shape,
            compute_cls_var=self.compute_cls_var,
            cls_var_loss=self.cls_var_loss,
            cls_var_num_samples=self.cls_var_num_samples,
            compute_bbox_cov=self.compute_bbox_cov,
            bbox_cov_loss=self.bbox_cov_loss,
            bbox_cov_type=self.bbox_cov_type,
            bbox_cov_dims=self.bbox_cov_dims,
            bbox_cov_num_samples=self.bbox_cov_num_samples,
            ppp_constructor=ppp_constructor,
            nll_max_num_solutions=self.nll_max_num_solutions,
            bbox_cov_dist_type=self.bbox_cov_dist_type,
            matching_distance=cfg.MODEL.PROBABILISTIC_MODELING.MATCHING_DISTANCE,
            use_prediction_mixture=cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE,
        )

        # Send to device
        self.to(self.device)

    def get_ppp_intensity_function(self):
        """Return the PPP intensity module held by the box predictor."""
        return self.roi_heads.box_predictor.ppp_intensity_function

    def forward(
        self, batched_inputs, return_anchorwise_output=False, num_mc_dropout_runs=-1
    ):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                * image: Tensor, image in (C, H, W) format.
                * instances (optional): groundtruth :class:`Instances`
                * proposals (optional): :class:`Instances`, precomputed proposals.
                Other information that's included in the original dicts, such as:
                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.
            return_anchorwise_output (bool): returns raw output for probabilistic inference
            num_mc_dropout_runs (int): perform efficient monte-carlo dropout runs by running only the head and
                not full neural network.
        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        # BUGFIX: was a bare ``except:`` which also swallows SystemExit and
        # KeyboardInterrupt.  NOTE(review): the ``+=`` accumulates
        # ``storage.iter`` onto current_step each call, which grows much faster
        # than the iteration count -- looks suspicious, confirm intent.
        try:
            self.current_step += get_event_storage().iter
        except Exception:
            self.current_step += 1

        if not self.training and num_mc_dropout_runs == -1:
            if return_anchorwise_output:
                return self.produce_raw_output(batched_inputs)
            else:
                return self.inference(batched_inputs)
        elif self.training and num_mc_dropout_runs > 1:
            # MC-dropout: run the raw-output path repeatedly and collect.
            self.num_mc_dropout_runs = num_mc_dropout_runs
            output_list = []
            for i in range(num_mc_dropout_runs):
                output_list.append(self.produce_raw_output(batched_inputs))
            return output_list

        images = self.preprocess_image(batched_inputs)
        if "instances" in batched_inputs[0]:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
        elif "targets" in batched_inputs[0]:
            log_first_n(
                logging.WARN,
                "'targets' in the model inputs is now renamed to 'instances'!",
                n=10,
            )
            gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
        else:
            gt_instances = None

        features = self.backbone(images.tensor)

        if self.proposal_generator:
            proposals, proposal_losses = self.proposal_generator(
                images, features, gt_instances
            )
        else:
            assert "proposals" in batched_inputs[0]
            proposals = [x["proposals"].to(self.device) for x in batched_inputs]
            proposal_losses = {}

        # gt_instances are forwarded so the PMB NLL loss can match against them.
        _, detector_losses = self.roi_heads(
            images, features, proposals, gt_instances, current_step=self.current_step
        )
        if self.vis_period > 0:
            storage = get_event_storage()
            if storage.iter % self.vis_period == 0:
                # TODO: implement to visualize probabilistic outputs
                self.visualize_training(batched_inputs, proposals)

        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        return losses

    def produce_raw_output(self, batched_inputs, detected_instances=None):
        """
        Run inference on the given inputs and return proposal-wise output for later postprocessing.
        Args:
            batched_inputs (list[dict]): same as in :meth:`forward`
            detected_instances (None or list[Instances]): if not None, it
                contains an `Instances` object per image. The `Instances`
                object contains "pred_boxes" and "pred_classes" which are
                known boxes in the image.
                The inference will then skip the detection of bounding boxes,
                and only predict other per-ROI outputs.
        Returns:
            same as in :meth:`forward`.
        """
        raw_output = dict()
        images = self.preprocess_image(batched_inputs)
        features = self.backbone(images.tensor)

        if detected_instances is None:
            if self.proposal_generator:
                proposals, _ = self.proposal_generator(images, features, None)
            else:
                assert "proposals" in batched_inputs[0]
                proposals = [x["proposals"].to(self.device) for x in batched_inputs]

            # Create raw output dictionary
            raw_output.update({"proposals": proposals[0]})
            results, _ = self.roi_heads(
                images,
                features,
                proposals,
                None,
                produce_raw_output=True,
                num_mc_dropout_runs=self.num_mc_dropout_runs,
            )
        else:
            detected_instances = [x.to(self.device) for x in detected_instances]
            results = self.roi_heads.forward_with_given_boxes(
                features, detected_instances
            )

        box_cls, box_delta, box_cls_var, box_reg_var = results
        raw_output.update(
            {
                "box_cls": box_cls,
                "box_delta": box_delta,
                "box_cls_var": box_cls_var,
                "box_reg_var": box_reg_var,
            }
        )
        if (
            self.compute_bbox_cov
            and self.bbox_cov_loss == "pmb_negative_log_likelihood"
        ):
            # Also export the learned PPP intensity parameters.
            ppp_output = (
                self.roi_heads.box_predictor.ppp_intensity_function.get_weights()
            )
            raw_output.update({"ppp": ppp_output})
        return raw_output

    def visualize_training(self, batched_inputs, proposals):
        """
        A function used to visualize images and proposals. It shows ground truth
        bounding boxes on the original image and up to 20 top-scoring predicted
        object proposals on the original image. Users can implement different
        visualization functions for different models.
        Args:
            batched_inputs (list): a list that contains input to the model.
            proposals (list): a list that contains predicted proposals. Both
                batched_inputs and proposals should have the same length.
        """
        from core.visualization_tools.probabilistic_visualizer import (
            ProbabilisticVisualizer as Visualizer,
        )

        storage = get_event_storage()
        max_vis_prop = 20
        # Switch to eval mode for a clean forward pass, then restore training.
        with torch.no_grad():
            self.eval()
            predictions = self.produce_raw_output(batched_inputs)
            self.train()
        predictions = (
            predictions["box_cls"],
            predictions["box_delta"],
            predictions["box_cls_var"],
            predictions["box_reg_var"],
        )
        _, _, _, pred_covs = predictions
        boxes = self.roi_heads.box_predictor.predict_boxes(predictions, proposals)
        scores = self.roi_heads.box_predictor.predict_probs(predictions, proposals)
        image_shapes = [x.image_size for x in proposals]
        # Apply NMS without score threshold
        instances, kept_idx = fast_rcnn_inference(
            boxes,
            scores,
            image_shapes,
            0.0,
            self.roi_heads.box_predictor.test_nms_thresh,
            self.roi_heads.box_predictor.test_topk_per_image,
        )
        # Keep only the covariances/scores/boxes that survived NMS, per image.
        num_prop_per_image = [len(p) for p in proposals]
        pred_covs = pred_covs.split(num_prop_per_image)
        pred_covs = [pred_cov[kept] for pred_cov, kept in zip(pred_covs, kept_idx)]
        pred_scores = [score[kept] for score, kept in zip(scores, kept_idx)]
        pred_boxes = [box[kept] for box, kept in zip(boxes, kept_idx)]

        for i, (input, prop) in enumerate(zip(batched_inputs, proposals)):
            img = input["image"]
            img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
            v_gt = Visualizer(img, None)
            v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
            anno_img = v_gt.get_image()
            box_size = min(len(prop.proposal_boxes), max_vis_prop)
            v_pred = Visualizer(img, None)
            boxes = pred_boxes[i][0:box_size, :4].cpu().numpy()
            pred_cov_matrix = pred_covs[i][0:box_size, :4]
            pred_cov_matrix = clamp_log_variance(pred_cov_matrix)
            # Reconstruct the full covariance from its Cholesky factor.
            chol = covariance_output_to_cholesky(pred_cov_matrix)
            cov = (
                torch.matmul(chol, torch.transpose(chol, -1, -2)).cpu().detach().numpy()
            )
            v_pred = v_pred.overlay_covariance_instances(
                boxes=boxes, covariance_matrices=cov
            )
            prop_img = v_pred.get_image()
            vis_img = np.concatenate((anno_img, prop_img), axis=1)
            vis_img = vis_img.transpose(2, 0, 1)
            vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
            storage.put_image(vis_name, vis_img)
            break  # only visualize one image in a batch
@ROI_HEADS_REGISTRY.register()
class ProbabilisticROIHeads(StandardROIHeads):
    """
    Probabilistic ROI heads, inherit from standard ROI heads so can be used with mask RCNN in theory.

    Adds three pieces of per-forward state on top of StandardROIHeads:
    whether this call is an MC-dropout run, whether raw head outputs should be
    returned instead of postprocessed instances, and the current optimizer
    step (consumed by annealed losses in the box predictor).
    """

    def __init__(self, cfg, input_shape):
        super(ProbabilisticROIHeads, self).__init__(cfg, input_shape)
        # Per-call flags; (re)set at the top of every forward().
        self.is_mc_dropout_inference = False
        self.produce_raw_output = False
        self.current_step = 0

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
        num_mc_dropout_runs=-1,
        produce_raw_output=False,
        current_step=0.0,
    ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
        """
        See :class:`ROIHeads.forward`.

        Extra args relative to the base class:
            num_mc_dropout_runs: > 1 marks this call as an MC-dropout pass,
                which skips proposal sampling and loss computation.
            produce_raw_output: if True, return the raw box-predictor outputs
                without cascaded mask/keypoint prediction.
            current_step: optimizer step forwarded to the box-predictor losses.
        """
        self.is_mc_dropout_inference = num_mc_dropout_runs > 1
        self.produce_raw_output = produce_raw_output
        self.current_step = current_step

        del images
        if self.training and not self.is_mc_dropout_inference:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        # del targets

        if self.training and not self.is_mc_dropout_inference:
            # targets are kept (not deleted as in the base class) because the
            # PMB NLL loss matches predictions against ground-truth instances.
            losses = self._forward_box(features, proposals, targets)
            # Usually the original proposals used by the box head are used by the mask, keypoint
            # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
            # predicted by the box head.
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_keypoint(features, proposals))
            return proposals, losses
        else:
            pred_instances = self._forward_box(features, proposals, targets)
            if self.produce_raw_output:
                # Raw head outputs requested: skip cascaded prediction.
                return pred_instances, {}
            # During inference cascaded prediction is used: the mask and keypoints heads are only
            # applied to the top scoring box detections.
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}

    def _forward_box(
        self,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        gt_instances: List[Instances],
    ) -> Union[Dict[str, torch.Tensor], List[Instances]]:
        """
        Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
        the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            proposals (list[Instances]): the per-image object proposals with
                their matching ground truth.
                Each has fields "proposal_boxes", and "objectness_logits",
                "gt_classes", "gt_boxes".
            gt_instances (list[Instances]): ground-truth instances, forwarded
                to the box-predictor loss (used by the PMB NLL loss).
        Returns:
            In training, a dict of losses.
            In inference, a list of `Instances`, the predicted instances.
        """
        features = [features[f] for f in self.in_features]
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        box_features = self.box_head(box_features)
        predictions = self.box_predictor(box_features)
        del box_features

        if self.produce_raw_output:
            # Return the raw (scores, deltas, vars, covs) tuple untouched.
            return predictions

        if self.training:
            losses = self.box_predictor.losses(
                predictions, proposals, self.current_step, gt_instances
            )
            # proposals is modified in-place below, so losses must be computed first.
            if self.train_on_pred_boxes:
                with torch.no_grad():
                    pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
                        predictions, proposals
                    )
                    for proposals_per_image, pred_boxes_per_image in zip(
                        proposals, pred_boxes
                    ):
                        proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
            return losses
        else:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals)
            return pred_instances
class ProbabilisticFastRCNNOutputLayers(nn.Module):
"""
Four linear layers for predicting Fast R-CNN outputs:
(1) proposal-to-detection box regression deltas
(2) classification scores
(3) box regression deltas covariance parameters (if needed)
(4) classification logits variance (if needed)
"""
@configurable
def __init__(
self,
input_shape,
*,
box2box_transform,
num_classes,
cls_agnostic_bbox_reg=False,
smooth_l1_beta=0.0,
test_score_thresh=0.0,
test_nms_thresh=0.5,
test_topk_per_image=100,
compute_cls_var=False,
compute_bbox_cov=False,
bbox_cov_dims=4,
cls_var_loss="none",
cls_var_num_samples=10,
bbox_cov_loss="none",
bbox_cov_type="diagonal",
dropout_rate=0.0,
annealing_step=0,
bbox_cov_num_samples=1000,
ppp_constructor=None,
nll_max_num_solutions=5,
bbox_cov_dist_type=None,
matching_distance="log_prob",
use_prediction_mixture=False,
):
"""
NOTE: this interface is experimental.
Args:
input_shape (ShapeSpec): shape of the input feature to this module
box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
num_classes (int): number of foreground classes
cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
smooth_l1_beta (float): transition point from L1 to L2 loss.
test_score_thresh (float): threshold to filter predictions results.
test_nms_thresh (float): NMS threshold for prediction results.
test_topk_per_image (int): number of top predictions to produce per image.
compute_cls_var (bool): compute classification variance
compute_bbox_cov (bool): compute box covariance regression parameters.
bbox_cov_dims (int): 4 for diagonal covariance, 10 for full covariance.
cls_var_loss (str): name of classification variance loss.
cls_var_num_samples (int): number of samples to be used for loss computation. Usually between 10-100.
bbox_cov_loss (str): name of box covariance loss.
bbox_cov_type (str): 'diagonal' or 'full'. This is used to train with loss functions that accept both types.
dropout_rate (float): 0-1, probability of drop.
annealing_step (int): step used for KL-divergence in evidential loss to fully be functional.
ppp_intensity_function (func): function that returns PPP intensity given sample box
nll_max_num_solutions (int): Maximum NLL solutions to consider when computing NLL-PMB loss
"""
super().__init__()
if isinstance(input_shape, int): # some backward compatibility
input_shape = ShapeSpec(channels=input_shape)
self.num_classes = num_classes
input_size = (
input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)
)
self.compute_cls_var = compute_cls_var
self.compute_bbox_cov = compute_bbox_cov
self.bbox_cov_dims = bbox_cov_dims
self.bbox_cov_num_samples = bbox_cov_num_samples
self.dropout_rate = dropout_rate
self.use_dropout = self.dropout_rate != 0.0
self.cls_var_loss = cls_var_loss
self.cls_var_num_samples = cls_var_num_samples
self.annealing_step = annealing_step
self.bbox_cov_loss = bbox_cov_loss
self.bbox_cov_type = bbox_cov_type
self.bbox_cov_dist_type = bbox_cov_dist_type
# The prediction layer for num_classes foreground classes and one background class
# (hence + 1)
self.cls_score = Linear(input_size, num_classes + 1)
num_bbox_reg_classes = 1.0 if cls_agnostic_bbox_reg else num_classes
box_dim = len(box2box_transform.weights)
self.bbox_pred = Linear(input_size, num_bbox_reg_classes * box_dim)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
if self.compute_cls_var:
self.cls_var = Linear(input_size, num_classes + 1)
nn.init.normal_(self.cls_var.weight, std=0.0001)
nn.init.constant_(self.cls_var.bias, 0)
if self.compute_bbox_cov:
self.bbox_cov = Linear(input_size, num_bbox_reg_classes * bbox_cov_dims)
nn.init.normal_(self.bbox_cov.weight, std=0.0001)
nn.init.constant_(self.bbox_cov.bias, 0.0)
self.box2box_transform = box2box_transform
self.smooth_l1_beta = smooth_l1_beta
self.test_score_thresh = test_score_thresh
self.test_nms_thresh = test_nms_thresh
self.test_topk_per_image = test_topk_per_image
self.ppp_intensity_function = ppp_constructor({"device": device}) if ppp_constructor is not None else None
self.ppp_constructor = ppp_constructor
self.nll_max_num_solutions = nll_max_num_solutions
self.matching_distance = matching_distance
self.use_prediction_mixture = use_prediction_mixture
    @classmethod
    def from_config(
        cls,
        cfg,
        input_shape,
        compute_cls_var,
        cls_var_loss,
        cls_var_num_samples,
        compute_bbox_cov,
        bbox_cov_loss,
        bbox_cov_type,
        bbox_cov_dims,
        bbox_cov_num_samples,
        ppp_constructor,
        nll_max_num_solutions,
    ):
        """Build the kwargs dict consumed by ``__init__`` (via @configurable).

        Most probabilistic settings are passed through verbatim; the remaining
        values are read from the detectron2 config node.
        """
        return {
            "input_shape": input_shape,
            "box2box_transform": Box2BoxTransform(
                weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
            ),
            # fmt: off
            "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES,
            "cls_agnostic_bbox_reg": cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
            "smooth_l1_beta": cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
            "test_score_thresh": cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
            "test_nms_thresh": cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
            "test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
            "compute_cls_var": compute_cls_var,
            "cls_var_loss": cls_var_loss,
            "cls_var_num_samples": cls_var_num_samples,
            "compute_bbox_cov": compute_bbox_cov,
            "bbox_cov_dims": bbox_cov_dims,
            "bbox_cov_loss": bbox_cov_loss,
            "bbox_cov_type": bbox_cov_type,
            "dropout_rate": cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE,
            # Falls back to the second LR step when no annealing step is configured.
            "annealing_step": cfg.SOLVER.STEPS[1] if cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP <= 0 else cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP,
            "bbox_cov_num_samples": bbox_cov_num_samples,
            "ppp_constructor": ppp_constructor,
            "nll_max_num_solutions" : nll_max_num_solutions,
            # NOTE(review): dist_type is filled from COVARIANCE_TYPE; elsewhere
            # the dist type comes from BBOX_COV_LOSS.DISTRIBUTION_TYPE -- this
            # looks like a copy-paste slip; confirm whether from_config is used.
            'bbox_cov_dist_type': cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE,
            "use_prediction_mixture": cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE
            # fmt: on
        }
def forward(self, x):
"""
Args:
x: per-region features of shape (N, ...) for N bounding boxes to predict.
Returns:
Tensor: Nx(K+1) logits for each box
Tensor: Nx4 or Nx(Kx4) bounding box regression deltas.
Tensor: Nx(K+1) logits variance for each box.
Tensor: Nx4(10) or Nx(Kx4(10)) covariance matrix parameters. 4 if diagonal, 10 if full.
"""
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
scores = self.cls_score(x)
proposal_deltas = self.bbox_pred(x)
# Compute logits variance if needed
if self.compute_cls_var:
score_vars = self.cls_var(x)
else:
score_vars = None
# Compute box covariance if needed
if self.compute_bbox_cov:
proposal_covs = self.bbox_cov(x)
else:
proposal_covs = None
return scores, proposal_deltas, score_vars, proposal_covs
    def losses(self, predictions, proposals, current_step=0, gt_instances=None):
        """
        Compute classification and box-regression losses, including the
        configured probabilistic variants (loss attenuation, evidential,
        NLL / moment matching / energy / PMB-NLL covariance losses).

        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features
                that were used to compute predictions.
            current_step: current optimizer step. Used for losses with an annealing component.
            gt_instances: list of ground truth instances
        Returns:
            Dict[str, Tensor]: dict of losses
        """
        # NOTE(review): declared global because the regression branch below
        # reassigns ``device`` -- that mutates the module-level global.
        global device

        # Overwrite later
        use_nll_loss = False

        (
            pred_class_logits,
            pred_proposal_deltas,
            pred_class_logits_var,
            pred_proposal_covs,
        ) = predictions

        if len(proposals):
            box_type = type(proposals[0].proposal_boxes)
            # cat(..., dim=0) concatenates over all images in the batch
            proposals_boxes = box_type.cat([p.proposal_boxes for p in proposals])
            assert (
                not proposals_boxes.tensor.requires_grad
            ), "Proposals should not require gradients!"

            # The following fields should exist only when training.
            # NOTE(review): ``gt_boxes``/``gt_classes`` stay unbound when the
            # proposals carry no gt fields; later code assumes training inputs.
            if proposals[0].has("gt_boxes"):
                gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
                assert proposals[0].has("gt_classes")
                gt_classes = cat([p.gt_classes for p in proposals], dim=0)
        else:
            proposals_boxes = Boxes(
                torch.zeros(0, 4, device=pred_proposal_deltas.device)
            )

        no_instances = len(proposals) == 0  # no instances found

        # Compute Classification Loss
        if no_instances:
            # TODO 0.0 * pred.sum() is enough since PT1.6
            # Zero-valued loss that still keeps the graph connected.
            loss_cls = 0.0 * F.cross_entropy(
                pred_class_logits,
                torch.zeros(0, dtype=torch.long, device=pred_class_logits.device),
                reduction="sum",
            )
        else:
            if self.compute_cls_var:
                # Compute classification variance according to:
                # "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
                if self.cls_var_loss == "loss_attenuation":
                    num_samples = self.cls_var_num_samples

                    # Compute standard deviation
                    pred_class_logits_var = torch.sqrt(torch.exp(pred_class_logits_var))

                    # Produce normal samples using logits as the mean and the standard deviation computed above
                    # Scales with GPU memory. 12 GB ---> 3 Samples per anchor for
                    # COCO dataset.
                    univariate_normal_dists = distributions.normal.Normal(
                        pred_class_logits, scale=pred_class_logits_var
                    )

                    pred_class_stochastic_logits = univariate_normal_dists.rsample(
                        (num_samples,)
                    )
                    pred_class_stochastic_logits = pred_class_stochastic_logits.view(
                        (
                            pred_class_stochastic_logits.shape[1] * num_samples,
                            pred_class_stochastic_logits.shape[2],
                            -1,
                        )
                    )
                    pred_class_logits = pred_class_stochastic_logits.squeeze(2)

                    # Produce copies of the target classes to match the number of
                    # stochastic samples.
                    gt_classes_target = torch.unsqueeze(gt_classes, 0)
                    gt_classes_target = torch.repeat_interleave(
                        gt_classes_target, num_samples, dim=0
                    ).view((gt_classes_target.shape[1] * num_samples, -1))
                    gt_classes_target = gt_classes_target.squeeze(1)

                    loss_cls = F.cross_entropy(
                        pred_class_logits, gt_classes_target, reduction="mean"
                    )
                elif self.cls_var_loss == "evidential":
                    # ToDo: Currently does not provide any reasonable mAP Results
                    # (15% mAP)

                    # Assume dirichlet parameters are output.
                    alphas = get_dir_alphas(pred_class_logits)

                    # Get sum of all alphas
                    dirichlet_s = alphas.sum(1).unsqueeze(1)

                    # Generate one hot vectors for ground truth
                    one_hot_vectors = torch.nn.functional.one_hot(
                        gt_classes, alphas.shape[1]
                    )

                    # Compute loss. This loss attempts to put all evidence on the
                    # correct location.
                    per_instance_loss = one_hot_vectors * (
                        torch.digamma(dirichlet_s) - torch.digamma(alphas)
                    )

                    # Compute KL divergence regularizer loss
                    estimated_dirichlet = torch.distributions.dirichlet.Dirichlet(
                        (alphas - 1.0) * (1.0 - one_hot_vectors) + 1.0
                    )
                    uniform_dirichlet = torch.distributions.dirichlet.Dirichlet(
                        torch.ones_like(one_hot_vectors).type(torch.FloatTensor).to(device)
                    )
                    kl_regularization_loss = torch.distributions.kl.kl_divergence(
                        estimated_dirichlet, uniform_dirichlet
                    )

                    # Compute final loss
                    annealing_multiplier = torch.min(
                        torch.as_tensor(current_step / self.annealing_step).to(device),
                        torch.as_tensor(1.0).to(device),
                    )

                    per_proposal_loss = (
                        per_instance_loss.sum(1)
                        + annealing_multiplier * kl_regularization_loss
                    )

                    # Compute evidence auxiliary loss
                    evidence_maximization_loss = smooth_l1_loss(
                        dirichlet_s,
                        100.0 * torch.ones_like(dirichlet_s).to(device),
                        beta=self.smooth_l1_beta,
                        reduction="mean",
                    )

                    evidence_maximization_loss *= annealing_multiplier

                    # Compute final loss
                    foreground_loss = per_proposal_loss[
                        (gt_classes >= 0) & (gt_classes < pred_class_logits.shape[1] - 1)
                    ]
                    background_loss = per_proposal_loss[
                        gt_classes == pred_class_logits.shape[1] - 1
                    ]

                    loss_cls = (
                        torch.mean(foreground_loss) + torch.mean(background_loss)
                    ) / 2 + 0.01 * evidence_maximization_loss
            else:
                # Plain cross-entropy when no classification variance is used.
                loss_cls = F.cross_entropy(
                    pred_class_logits, gt_classes, reduction="mean"
                )

        # Compute regression loss:
        if no_instances:
            # TODO 0.0 * pred.sum() is enough since PT1.6
            loss_box_reg = 0.0 * smooth_l1_loss(
                pred_proposal_deltas,
                torch.zeros_like(pred_proposal_deltas),
                0.0,
                reduction="sum",
            )
        else:
            gt_proposal_deltas = self.box2box_transform.get_deltas(
                proposals_boxes.tensor, gt_boxes.tensor
            )
            box_dim = gt_proposal_deltas.size(1)  # 4 or 5
            cls_agnostic_bbox_reg = pred_proposal_deltas.size(1) == box_dim
            # NOTE(review): this rebinds the module-level global ``device``
            # (see ``global device`` above).
            device = pred_proposal_deltas.device

            bg_class_ind = pred_class_logits.shape[1] - 1
            # Box delta loss is only computed between the prediction for the gt class k
            # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
            # for non-gt classes and background.
            # Empty fg_inds produces a valid loss of zero as long as the size_average
            # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally
            # and would produce a nan loss).
            fg_inds = torch.nonzero(
                (gt_classes >= 0) & (gt_classes < bg_class_ind), as_tuple=True
            )[0]
            if cls_agnostic_bbox_reg:
                # pred_proposal_deltas only corresponds to foreground class for
                # agnostic
                # NOTE(review): ``gt_covar_class_cols`` is only bound in the
                # else-branch but used below when compute_bbox_cov is True --
                # class-agnostic + covariance would raise NameError; confirm.
                gt_class_cols = torch.arange(box_dim, device=device)
            else:
                fg_gt_classes = gt_classes[fg_inds]
                # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
                # where b is the dimension of box representation (4 or 5)
                # Note that compared to Detectron1,
                # we do not perform bounding box regression for background
                # classes.
                gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(
                    box_dim, device=device
                )
                gt_covar_class_cols = self.bbox_cov_dims * fg_gt_classes[
                    :, None
                ] + torch.arange(self.bbox_cov_dims, device=device)

            loss_reg_normalizer = gt_classes.numel()

            pred_proposal_deltas = pred_proposal_deltas[fg_inds[:, None], gt_class_cols]
            gt_proposals_delta = gt_proposal_deltas[fg_inds]

            if self.compute_bbox_cov:
                pred_proposal_covs = pred_proposal_covs[
                    fg_inds[:, None], gt_covar_class_cols
                ]
                pred_proposal_covs = clamp_log_variance(pred_proposal_covs)

                if self.bbox_cov_loss == "negative_log_likelihood":
                    if self.bbox_cov_type == "diagonal":
                        # Get foreground proposals.
                        _proposals_boxes = proposals_boxes.tensor[fg_inds]

                        # Compute regression negative log likelihood loss according to:
                        # "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
                        loss_box_reg = (
                            0.5
                            * torch.exp(-pred_proposal_covs)
                            * smooth_l1_loss(
                                pred_proposal_deltas,
                                gt_proposals_delta,
                                beta=self.smooth_l1_beta,
                            )
                        )
                        loss_covariance_regularize = 0.5 * pred_proposal_covs
                        loss_box_reg += loss_covariance_regularize

                        loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer
                    else:
                        # Multivariate Gaussian Negative Log Likelihood loss using pytorch
                        # distributions.multivariate_normal.log_prob()
                        forecaster_cholesky = covariance_output_to_cholesky(
                            pred_proposal_covs
                        )

                        multivariate_normal_dists = (
                            distributions.multivariate_normal.MultivariateNormal(
                                pred_proposal_deltas, scale_tril=forecaster_cholesky
                            )
                        )

                        loss_box_reg = -multivariate_normal_dists.log_prob(
                            gt_proposals_delta
                        )
                        loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer
                elif self.bbox_cov_loss == "second_moment_matching":
                    # Compute regression covariance using second moment
                    # matching.
                    loss_box_reg = smooth_l1_loss(
                        pred_proposal_deltas, gt_proposals_delta, self.smooth_l1_beta
                    )
                    errors = pred_proposal_deltas - gt_proposals_delta
                    if self.bbox_cov_type == "diagonal":
                        # Handle diagonal case
                        second_moment_matching_term = smooth_l1_loss(
                            torch.exp(pred_proposal_covs),
                            errors ** 2,
                            beta=self.smooth_l1_beta,
                        )
                        loss_box_reg += second_moment_matching_term
                        loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer
                    else:
                        # Handle full covariance case
                        errors = torch.unsqueeze(errors, 2)
                        gt_error_covar = torch.matmul(
                            errors, torch.transpose(errors, 2, 1)
                        )

                        # This is the cholesky decomposition of the covariance matrix.
                        # We reconstruct it from 10 estimated parameters as a
                        # lower triangular matrix.
                        forecaster_cholesky = covariance_output_to_cholesky(
                            pred_proposal_covs
                        )

                        predicted_covar = torch.matmul(
                            forecaster_cholesky,
                            torch.transpose(forecaster_cholesky, 2, 1),
                        )

                        second_moment_matching_term = smooth_l1_loss(
                            predicted_covar,
                            gt_error_covar,
                            beta=self.smooth_l1_beta,
                            reduction="sum",
                        )
                        loss_box_reg = (
                            torch.sum(loss_box_reg) + second_moment_matching_term
                        ) / loss_reg_normalizer
                elif self.bbox_cov_loss == "energy_loss":
                    forecaster_cholesky = covariance_output_to_cholesky(
                        pred_proposal_covs
                    )

                    # Define per-anchor Distributions
                    multivariate_normal_dists = (
                        distributions.multivariate_normal.MultivariateNormal(
                            pred_proposal_deltas, scale_tril=forecaster_cholesky
                        )
                    )
                    # Define Monte-Carlo Samples
                    distributions_samples = multivariate_normal_dists.rsample(
                        (self.bbox_cov_num_samples + 1,)
                    )

                    distributions_samples_1 = distributions_samples[
                        0 : self.bbox_cov_num_samples, :, :
                    ]
                    distributions_samples_2 = distributions_samples[
                        1 : self.bbox_cov_num_samples + 1, :, :
                    ]

                    # Compute energy score
                    loss_covariance_regularize = (
                        -smooth_l1_loss(
                            distributions_samples_1,
                            distributions_samples_2,
                            beta=self.smooth_l1_beta,
                            reduction="sum",
                        )
                        / self.bbox_cov_num_samples
                    )  # Second term

                    gt_proposals_delta_samples = torch.repeat_interleave(
                        gt_proposals_delta.unsqueeze(0),
                        self.bbox_cov_num_samples,
                        dim=0,
                    )

                    loss_first_moment_match = (
                        2.0
                        * smooth_l1_loss(
                            distributions_samples_1,
                            gt_proposals_delta_samples,
                            beta=self.smooth_l1_beta,
                            reduction="sum",
                        )
                        / self.bbox_cov_num_samples
                    )  # First term

                    # Final Loss
                    loss_box_reg = (
                        loss_first_moment_match + loss_covariance_regularize
                    ) / loss_reg_normalizer
                elif self.bbox_cov_loss == "pmb_negative_log_likelihood":
                    # Full-detection PMB NLL; also produces a classification
                    # loss that is annealed in below.
                    losses = self.nll_od_loss_with_nms(
                        predictions, proposals, gt_instances
                    )
                    loss_box_reg = losses["loss_box_reg"]
                    use_nll_loss = True
                else:
                    raise ValueError(
                        "Invalid regression loss name {}.".format(self.bbox_cov_loss)
                    )

                # Perform loss annealing. Not really essential in Generalized-RCNN case, but good practice for more
                # elaborate regression variance losses.
                standard_regression_loss = smooth_l1_loss(
                    pred_proposal_deltas,
                    gt_proposals_delta,
                    self.smooth_l1_beta,
                    reduction="sum",
                )
                standard_regression_loss = (
                    standard_regression_loss / loss_reg_normalizer
                )

                probabilistic_loss_weight = get_probabilistic_loss_weight(
                    current_step, self.annealing_step
                )

                loss_box_reg = (
                    (1.0 - probabilistic_loss_weight) * standard_regression_loss
                    + probabilistic_loss_weight * loss_box_reg
                )
                if use_nll_loss:
                    loss_cls = (1.0 - probabilistic_loss_weight) * loss_cls
            else:
                loss_box_reg = smooth_l1_loss(
                    pred_proposal_deltas,
                    gt_proposals_delta,
                    self.smooth_l1_beta,
                    reduction="sum",
                )
                loss_box_reg = loss_box_reg / loss_reg_normalizer

        # When the PMB NLL path ran, extend its loss dict; otherwise build one.
        if use_nll_loss:
            losses["loss_cls"] = loss_cls
            losses["loss_box_reg"] = loss_box_reg
        else:
            losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}
        return losses
    def nll_od_loss_with_nms(
        self,
        predictions: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
        proposals: List[Instances],
        gt_instances,
    ):
        """Compute the PMB negative-log-likelihood detection loss after NMS.

        Pipeline: (1) decode boxes/scores from `predictions`, (2) run NMS with
        score threshold 0.0 to pick surviving proposals, (3) build per-image
        Gaussian/Laplace box distributions (via an unscented transform to box
        space), (4) optionally fold low-confidence predictions into the
        Poisson point process (PPP), and (5) evaluate
        `negative_log_likelihood` on the result.

        Args:
            predictions: (scores, proposal_deltas, score_covs, proposal_covs)
                as produced by the box head's forward.
            proposals (list[Instances]): proposals per image; `proposal_boxes`
                and `image_size` fields are read.
            gt_instances: ground-truth Instances per image (`gt_classes`,
                `gt_boxes` fields are read).

        Returns:
            dict[str, Tensor]: "loss_box_reg" carries the NLL (with gradient);
            "loss_regression", "loss_cls_matched", "loss_cls_unmatched" are
            detached logging-only decompositions.
        """
        # Matching distance may encode a covariance scaling as a suffix,
        # e.g. "log_prob_2.0" -> distance "log_prob" with scaling 2.0.
        if "log_prob" in self.matching_distance and self.matching_distance != "log_prob":
            covar_scaling = float(self.matching_distance.split("_")[-1])
            matching_distance = "log_prob"
        else:
            covar_scaling = 1
            matching_distance = self.matching_distance
        self.ppp_intensity_function.update_distribution()
        _, pred_deltas, _, pred_covs = predictions
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        # Clamp probabilities away from 0/1 so log terms stay finite.
        scores = [score.clamp(1e-6, 1 - 1e-6) for score in scores]
        _, num_classes = scores[0].shape
        num_classes -= 1  # do not count background class
        image_shapes = [x.image_size for x in proposals]
        num_prop_per_image = [len(p) for p in proposals]
        # Apply NMS without score threshold
        instances, kept_idx = fast_rcnn_inference(
            boxes,
            scores,
            image_shapes,
            0.0,
            self.test_nms_thresh,
            self.test_topk_per_image,
        )
        # A proposal can survive NMS for several classes; keep it once.
        kept_idx = [k.unique() for k in kept_idx]
        pred_covs = pred_covs.split(num_prop_per_image)
        pred_deltas = pred_deltas.split(num_prop_per_image)
        kept_proposals = [
            prop.proposal_boxes.tensor[idx] for prop, idx in zip(proposals, kept_idx)
        ]
        pred_covs = [pred_cov[kept] for pred_cov, kept in zip(pred_covs, kept_idx)]
        # Per-class Cholesky factors of the (clamped) delta covariances.
        nll_pred_cov = [
            covariance_output_to_cholesky(clamp_log_variance(reshape_box_preds(cov, num_classes)))
            for cov in pred_covs
        ]
        nll_scores = [score[kept] for score, kept in zip(scores, kept_idx)]
        nll_pred_deltas = [
            reshape_box_preds(delta[kept], num_classes)
            for delta, kept in zip(pred_deltas, kept_idx)
        ]
        trans_func = lambda x,y: self.box2box_transform.apply_deltas(x,y)
        # Push delta-space Gaussians through the (nonlinear) delta->box
        # decoding with an unscented transform, per image.
        box_means = []
        box_chols = []
        bs = len(nll_pred_deltas)
        for i in range(bs):
            box_mean, box_chol = unscented_transform(nll_pred_deltas[i], nll_pred_cov[i], kept_proposals[i], trans_func)
            box_means.append(box_mean)
            box_chols.append(box_chol)
        nll_gt_classes = [instances.gt_classes for instances in gt_instances]
        gt_boxes = [instances.gt_boxes.tensor for instances in gt_instances]
        # Regression distribution family used inside the NLL.
        if self.bbox_cov_dist_type == "gaussian":
            regression_dist = (
                lambda x, y: distributions.multivariate_normal.MultivariateNormal(
                    loc=x, scale_tril=y
                )
            )
        elif self.bbox_cov_dist_type == "laplacian":
            # Laplace scale chosen so its variance matches the Gaussian's
            # diagonal (diag/sqrt(2)).
            regression_dist = lambda x, y: distributions.laplace.Laplace(
                loc=x, scale=y.diagonal(dim1=-2, dim2=-1) / np.sqrt(2)
            )
        else:
            raise Exception(
                f"Bounding box uncertainty distribution {self.bbox_cov_dist_type} is not available."
            )
        if self.use_prediction_mixture:
            # Low-confidence predictions become part of the PPP intensity
            # (a mixture), the rest stay as Bernoulli components.
            ppps = []
            src_boxes_tot = []
            src_box_chol_tot = []
            src_boxes_deltas_tot = []
            src_boxes_deltas_chol_tot = []
            src_scores_tot = []
            gt_box_deltas = []
            for i in range(bs):
                image_shape = image_shapes[i]
                h,w = image_shape
                # Normalize box coordinates to [0, 1] by image size.
                scaling = torch.tensor([1/w,1/h],device=box_means[i].device).repeat(2)
                pred_box_means = box_means[i]*scaling
                pred_box_chols = torch.diag_embed(scaling)@box_chols[i]
                pred_box_deltas = nll_pred_deltas[i]
                pred_box_delta_chols = nll_pred_cov[i]
                pred_cls_probs = nll_scores[i]
                #max_conf = pred_cls_probs[..., :num_classes].max(dim=1)[0]
                max_conf = 1 - pred_cls_probs[..., -1]
                # Predictions at or below the confidence threshold go to the PPP.
                ppp_preds_idx = (
                    max_conf <= self.ppp_intensity_function.ppp_confidence_thres
                )
                props = kept_proposals[i][ppp_preds_idx.logical_not()]
                # Get delta between each GT and proposal, batch-wise
                tmp = torch.stack(
                    [
                        self.box2box_transform.get_deltas(
                            props,
                            gt_boxes[i][j].unsqueeze(0).repeat(len(props), 1),
                        )
                        for j in range(len(gt_boxes[i]))
                    ]
                )
                gt_box_deltas.append(
                    tmp.permute(1, 0, 2)
                )  # [gt,pred,boxdim] -> [pred, gt, boxdim]
                gt_boxes[i] = gt_boxes[i]*scaling
                # First mixture: unscaled covariances -> used for the loss PPP.
                mixture_dict = {}
                mixture_dict["weights"] = max_conf[ppp_preds_idx]
                mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
                selected_chols = pred_box_chols[ppp_preds_idx, 0]
                mixture_dict["covs"] = selected_chols@(selected_chols.transpose(-1,-2))
                mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :self.num_classes]
                mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
                if self.bbox_cov_dist_type == "gaussian":
                    mixture_dict[
                        "reg_dist"
                    ] = distributions.multivariate_normal.MultivariateNormal
                    mixture_dict["reg_kwargs"] = {
                        "scale_tril": selected_chols
                    }
                elif self.bbox_cov_dist_type == "laplacian":
                    mixture_dict["reg_dist"] = distributions.laplace.Laplace
                    mixture_dict["reg_kwargs"] = {
                        "scale": (
                            selected_chols.diagonal(dim1=-2, dim2=-1)
                            / np.sqrt(2)
                        )
                    }
                loss_ppp = PoissonPointUnion()
                loss_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
                loss_ppp.add_ppp(self.ppp_intensity_function)
                # Second mixture: covariances scaled by `covar_scaling` ->
                # used only for matching.
                mixture_dict = {}
                mixture_dict["weights"] = max_conf[ppp_preds_idx]
                mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
                scale_mat = torch.eye(pred_box_chols.shape[-1]).to(pred_box_chols.device)*covar_scaling
                scaled_chol = scale_mat@pred_box_chols[ppp_preds_idx, 0]
                mixture_dict["covs"] = (scaled_chol)@(scaled_chol.transpose(-1,-2))
                mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :self.num_classes]
                mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
                if self.bbox_cov_dist_type == "gaussian":
                    mixture_dict[
                        "reg_dist"
                    ] = distributions.multivariate_normal.MultivariateNormal
                    mixture_dict["reg_kwargs"] = {
                        "scale_tril": scaled_chol
                    }
                elif self.bbox_cov_dist_type == "laplacian":
                    mixture_dict["reg_dist"] = distributions.laplace.Laplace
                    mixture_dict["reg_kwargs"] = {
                        "scale": (
                            (scaled_chol).diagonal(dim1=-2, dim2=-1)
                            / np.sqrt(2)
                        )
                    }
                match_ppp = PoissonPointUnion()
                match_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
                match_ppp.add_ppp(self.ppp_intensity_function)
                ppps.append({"matching": match_ppp, "loss": loss_ppp})
                # High-confidence predictions remain Bernoulli components.
                src_boxes_tot.append(pred_box_means[ppp_preds_idx.logical_not()])
                src_box_chol_tot.append(pred_box_chols[ppp_preds_idx.logical_not()])
                src_scores_tot.append(pred_cls_probs[ppp_preds_idx.logical_not()])
                src_boxes_deltas_tot.append(pred_box_deltas[ppp_preds_idx.logical_not()])
                src_boxes_deltas_chol_tot.append(pred_box_delta_chols[ppp_preds_idx.logical_not()])
            nll_pred_deltas = src_boxes_deltas_tot
            nll_pred_delta_chols = src_boxes_deltas_chol_tot
            nll_pred_boxes = src_boxes_tot
            nll_pred_cov = src_box_chol_tot
            nll_scores = src_scores_tot
            use_target_delta_matching = False
        elif self.ppp_intensity_function.ppp_intensity_type == "gaussian_mixture":
            # All predictions stay Bernoulli; the PPP is the learned GMM.
            ppps = []
            src_boxes_tot = []
            src_box_chol_tot = []
            src_boxes_deltas_tot = []
            src_boxes_deltas_chol_tot = []
            src_scores_tot = []
            gt_box_deltas = []
            for i in range(bs):
                image_shape = image_shapes[i]
                h,w = image_shape
                # Normalize box coordinates to [0, 1] by image size.
                scaling = torch.tensor([1/w,1/h],device=box_means[i].device).repeat(2)
                pred_box_means = box_means[i]*scaling
                pred_box_chols = torch.diag_embed(scaling)@box_chols[i]
                pred_box_deltas = nll_pred_deltas[i]
                pred_box_delta_chols = nll_pred_cov[i]
                pred_cls_probs = nll_scores[i]
                props = kept_proposals[i]
                # Get delta between each GT and proposal, batch-wise
                tmp = torch.stack(
                    [
                        self.box2box_transform.get_deltas(
                            props,
                            gt_boxes[i][j].unsqueeze(0).repeat(len(props), 1),
                        )
                        for j in range(len(gt_boxes[i]))
                    ]
                )
                gt_box_deltas.append(
                    tmp.permute(1, 0, 2)
                )  # [gt,pred,boxdim] -> [pred, gt, boxdim]
                gt_boxes[i] = gt_boxes[i]*scaling
                src_boxes_tot.append(pred_box_means)
                src_box_chol_tot.append(pred_box_chols)
                src_scores_tot.append(pred_cls_probs)
                src_boxes_deltas_tot.append(pred_box_deltas)
                src_boxes_deltas_chol_tot.append(pred_box_delta_chols)
            nll_pred_deltas = src_boxes_deltas_tot
            nll_pred_delta_chols = src_boxes_deltas_chol_tot
            nll_pred_boxes = src_boxes_tot
            nll_pred_cov = src_box_chol_tot
            nll_scores = src_scores_tot
            use_target_delta_matching = False
            ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
        else:
            # Delta-space NLL: match and evaluate in regression-delta space.
            gt_box_deltas = []
            for i in range(len(gt_boxes)):
                # Get delta between each GT and proposal, batch-wise
                tmp = torch.stack(
                    [
                        self.box2box_transform.get_deltas(
                            kept_proposals[i],
                            gt_boxes[i][j].unsqueeze(0).repeat(len(kept_proposals[i]), 1),
                        )
                        for j in range(len(gt_boxes[i]))
                    ]
                )
                gt_box_deltas.append(
                    tmp.permute(1, 0, 2)
                )  # [gt,pred,boxdim] -> [pred, gt, boxdim]
            use_target_delta_matching = True
            ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
            # Deltas double as "boxes" in this branch (no-op aliases kept for
            # symmetry with the other branches).
            nll_pred_delta_chols = nll_pred_cov
            nll_pred_deltas = nll_pred_deltas
            nll_pred_boxes = nll_pred_deltas
            nll_pred_cov = nll_pred_cov
        nll, associations, decompositions = negative_log_likelihood(
            nll_scores,
            nll_pred_boxes,
            nll_pred_cov,
            gt_boxes,
            nll_gt_classes,
            image_shapes,
            regression_dist,
            ppps,
            self.nll_max_num_solutions,
            scores_have_bg_cls=True,
            target_deltas=gt_box_deltas,
            matching_distance=matching_distance,
            use_target_delta_matching=use_target_delta_matching,
            pred_deltas=nll_pred_deltas,
            pred_delta_chols=nll_pred_delta_chols,
        )
        # Save some stats
        storage = get_event_storage()
        num_classes = self.num_classes
        # Mean of squared Cholesky diagonals over non-empty images.
        mean_variance = np.mean(
            [
                cov.diagonal(dim1=-2,dim2=-1)
                .pow(2)
                .mean()
                .item()
                for cov in nll_pred_cov
                if cov.shape[0] > 0
            ]
        )
        storage.put_scalar("nll/mean_covariance", mean_variance)
        # NOTE(review): `device` is not defined in this method; presumably a
        # module-level name — verify it is imported/defined at file scope.
        ppp_intens = np.sum([ppp["loss"].integrate(
            torch.as_tensor(image_shapes).to(device), num_classes
        )
            .mean()
            .item()
            for ppp in ppps
        ])
        storage.put_scalar("nll/ppp_intensity", ppp_intens)
        # Per-image averages of the NLL decomposition terms, clipped to stay
        # finite; [0] picks the best association hypothesis.
        reg_loss = np.mean(
            [
                np.clip(
                    decomp["matched_bernoulli_reg"][0]
                    / (decomp["num_matched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        cls_loss_match = np.mean(
            [
                np.clip(
                    decomp["matched_bernoulli_cls"][0]
                    / (decomp["num_matched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        cls_loss_no_match = np.mean(
            [
                np.clip(
                    decomp["unmatched_bernoulli"][0]
                    / (decomp["num_unmatched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        # Collect all losses
        losses = dict()
        losses["loss_box_reg"] = nll
        # Add losses for logging, these do not propagate gradients
        losses["loss_regression"] = torch.tensor(reg_loss).to(nll.device)
        losses["loss_cls_matched"] = torch.tensor(cls_loss_match).to(nll.device)
        losses["loss_cls_unmatched"] = torch.tensor(cls_loss_no_match).to(nll.device)
        return losses
def inference(self, predictions, proposals):
"""
Returns:
list[Instances]: same as `fast_rcnn_inference`.
list[Tensor]: same as `fast_rcnn_inference`.
"""
boxes = self.predict_boxes(predictions, proposals)
scores = self.predict_probs(predictions, proposals)
image_shapes = [x.image_size for x in proposals]
return fast_rcnn_inference(
boxes,
scores,
image_shapes,
self.test_score_thresh,
self.test_nms_thresh,
self.test_topk_per_image,
)
def predict_boxes_for_gt_classes(self, predictions, proposals):
"""
Returns:
list[Tensor]: A list of Tensors of predicted boxes for GT classes in case of
class-specific box head. Element i of the list has shape (Ri, B), where Ri is
the number of predicted objects for image i and B is the box dimension (4 or 5)
"""
if not len(proposals):
return []
scores, proposal_deltas = predictions
proposal_boxes = [p.proposal_boxes for p in proposals]
proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor
N, B = proposal_boxes.shape
predict_boxes = self.box2box_transform.apply_deltas(
proposal_deltas, proposal_boxes
) # Nx(KxB)
K = predict_boxes.shape[1] // B
if K > 1:
gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
# Some proposals are ignored or have a background class. Their gt_classes
# cannot be used as index.
gt_classes = gt_classes.clamp_(0, K - 1)
predict_boxes = predict_boxes.view(N, K, B)[
torch.arange(N, dtype=torch.long, device=predict_boxes.device),
gt_classes,
]
num_prop_per_image = [len(p) for p in proposals]
return predict_boxes.split(num_prop_per_image)
def predict_boxes(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions. The ``proposal_boxes`` field is expected.
Returns:
list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
the number of predicted objects for image i and B is the box dimension (4 or 5)
"""
if not len(proposals):
return []
_, proposal_deltas, _, _ = predictions
num_prop_per_image = [len(p) for p in proposals]
proposal_boxes = [p.proposal_boxes for p in proposals]
proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor
predict_boxes = self.box2box_transform.apply_deltas(
proposal_deltas, proposal_boxes
) # Nx(KxB)
return predict_boxes.split(num_prop_per_image)
def predict_probs(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions.
Returns:
list[Tensor]: A list of Tensors of predicted class probabilities for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i.
"""
scores, _, _, _ = predictions
num_inst_per_image = [len(p) for p in proposals]
if self.cls_var_loss == "evidential":
alphas = get_dir_alphas(scores)
dirichlet_s = alphas.sum(1).unsqueeze(1)
# Compute probabilities
probs = alphas / dirichlet_s
else:
probs = F.softmax(scores, dim=-1)
return probs.split(num_inst_per_image, dim=0)
# Todo: new detectron interface required copying code. Check for better
# way to inherit from FastRCNNConvFCHead.
@ROI_BOX_HEAD_REGISTRY.register()
class DropoutFastRCNNConvFCHead(nn.Module):
    """
    Box head: several 3x3 conv layers (each followed by norm & ReLU), then
    several fc layers, each followed by dropout and ReLU.
    """

    @configurable
    def __init__(
        self,
        input_shape: ShapeSpec,
        *,
        conv_dims: List[int],
        fc_dims: List[int],
        conv_norm="",
        dropout_rate,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            input_shape (ShapeSpec): shape of the input feature.
            conv_dims (list[int]): output dimensions of the conv layers.
            fc_dims (list[int]): output dimensions of the fc layers.
            conv_norm (str or callable): normalization for the conv layers.
                See :func:`detectron2.layers.get_norm` for supported types.
            dropout_rate (float): p for the dropout layers.
        """
        super().__init__()
        assert len(conv_dims) + len(fc_dims) > 0
        self.dropout_rate = dropout_rate
        self.use_dropout = self.dropout_rate != 0.0
        self._output_size = (
            input_shape.channels,
            input_shape.height,
            input_shape.width,
        )
        # Conv stack; `_output_size` tracks the running (C, H, W) shape.
        self.conv_norm_relus = []
        for layer_idx, channels in enumerate(conv_dims, start=1):
            conv = Conv2d(
                self._output_size[0],
                channels,
                kernel_size=3,
                padding=1,
                bias=not conv_norm,
                norm=get_norm(conv_norm, channels),
                activation=F.relu,
            )
            self.add_module("conv{}".format(layer_idx), conv)
            self.conv_norm_relus.append(conv)
            self._output_size = (channels, self._output_size[1], self._output_size[2])
        # FC stack with a dropout module per fc layer; `_output_size` becomes
        # a plain int once the tensor is flattened.
        self.fcs = []
        self.fcs_dropout = []
        for layer_idx, width in enumerate(fc_dims, start=1):
            fc = Linear(np.prod(self._output_size), width)
            fc_dropout = nn.Dropout(p=self.dropout_rate)
            self.add_module("fc{}".format(layer_idx), fc)
            self.add_module("fc_dropout{}".format(layer_idx), fc_dropout)
            self.fcs.append(fc)
            self.fcs_dropout.append(fc_dropout)
            self._output_size = width
        for conv in self.conv_norm_relus:
            weight_init.c2_msra_fill(conv)
        for fc in self.fcs:
            weight_init.c2_xavier_fill(fc)

    @classmethod
    def from_config(cls, cfg, input_shape):
        head_cfg = cfg.MODEL.ROI_BOX_HEAD
        return {
            "input_shape": input_shape,
            "conv_dims": [head_cfg.CONV_DIM] * head_cfg.NUM_CONV,
            "fc_dims": [head_cfg.FC_DIM] * head_cfg.NUM_FC,
            "conv_norm": head_cfg.NORM,
            "dropout_rate": cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE,
        }

    def forward(self, x):
        for conv in self.conv_norm_relus:
            x = conv(x)
        if self.fcs:
            if x.dim() > 2:
                x = torch.flatten(x, start_dim=1)
            for fc, drop in zip(self.fcs, self.fcs_dropout):
                x = F.relu(drop(fc(x)))
        return x

    @property
    def output_shape(self):
        """
        Returns:
            ShapeSpec: the output feature shape.
        """
        size = self._output_size
        if isinstance(size, int):
            return ShapeSpec(channels=size)
        return ShapeSpec(channels=size[0], height=size[1], width=size[2])
| 66,644
| 40.523364
| 159
|
py
|
pmb-nll
|
pmb-nll-main/src/probabilistic_modeling/probabilistic_detr.py
|
import numpy as np
import torch
import torch.nn.functional as F
# Detectron imports
from detectron2.modeling import META_ARCH_REGISTRY, detector_postprocess
from detectron2.utils.events import get_event_storage
# Detr imports
from models.detr import DETR, MLP, SetCriterion
from torch import distributions, nn
from torch._C import device
from util import box_ops
from util.misc import NestedTensor, accuracy, nested_tensor_from_tensor_list
from probabilistic_modeling.losses import negative_log_likelihood
# Project imports
from probabilistic_modeling.modeling_utils import (
PoissonPointProcessIntensityFunction, clamp_log_variance,
covariance_output_to_cholesky, get_probabilistic_loss_weight, PoissonPointUnion)
@META_ARCH_REGISTRY.register()
class ProbabilisticDetr(META_ARCH_REGISTRY.get("Detr")):
    """
    Probabilistic DETR: wraps the registered "Detr" meta-architecture with a
    :class:`CustomDetr` model that also predicts classification and box
    uncertainty, and a :class:`ProbabilisticSetCriterion` for the
    corresponding losses.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        # Parse configs
        self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME
        self.compute_cls_var = self.cls_var_loss != "none"
        self.cls_var_num_samples = (
            cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES
        )
        self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME
        self.compute_bbox_cov = self.bbox_cov_loss != "none"
        self.bbox_cov_num_samples = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES
        )
        self.bbox_cov_dist_type = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
        )
        self.bbox_cov_type = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE
        )
        if self.bbox_cov_type == "diagonal":
            # Diagonal covariance matrix has N elements
            self.bbox_cov_dims = 4
        else:
            # Number of elements required to describe an NxN covariance matrix is
            # computed as: (N * (N + 1)) / 2
            self.bbox_cov_dims = 10
        self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
        self.use_dropout = self.dropout_rate != 0.0
        self.current_step = 0
        # ANNEALING_STEP <= 0 means "fall back to the first LR-decay step".
        self.annealing_step = (
            cfg.SOLVER.STEPS[0]
            if cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP <= 0
            else cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP
        )
        if self.bbox_cov_loss == "pmb_negative_log_likelihood":
            ppp_intensity_function = lambda x: PoissonPointProcessIntensityFunction(
                cfg, device=self.device, **x
            )
            self.nll_max_num_solutions = (
                cfg.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS
            )
        else:
            ppp_intensity_function = None
            self.nll_max_num_solutions = 0
        # Create probabilistic output layers
        self.detr = CustomDetr(
            self.detr.backbone,
            self.detr.transformer,
            num_classes=self.num_classes,
            num_queries=self.detr.num_queries,
            aux_loss=self.detr.aux_loss,
            compute_cls_var=self.compute_cls_var,
            compute_bbox_cov=self.compute_bbox_cov,
            bbox_cov_dims=self.bbox_cov_dims,
        )
        self.detr.to(self.device)
        # Select which loss terms the criterion computes. The PMB NLL loss
        # already covers classification, so plain "labels" is skipped then.
        losses = ["cardinality"]
        if self.compute_cls_var:
            losses.append("labels_" + self.cls_var_loss)
        elif self.bbox_cov_loss != "pmb_negative_log_likelihood":
            losses.append("labels")
        if self.compute_bbox_cov:
            losses.append("boxes_" + self.bbox_cov_loss)
        else:
            losses.append("boxes")
        # Replace setcriterion with our own implementation
        self.criterion = ProbabilisticSetCriterion(
            self.num_classes,
            matcher=self.criterion.matcher,
            weight_dict=self.criterion.weight_dict,
            eos_coef=self.criterion.eos_coef,
            losses=losses,
            nll_max_num_solutions=self.nll_max_num_solutions,
            ppp=ppp_intensity_function,
            bbox_cov_dist_type=self.bbox_cov_dist_type,
            matching_distance=cfg.MODEL.PROBABILISTIC_MODELING.MATCHING_DISTANCE,
            use_prediction_mixture=cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE,
        )
        self.criterion.set_bbox_cov_num_samples(self.bbox_cov_num_samples)
        self.criterion.set_cls_var_num_samples(self.cls_var_num_samples)
        self.criterion.to(self.device)
        self.input_format = "RGB"

    def get_ppp_intensity_function(self):
        """Return the criterion's Poisson point process intensity function."""
        return self.criterion.ppp_intensity_function

    def forward(self, batched_inputs, return_raw_results=False, is_mc_dropout=False):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:

                * image: Tensor, image in (C, H, W) format.
                * instances: Instances

                Other information that's included in the original dicts, such as:

                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.
            return_raw_results (bool): if True return unprocessed results for probabilistic inference.
            is_mc_dropout (bool): if True, return unprocessed results even if self.is_training flag is on.
        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        # Track training progress for loss annealing; outside an EventStorage
        # context (e.g. plain evaluation) just count calls.
        # NOTE(review): `+=` accumulates the global iteration number on every
        # call, so current_step grows super-linearly — presumably `= ... .iter`
        # was intended; left unchanged to preserve training behavior. Verify.
        try:
            self.current_step += get_event_storage().iter
        except Exception:
            self.current_step += 1
        images = self.preprocess_image(batched_inputs)
        output = self.detr(images)
        if self.training and not is_mc_dropout:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
            targets = self.prepare_targets(gt_instances)
            loss_dict = self.criterion(output, targets)
            weight_dict = self.criterion.weight_dict
            prob_weight = get_probabilistic_loss_weight(
                self.current_step, self.annealing_step
            )
            for k in loss_dict:
                if k in weight_dict:
                    loss_dict[k] *= weight_dict[k]
                if "loss" not in k:  # some "losses" are here for logging purposes only
                    probabilistic_loss_weight = 1
                elif "nll" in k:
                    probabilistic_loss_weight = prob_weight
                else:
                    probabilistic_loss_weight = 1 - prob_weight
                # uncomment for weighted prob loss
                # loss_dict[k] *= probabilistic_loss_weight
            return loss_dict
        elif return_raw_results:
            # Raw head outputs for probabilistic inference; attach the PPP
            # weights when the PMB NLL loss is active.
            if (
                self.compute_bbox_cov
                and self.bbox_cov_loss == "pmb_negative_log_likelihood"
            ):
                output["ppp"] = self.criterion.ppp_intensity_function.get_weights()
            return output
        else:
            # Standard inference path: post-process to per-image Instances.
            box_cls = output["pred_logits"]
            box_pred = output["pred_boxes"]
            mask_pred = output["pred_masks"] if self.mask_on else None
            results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes)
            processed_results = []
            for results_per_image, input_per_image, image_size in zip(
                results, batched_inputs, images.image_sizes
            ):
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                r = detector_postprocess(results_per_image, height, width)
                processed_results.append({"instances": r})
            return processed_results
class CustomDetr(DETR):
    """DETR module extended for PROBABILISTIC object detection: optional extra
    heads predict per-class logit variance and box covariance parameters."""

    def __init__(
        self,
        backbone,
        transformer,
        num_classes,
        num_queries,
        aux_loss=False,
        compute_cls_var=False,
        compute_bbox_cov=False,
        bbox_cov_dims=4,
    ):
        super().__init__(backbone, transformer, num_classes, num_queries, aux_loss)
        hidden_dim = self.transformer.d_model
        self.compute_cls_var = compute_cls_var
        self.compute_bbox_cov = compute_bbox_cov
        if self.compute_cls_var:
            # Classification variance head, initialized to near-zero weights
            # and a small initial variance (bias = 2*log(0.01)).
            self.class_var_embed = nn.Linear(hidden_dim, num_classes + 1)
            nn.init.normal_(self.class_var_embed.weight, std=0.0001)
            nn.init.constant_(self.class_var_embed.bias, 2 * np.log(0.01))
        if self.compute_bbox_cov:
            self.bbox_covar_embed = MLP(hidden_dim, hidden_dim, bbox_cov_dims, 3)

    def forward(self, samples: NestedTensor):
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.backbone(samples)
        feat, mask = features[-1].decompose()
        assert mask is not None
        # Only change to detr code happens here: keep the decoder states
        # around so the variance heads can consume them.
        decoder_states = self.transformer(
            self.input_proj(feat), mask, self.query_embed.weight, pos[-1]
        )[0]
        class_logits = self.class_embed(decoder_states)
        box_coords = self.bbox_embed(decoder_states).sigmoid()
        out = {"pred_logits": class_logits[-1], "pred_boxes": box_coords[-1]}
        if self.compute_cls_var:
            out["pred_logits_var"] = self.class_var_embed(decoder_states[-1])
        box_cov = None
        if self.compute_bbox_cov:
            box_cov = self.bbox_covar_embed(decoder_states)
            out["pred_boxes_cov"] = box_cov[-1]
        if self.aux_loss:
            out["aux_outputs"] = self._set_aux_loss(class_logits, box_coords, box_cov)
        return out

    def _set_aux_loss(self, outputs_class, outputs_coord, bbox_cov_out=None):
        # Torchscript workaround: it rejects dicts with non-homogeneous value
        # types, so emit one homogeneous dict per intermediate decoder layer.
        if bbox_cov_out is None:
            return [
                {"pred_logits": cls, "pred_boxes": box}
                for cls, box in zip(outputs_class[:-1], outputs_coord[:-1])
            ]
        return [
            {"pred_logits": cls, "pred_boxes": box, "pred_boxes_cov": cov}
            for cls, box, cov in zip(
                outputs_class[:-1], outputs_coord[:-1], bbox_cov_out[:-1]
            )
        ]
class ProbabilisticSetCriterion(SetCriterion):
"""
This is custom set criterion to allow probabilistic estimates
"""
def __init__(
self,
num_classes,
matcher,
weight_dict,
eos_coef,
losses,
nll_max_num_solutions,
ppp,
bbox_cov_dist_type,
matching_distance,
use_prediction_mixture,
):
super().__init__(num_classes, matcher, weight_dict, eos_coef, losses)
self.probabilistic_loss_weight = 0.0
self.bbox_cov_num_samples = 1000
self.cls_var_num_samples = 1000
self.nll_max_num_solutions = nll_max_num_solutions
self.ppp_intensity_function = ppp({})
self.ppp_constructor = ppp
self.bbox_cov_dist_type = bbox_cov_dist_type
self.matching_distance = matching_distance
self.use_prediction_mixture = use_prediction_mixture
def set_bbox_cov_num_samples(self, bbox_cov_num_samples):
self.bbox_cov_num_samples = bbox_cov_num_samples
def set_cls_var_num_samples(self, cls_var_num_samples):
self.cls_var_num_samples = cls_var_num_samples
    def loss_labels_att(self, outputs, targets, indices, num_boxes, log=True):
        """Classification loss (NLL + Loss attenuation).

        Samples `cls_var_num_samples` stochastic logit sets from a Gaussian
        whose std is derived from the predicted log-variance, then averages
        cross-entropy over the samples. Falls back to the deterministic
        `loss_labels` when no variance head output is present.

        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        outputs must contain the mean pred_logits and the variance pred_logits_var
        """
        if "pred_logits_var" not in outputs:
            return self.loss_labels(outputs, targets, indices, num_boxes, log)
        assert "pred_logits" in outputs
        src_logits = outputs["pred_logits"]
        src_logits_var = outputs["pred_logits_var"]
        # pred_logits_var holds log-variances: std = sqrt(exp(log_var)).
        src_logits_var = torch.sqrt(torch.exp(src_logits_var))
        univariate_normal_dists = distributions.normal.Normal(
            src_logits, scale=src_logits_var
        )
        # rsample keeps the samples differentiable w.r.t. mean and std.
        pred_class_stochastic_logits = univariate_normal_dists.rsample(
            (self.cls_var_num_samples,)
        )
        # Fold the sample dimension into the query dimension:
        # (S, B, Q, C) -> (B, Q*S, C).
        pred_class_stochastic_logits = pred_class_stochastic_logits.view(
            pred_class_stochastic_logits.shape[1],
            pred_class_stochastic_logits.shape[2]
            * pred_class_stochastic_logits.shape[0],
            -1,
        )
        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat(
            [t["labels"][J] for t, (_, J) in zip(targets, indices)]
        )
        # Unmatched queries are assigned the background class (num_classes).
        target_classes = torch.full(
            src_logits.shape[:2],
            self.num_classes,
            dtype=torch.int64,
            device=src_logits.device,
        )
        target_classes[idx] = target_classes_o
        # Replicate the targets once per stochastic sample, matching the
        # reshape applied to the logits above.
        target_classes = torch.unsqueeze(target_classes, dim=0)
        target_classes = torch.repeat_interleave(
            target_classes, self.cls_var_num_samples, dim=0
        )
        target_classes = target_classes.view(
            target_classes.shape[1], target_classes.shape[2] * target_classes.shape[0]
        )
        loss_ce = F.cross_entropy(
            pred_class_stochastic_logits.transpose(1, 2),
            target_classes,
            self.empty_weight,
        )
        losses = {"loss_ce": loss_ce}
        if log:
            # TODO this should probably be a separate loss, not hacked in this
            # one here
            losses["class_error"] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
        return losses
    def loss_boxes_var_nll(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the nll probabilistic regression loss and the GIoU loss.

        Falls back to the deterministic `loss_boxes` when no covariance head
        output is present.

        targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if "pred_boxes_cov" not in outputs:
            return self.loss_boxes(outputs, targets, indices, num_boxes)
        assert "pred_boxes" in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs["pred_boxes"][idx]
        src_vars = clamp_log_variance(outputs["pred_boxes_cov"][idx])
        target_boxes = torch.cat(
            [t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0
        )
        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")
        if src_vars.shape[1] == 4:
            # Diagonal covariance: attenuated-L1 NLL per coordinate,
            # 0.5*exp(-log_var)*|err| + 0.5*log_var.
            loss_nll = 0.5 * torch.exp(-src_vars) * loss_bbox + 0.5 * src_vars
        else:
            # Full covariance: Gaussian NLL via the reconstructed Cholesky
            # factor of the covariance matrix.
            forecaster_cholesky = covariance_output_to_cholesky(src_vars)
            if forecaster_cholesky.shape[0] != 0:
                multivariate_normal_dists = (
                    distributions.multivariate_normal.MultivariateNormal(
                        src_boxes, scale_tril=forecaster_cholesky
                    )
                )
                loss_nll = -multivariate_normal_dists.log_prob(target_boxes)
            else:
                # No matched boxes: fall back to the (empty) L1 tensor so the
                # sum below is well-defined.
                loss_nll = loss_bbox
        loss_nll_final = loss_nll.sum() / num_boxes
        # Collect all losses
        losses = dict()
        losses["loss_bbox"] = loss_nll_final
        # Add iou loss
        losses = update_with_iou_loss(losses, src_boxes, target_boxes, num_boxes)
        return losses
    def loss_boxes_energy(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the energy distance loss and the GIoU loss.

        The energy score is estimated with Monte-Carlo samples drawn from the
        predicted multivariate Gaussian. Falls back to the deterministic
        `loss_boxes` when no covariance head output is present.

        targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if "pred_boxes_cov" not in outputs:
            return self.loss_boxes(outputs, targets, indices, num_boxes)
        assert "pred_boxes" in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs["pred_boxes"][idx]
        target_boxes = torch.cat(
            [t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0
        )
        # Begin probabilistic loss computation
        src_vars = clamp_log_variance(outputs["pred_boxes_cov"][idx])
        forecaster_cholesky = covariance_output_to_cholesky(src_vars)
        multivariate_normal_dists = (
            distributions.multivariate_normal.MultivariateNormal(
                src_boxes, scale_tril=forecaster_cholesky
            )
        )
        # Define Monte-Carlo Samples. Drawing N+1 samples and taking two
        # shifted N-slices gives pairs of (dependent but identically
        # distributed) samples for the second energy-score term.
        distributions_samples = multivariate_normal_dists.rsample(
            (self.bbox_cov_num_samples + 1,)
        )
        distributions_samples_1 = distributions_samples[
            0 : self.bbox_cov_num_samples, :, :
        ]
        distributions_samples_2 = distributions_samples[
            1 : self.bbox_cov_num_samples + 1, :, :
        ]
        # Compute energy score. Smooth L1 loss is preferred in this case to
        # maintain the proper scoring properties.
        loss_covariance_regularize = (
            -F.l1_loss(
                distributions_samples_1, distributions_samples_2, reduction="sum"
            )
            / self.bbox_cov_num_samples
        )  # Second term
        gt_proposals_delta_samples = torch.repeat_interleave(
            target_boxes.unsqueeze(0), self.bbox_cov_num_samples, dim=0
        )
        loss_first_moment_match = (
            2
            * F.l1_loss(
                distributions_samples_1, gt_proposals_delta_samples, reduction="sum"
            )
            / self.bbox_cov_num_samples
        )  # First term
        loss_energy = loss_first_moment_match + loss_covariance_regularize
        # Normalize and add losses
        loss_energy_final = loss_energy.sum() / num_boxes
        # Collect all losses
        losses = dict()
        losses["loss_bbox"] = loss_energy_final
        # Add iou loss
        losses = update_with_iou_loss(losses, src_boxes, target_boxes, num_boxes)
        return losses
    def loss_boxes_smm(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the L1 regression loss, SMM variance and Covariance loss and the GIoU loss.

        Second-moment matching (SMM): the predicted (co)variance is driven
        towards the empirical squared-error (outer product) of the residuals.
        Falls back to the deterministic `loss_boxes` when no covariance head
        output is present.

        targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if "pred_boxes_cov" not in outputs:
            return self.loss_boxes(outputs, targets, indices, num_boxes)
        assert "pred_boxes" in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs["pred_boxes"][idx]
        target_boxes = torch.cat(
            [t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0
        )
        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")
        # Begin probabilistic loss computation
        src_vars = clamp_log_variance(outputs["pred_boxes_cov"][idx])
        errors = src_boxes - target_boxes
        if src_vars.shape[1] == 4:
            # Diagonal case: match predicted variance exp(log_var) to err^2.
            second_moment_matching_term = F.l1_loss(
                torch.exp(src_vars), errors ** 2, reduction="none"
            )
        else:
            # Full case: match predicted covariance L L^T to err err^T.
            errors = torch.unsqueeze(errors, 2)
            gt_error_covar = torch.matmul(errors, torch.transpose(errors, 2, 1))
            # This is the cholesky decomposition of the covariance matrix.
            # We reconstruct it from 10 estimated parameters as a
            # lower triangular matrix.
            forecaster_cholesky = covariance_output_to_cholesky(src_vars)
            predicted_covar = torch.matmul(
                forecaster_cholesky, torch.transpose(forecaster_cholesky, 2, 1)
            )
            second_moment_matching_term = F.l1_loss(
                predicted_covar, gt_error_covar, reduction="none"
            )
        loss_smm = second_moment_matching_term.sum() / num_boxes
        # Normalize and add losses
        loss_bbox_final = loss_bbox.sum() / num_boxes
        loss_smm_final = loss_smm + loss_bbox_final
        # Collect all losses
        losses = dict()
        losses["loss_bbox"] = loss_smm_final
        # Add iou loss
        losses = update_with_iou_loss(losses, src_boxes, target_boxes, num_boxes)
        return losses
def loss_pmb_nll(self, outputs, targets, indices, num_boxes):
        """Poisson multi-Bernoulli (PMB) negative log-likelihood over boxes and classes.

        NOTE(review): `indices` is unused here — `negative_log_likelihood`
        computes its own data associations internally.
        """
        # Without predicted covariances the PMB likelihood is undefined; fall back.
        if "pred_boxes_cov" not in outputs:
            return self.loss_boxes(outputs, targets, indices, num_boxes)
        assert "pred_logits" in outputs
        src_logits = outputs["pred_logits"]
        # Clamp scores away from 0/1 so downstream log terms stay finite.
        src_scores = src_logits.softmax(-1).clamp(1e-6, 1 - 1e-6)
        # Last softmax column is the background / no-object class.
        num_classes = src_scores.shape[-1] - 1
        assert "pred_boxes" in outputs
        src_boxes = outputs["pred_boxes"]
        # Repeat box means per foreground class: (batch, queries, num_classes, 4).
        src_boxes = src_boxes.unsqueeze(2).repeat(1, 1, num_classes, 1)
        assert "pred_boxes_cov" in outputs
        src_box_cov = outputs["pred_boxes_cov"]
        src_box_chol = covariance_output_to_cholesky(src_box_cov)
        src_box_chol = src_box_chol.unsqueeze(2).repeat(1, 1, num_classes, 1, 1)
        tgt_classes = [t["labels"] for t in targets]
        tgt_boxes = [t["boxes"] for t in targets]
        self.ppp_intensity_function.update_distribution()
        # Factory building the regression distribution from (mean, Cholesky factor).
        if self.bbox_cov_dist_type == "gaussian":
            regression_dist = (
                lambda x, y: distributions.multivariate_normal.MultivariateNormal(
                    loc=x, scale_tril=y
                )
            )
        elif self.bbox_cov_dist_type == "laplacian":
            regression_dist = lambda x, y: distributions.laplace.Laplace(
                loc=x, scale=(y.diagonal(dim1=-2, dim2=-1) / np.sqrt(2))
            )
        else:
            raise Exception(
                f"Bounding box uncertainty distribution {self.bbox_cov_dist_type} is not available."
            )
        # A distance of the form "log_prob_<s>" carries a covariance scaling s.
        if "log_prob" in self.matching_distance and self.matching_distance != "log_prob":
            covar_scaling = float(self.matching_distance.split("_")[-1])
            matching_distance = "log_prob"
        else:
            covar_scaling = 1
            matching_distance = self.matching_distance
        bs = src_logits.shape[0]
        # Boxes are normalized, so every image is treated as a unit square.
        image_shapes = torch.as_tensor([[1, 1] for i in range(bs)]).to(src_boxes.device)
        if self.use_prediction_mixture:
            # Move low-confidence predictions into the PPP clutter model; keep the
            # rest as Bernoulli components.
            ppps = []
            src_boxes_tot = []
            src_box_chol_tot = []
            src_scores_tot = []
            for i in range(bs):
                pred_box_means = src_boxes[i]
                pred_box_chols = src_box_chol[i]
                pred_cls_probs = src_scores[i]
                #max_conf = pred_cls_probs[..., :num_classes].max(dim=1)[0]
                max_conf = 1 - pred_cls_probs[..., -1]
                ppp_preds_idx = (
                    max_conf <= self.ppp_intensity_function.ppp_confidence_thres
                )
                # Mixture used by the loss PPP (unscaled covariances).
                mixture_dict = {}
                mixture_dict["weights"] = max_conf[ppp_preds_idx]
                mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
                mixture_dict["covs"] = pred_box_chols[ppp_preds_idx, 0]@pred_box_chols[ppp_preds_idx, 0].transpose(-1,-2)
                mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :num_classes]
                mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
                if self.bbox_cov_dist_type == "gaussian":
                    mixture_dict[
                        "reg_dist"
                    ] = distributions.multivariate_normal.MultivariateNormal
                    mixture_dict["reg_kwargs"] = {
                        "scale_tril": pred_box_chols[ppp_preds_idx, 0]
                    }
                elif self.bbox_cov_dist_type == "laplacian":
                    mixture_dict["reg_dist"] = distributions.laplace.Laplace
                    mixture_dict["reg_kwargs"] = {
                        "scale": (
                            pred_box_chols[ppp_preds_idx, 0].diagonal(dim1=-2, dim2=-1)
                            / np.sqrt(2)
                        )
                    }
                loss_ppp = PoissonPointUnion()
                loss_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
                loss_ppp.add_ppp(self.ppp_intensity_function)
                # Second mixture for matching, with covariances scaled by covar_scaling.
                mixture_dict = {}
                mixture_dict["weights"] = max_conf[ppp_preds_idx]
                mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
                scale_mat = torch.eye(pred_box_chols.shape[-1]).to(pred_box_chols.device)*covar_scaling
                scaled_cov = scale_mat@pred_box_chols[ppp_preds_idx, 0]
                mixture_dict["covs"] = (scaled_cov)@(scaled_cov).transpose(-1,-2)
                mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :num_classes]
                mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
                if self.bbox_cov_dist_type == "gaussian":
                    mixture_dict[
                        "reg_dist"
                    ] = distributions.multivariate_normal.MultivariateNormal
                    mixture_dict["reg_kwargs"] = {
                        "scale_tril": scale_mat@pred_box_chols[ppp_preds_idx, 0]
                    }
                elif self.bbox_cov_dist_type == "laplacian":
                    mixture_dict["reg_dist"] = distributions.laplace.Laplace
                    mixture_dict["reg_kwargs"] = {
                        "scale": (
                            (scale_mat@pred_box_chols[ppp_preds_idx, 0]).diagonal(dim1=-2, dim2=-1)
                            / np.sqrt(2)
                        )
                    }
                match_ppp = PoissonPointUnion()
                match_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
                match_ppp.add_ppp(self.ppp_intensity_function)
                ppps.append({"matching": match_ppp, "loss": loss_ppp})
                # Remaining (confident) predictions stay as Bernoulli components.
                src_boxes_tot.append(pred_box_means[ppp_preds_idx.logical_not()])
                src_box_chol_tot.append(pred_box_chols[ppp_preds_idx.logical_not()])
                src_scores_tot.append(pred_cls_probs[ppp_preds_idx.logical_not()])
            src_boxes = src_boxes_tot
            src_box_chol = src_box_chol_tot
            src_scores = src_scores_tot
        elif self.ppp_intensity_function.ppp_intensity_type == "gaussian_mixture":
            ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
        else:
            ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
        nll, associations, decompositions = negative_log_likelihood(
            src_scores,
            src_boxes,
            src_box_chol,
            tgt_boxes,
            tgt_classes,
            image_shapes,
            regression_dist,
            ppps,
            self.nll_max_num_solutions,
            scores_have_bg_cls=True,
            matching_distance=matching_distance,
            covar_scaling=covar_scaling
        )
        # Save some stats
        storage = get_event_storage()
        num_classes = self.num_classes
        mean_variance = np.mean(
            [
                cov.diagonal(dim1=-2,dim2=-1)
                .pow(2)
                .mean()
                .item()
                for cov in src_box_chol
                if cov.shape[0] > 0
            ]
        )
        storage.put_scalar("nll/mean_covariance", mean_variance)
        ppp_intens = np.sum([ppp["loss"].integrate(
            image_shapes, num_classes
        )
            .mean()
            .item()
            for ppp in ppps
        ])
        storage.put_scalar("nll/ppp_intensity", ppp_intens)
        # Per-image decomposition terms are normalized by component counts and
        # clipped to keep the logged means finite.
        reg_loss = np.mean(
            [
                np.clip(
                    decomp["matched_bernoulli_reg"][0]
                    / (decomp["num_matched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        cls_loss_match = np.mean(
            [
                np.clip(
                    decomp["matched_bernoulli_cls"][0]
                    / (decomp["num_matched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        cls_loss_no_match = np.mean(
            [
                np.clip(
                    decomp["unmatched_bernoulli"][0]
                    / (decomp["num_unmatched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        # Collect all losses
        losses = dict()
        losses["loss_nll"] = nll
        # Add losses for logging, these do not propagate gradients
        losses["regression_matched_nll"] = torch.tensor(reg_loss).to(nll.device)
        losses["cls_matched_nll"] = torch.tensor(cls_loss_match).to(nll.device)
        losses["cls_unmatched_nll"] = torch.tensor(cls_loss_no_match).to(nll.device)
        # Extract matched boxes
        iou_src_boxes = []
        iou_target_boxes = []
        for i, association in enumerate(associations):
            association = torch.as_tensor(association).to(src_boxes[i].device).long()
            permutation_association = association[
                0, association[0, :, 1] >= 0
            ] # select all predictions associated with GT
            permutation_association = permutation_association[
                permutation_association[:, 0] < src_boxes[i].shape[0]
            ]
            iou_src_boxes.append(src_boxes[i][permutation_association[:, 0], 0])
            iou_target_boxes.append(tgt_boxes[i][permutation_association[:, 1]])
        # Add iou loss
        losses = update_with_iou_loss(
            losses, torch.cat(iou_src_boxes), torch.cat(iou_target_boxes), num_boxes
        )
        return losses
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
        """Dispatch the loss named by *loss* to the corresponding loss method.

        Raises AssertionError when *loss* does not name a known loss.
        """
        method_by_key = {
            "labels": "loss_labels",
            "labels_loss_attenuation": "loss_labels_att",
            "cardinality": "loss_cardinality",
            "boxes": "loss_boxes",
            "boxes_negative_log_likelihood": "loss_boxes_var_nll",
            "boxes_energy_loss": "loss_boxes_energy",
            "boxes_second_moment_matching": "loss_boxes_smm",
            "boxes_pmb_negative_log_likelihood": "loss_pmb_nll",
            "masks": "loss_masks",
        }
        assert loss in method_by_key, f"do you really want to compute {loss} loss?"
        handler = getattr(self, method_by_key[loss])
        return handler(outputs, targets, indices, num_boxes, **kwargs)
def update_with_iou_loss(losses, src_boxes, target_boxes, num_boxes):
    """Add the normalized generalized-IoU loss to *losses* under "loss_giou".

    Boxes come in (cx, cy, w, h) format and are converted to (x1, y1, x2, y2)
    before the pairwise GIoU; only the diagonal (matched pairs) contributes.
    """
    giou_matrix = box_ops.generalized_box_iou(
        box_ops.box_cxcywh_to_xyxy(src_boxes),
        box_ops.box_cxcywh_to_xyxy(target_boxes),
    )
    pairwise_giou = torch.diag(giou_matrix)
    losses["loss_giou"] = (1 - pairwise_giou).sum() / num_boxes
    return losses
| 31,909
| 38.541512
| 135
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/main.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
def get_args_parser():
    """Build the argparse parser holding every DETR training/evaluation option."""
    p = argparse.ArgumentParser('Set transformer detector', add_help=False)
    # Optimization schedule.
    p.add_argument('--lr', default=1e-4, type=float)
    p.add_argument('--lr_backbone', default=1e-5, type=float)
    p.add_argument('--batch_size', default=2, type=int)
    p.add_argument('--weight_decay', default=1e-4, type=float)
    p.add_argument('--epochs', default=300, type=int)
    p.add_argument('--lr_drop', default=200, type=int)
    p.add_argument('--clip_max_norm', default=0.1, type=float,
                   help='gradient clipping max norm')
    # Model parameters.
    p.add_argument('--frozen_weights', type=str, default=None,
                   help="Path to the pretrained model. If set, only the mask head will be trained")
    # Backbone.
    p.add_argument('--backbone', default='resnet50', type=str,
                   help="Name of the convolutional backbone to use")
    p.add_argument('--dilation', action='store_true',
                   help="If true, we replace stride with dilation in the last convolutional block (DC5)")
    p.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
                   help="Type of positional embedding to use on top of the image features")
    # Transformer.
    p.add_argument('--enc_layers', default=6, type=int,
                   help="Number of encoding layers in the transformer")
    p.add_argument('--dec_layers', default=6, type=int,
                   help="Number of decoding layers in the transformer")
    p.add_argument('--dim_feedforward', default=2048, type=int,
                   help="Intermediate size of the feedforward layers in the transformer blocks")
    p.add_argument('--hidden_dim', default=256, type=int,
                   help="Size of the embeddings (dimension of the transformer)")
    p.add_argument('--dropout', default=0.1, type=float,
                   help="Dropout applied in the transformer")
    p.add_argument('--nheads', default=8, type=int,
                   help="Number of attention heads inside the transformer's attentions")
    p.add_argument('--num_queries', default=100, type=int,
                   help="Number of query slots")
    p.add_argument('--pre_norm', action='store_true')
    # Segmentation.
    p.add_argument('--masks', action='store_true',
                   help="Train segmentation head if the flag is provided")
    # Loss switches.
    p.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
                   help="Disables auxiliary decoding losses (loss at each layer)")
    # Matcher costs.
    p.add_argument('--set_cost_class', default=1, type=float,
                   help="Class coefficient in the matching cost")
    p.add_argument('--set_cost_bbox', default=5, type=float,
                   help="L1 box coefficient in the matching cost")
    p.add_argument('--set_cost_giou', default=2, type=float,
                   help="giou box coefficient in the matching cost")
    # Loss coefficients.
    p.add_argument('--mask_loss_coef', default=1, type=float)
    p.add_argument('--dice_loss_coef', default=1, type=float)
    p.add_argument('--bbox_loss_coef', default=5, type=float)
    p.add_argument('--giou_loss_coef', default=2, type=float)
    p.add_argument('--eos_coef', default=0.1, type=float,
                   help="Relative classification weight of the no-object class")
    # Dataset parameters.
    p.add_argument('--dataset_file', default='coco')
    p.add_argument('--coco_path', type=str)
    p.add_argument('--coco_panoptic_path', type=str)
    p.add_argument('--remove_difficult', action='store_true')
    # Runtime / checkpointing.
    p.add_argument('--output_dir', default='',
                   help='path where to save, empty for no saving')
    p.add_argument('--device', default='cuda',
                   help='device to use for training / testing')
    p.add_argument('--seed', default=42, type=int)
    p.add_argument('--resume', default='', help='resume from checkpoint')
    p.add_argument('--start_epoch', default=0, type=int, metavar='N',
                   help='start epoch')
    p.add_argument('--eval', action='store_true')
    p.add_argument('--num_workers', default=2, type=int)
    # Distributed training parameters.
    p.add_argument('--world_size', default=1, type=int,
                   help='number of distributed processes')
    p.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return p
def main(args):
    """Train (or, with --eval, only evaluate) DETR according to parsed *args*."""
    utils.init_distributed_mode(args)
    print("git:\n  {}\n".format(utils.get_sha()))
    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"
    print(args)
    device = torch.device(args.device)
    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    model, criterion, postprocessors = build_model(args)
    model.to(device)
    # Keep a handle on the raw module so checkpoints are DDP-agnostic.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)
    # Backbone parameters get their own (typically smaller) learning rate.
    param_dicts = [
        {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
        {
            "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
            "lr": args.lr_backbone,
        },
    ]
    optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
                                  weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
    dataset_train = build_dataset(image_set='train', args=args)
    dataset_val = build_dataset(image_set='val', args=args)
    if args.distributed:
        sampler_train = DistributedSampler(dataset_train)
        sampler_val = DistributedSampler(dataset_val, shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    batch_sampler_train = torch.utils.data.BatchSampler(
        sampler_train, args.batch_size, drop_last=True)
    data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
                                   collate_fn=utils.collate_fn, num_workers=args.num_workers)
    data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
                                 drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
    if args.dataset_file == "coco_panoptic":
        # We also evaluate AP during panoptic training, on original coco DS
        coco_val = datasets.coco.build("val", args)
        base_ds = get_coco_api_from_dataset(coco_val)
    else:
        base_ds = get_coco_api_from_dataset(dataset_val)
    if args.frozen_weights is not None:
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp.detr.load_state_dict(checkpoint['model'])
    output_dir = Path(args.output_dir)
    if args.resume:
        # Resume from a URL or a local checkpoint path.
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        # Restore the optimizer/scheduler state only when continuing training.
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1
    if args.eval:
        test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
                                              data_loader_val, base_ds, device, args.output_dir)
        if args.output_dir:
            utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
        return
    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(
            model, criterion, data_loader_train, optimizer, device, epoch,
            args.clip_max_norm)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            # extra checkpoint before LR drop and every 100 epochs
            if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
                checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'args': args,
                }, checkpoint_path)
        test_stats, coco_evaluator = evaluate(
            model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
        )
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}
        if args.output_dir and utils.is_main_process():
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
            # for evaluation logs
            if coco_evaluator is not None:
                (output_dir / 'eval').mkdir(exist_ok=True)
                if "bbox" in coco_evaluator.coco_eval:
                    filenames = ['latest.pth']
                    if epoch % 50 == 0:
                        filenames.append(f'{epoch:03}.pth')
                    for name in filenames:
                        torch.save(coco_evaluator.coco_eval["bbox"].eval,
                                   output_dir / "eval" / name)
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
# Script entry point: parse CLI args, ensure the output directory exists, train.
if __name__ == '__main__':
    parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
| 11,532
| 45.317269
| 116
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/engine.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Train and eval functions used in main.py
"""
import math
import os
import sys
from typing import Iterable
import torch
import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, max_norm: float = 0):
    """Run one training epoch and return the epoch-averaged metrics dict."""
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        # Total loss = weighted sum over the losses the criterion declares weights for.
        weight_dict = criterion.weight_dict
        losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss_value = losses_reduced_scaled.item()
        # Abort on NaN/inf loss instead of corrupting the weights.
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        if max_norm > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()
        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
    """Evaluate on *data_loader*; return (stats dict, CocoEvaluator or None)."""
    model.eval()
    criterion.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'
    # Evaluate every IoU type the postprocessors support (bbox and/or segm).
    iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
    coco_evaluator = CocoEvaluator(base_ds, iou_types)
    # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
    panoptic_evaluator = None
    if 'panoptic' in postprocessors.keys():
        panoptic_evaluator = PanopticEvaluator(
            data_loader.dataset.ann_file,
            data_loader.dataset.ann_folder,
            output_dir=os.path.join(output_dir, "panoptic_eval"),
        )
    for samples, targets in metric_logger.log_every(data_loader, 10, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
                             **loss_dict_reduced_scaled,
                             **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        # Convert model outputs back to the original image coordinate frame.
        orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        if 'segm' in postprocessors.keys():
            target_sizes = torch.stack([t["size"] for t in targets], dim=0)
            results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
        res = {target['image_id'].item(): output for target, output in zip(targets, results)}
        if coco_evaluator is not None:
            coco_evaluator.update(res)
        if panoptic_evaluator is not None:
            res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
            for i, target in enumerate(targets):
                image_id = target["image_id"].item()
                file_name = f"{image_id:012d}.png"
                res_pano[i]["image_id"] = image_id
                res_pano[i]["file_name"] = file_name
            panoptic_evaluator.update(res_pano)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    if coco_evaluator is not None:
        coco_evaluator.synchronize_between_processes()
    if panoptic_evaluator is not None:
        panoptic_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    if coco_evaluator is not None:
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    panoptic_res = None
    if panoptic_evaluator is not None:
        panoptic_res = panoptic_evaluator.summarize()
    stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
    if coco_evaluator is not None:
        if 'bbox' in postprocessors.keys():
            stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
        if 'segm' in postprocessors.keys():
            stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
    if panoptic_res is not None:
        stats['PQ_all'] = panoptic_res["All"]
        stats['PQ_th'] = panoptic_res["Things"]
        stats['PQ_st'] = panoptic_res["Stuff"]
    return stats, coco_evaluator
| 6,626
| 42.598684
| 103
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/hubconf.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer import Transformer
dependencies = ["torch", "torchvision"]
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    """Assemble a DETR model (optionally the segmentation variant) on the named backbone."""
    hidden_dim = 256
    resnet = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
    positional = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    joined_backbone = Joiner(resnet, positional)
    joined_backbone.num_channels = resnet.num_channels
    transf = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
    detr = DETR(joined_backbone, transf, num_classes=num_classes, num_queries=100)
    # Wrap with the mask head only when segmentation is requested.
    return DETRsegm(detr) if mask else detr
def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
    """DETR R50 with 6 encoder and 6 decoder layers.

    Achieves 42/62.4 AP/AP50 on COCO val5k.
    """
    net = _make_detr("resnet50", dilation=False, num_classes=num_classes)
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True
        )
        net.load_state_dict(state["model"])
    if not return_postprocessor:
        return net
    return net, PostProcess()
def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
    """DETR-DC5 R50 with 6 encoder and 6 decoder layers.

    The last block of ResNet-50 uses dilation to increase output resolution.
    Achieves 43.3/63.1 AP/AP50 on COCO val5k.
    """
    net = _make_detr("resnet50", dilation=True, num_classes=num_classes)
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True
        )
        net.load_state_dict(state["model"])
    if not return_postprocessor:
        return net
    return net, PostProcess()
def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):
    """DETR R101 with 6 encoder and 6 decoder layers.

    Achieves 43.5/63.8 AP/AP50 on COCO val5k.
    """
    net = _make_detr("resnet101", dilation=False, num_classes=num_classes)
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True
        )
        net.load_state_dict(state["model"])
    if not return_postprocessor:
        return net
    return net, PostProcess()
def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
    """DETR-DC5 R101 with 6 encoder and 6 decoder layers.

    The last block of ResNet-101 uses dilation to increase output resolution.
    Achieves 44.9/64.7 AP/AP50 on COCO val5k.
    """
    net = _make_detr("resnet101", dilation=True, num_classes=num_classes)
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth", map_location="cpu", check_hash=True
        )
        net.load_state_dict(state["model"])
    if not return_postprocessor:
        return net
    return net, PostProcess()
def detr_resnet50_panoptic(
    pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
    """Panoptic DETR R50 with 6 encoder and 6 decoder layers.

    Achieves 43.4 PQ on COCO val5k. *threshold* is the minimum confidence
    required for keeping segments in the prediction.
    """
    net = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True)
    # COCO convention: category ids <= 90 are "things", the rest are "stuff".
    thing_map = {category: category <= 90 for category in range(250)}
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth",
            map_location="cpu",
            check_hash=True,
        )
        net.load_state_dict(state["model"])
    if not return_postprocessor:
        return net
    return net, PostProcessPanoptic(thing_map, threshold=threshold)
def detr_resnet50_dc5_panoptic(
    pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
    """Panoptic DETR-DC5 R50 with 6 encoder and 6 decoder layers.

    The last block of ResNet-50 uses dilation to increase output resolution.
    Achieves 44.6 PQ on COCO val5k. *threshold* is the minimum confidence
    required for keeping segments in the prediction.
    """
    net = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True)
    # COCO convention: category ids <= 90 are "things", the rest are "stuff".
    thing_map = {category: category <= 90 for category in range(250)}
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth",
            map_location="cpu",
            check_hash=True,
        )
        net.load_state_dict(state["model"])
    if not return_postprocessor:
        return net
    return net, PostProcessPanoptic(thing_map, threshold=threshold)
def detr_resnet101_panoptic(
    pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
    """Panoptic DETR R101 with 6 encoder and 6 decoder layers.

    Achieves 45.1 PQ on COCO val5k. *threshold* is the minimum confidence
    required for keeping segments in the prediction.
    """
    net = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True)
    # COCO convention: category ids <= 90 are "things", the rest are "stuff".
    thing_map = {category: category <= 90 for category in range(250)}
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth",
            map_location="cpu",
            check_hash=True,
        )
        net.load_state_dict(state["model"])
    if not return_postprocessor:
        return net
    return net, PostProcessPanoptic(thing_map, threshold=threshold)
| 6,265
| 36.076923
| 117
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/run_with_submitit.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as detection
import submitit
def parse_args():
    """Parse the submitit cluster options layered on top of the detection parser."""
    base_parser = detection.get_args_parser()
    p = argparse.ArgumentParser("Submitit for detection", parents=[base_parser])
    p.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
    p.add_argument("--nodes", default=4, type=int, help="Number of nodes to request")
    p.add_argument("--timeout", default=60, type=int, help="Duration of the job")
    p.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
    return p.parse_args()
def get_shared_folder() -> Path:
    """Return (creating if needed) the per-user experiments dir under /checkpoint/.

    Raises RuntimeError when the shared /checkpoint/ root is not available.
    """
    user = os.getenv("USER")
    if not Path("/checkpoint/").is_dir():
        raise RuntimeError("No shared folder available")
    experiments_dir = Path(f"/checkpoint/{user}/experiments")
    experiments_dir.mkdir(exist_ok=True)
    return experiments_dir
def get_init_file():
    """Return a fresh init-file path inside the shared folder.

    The init file must not exist (distributed init requires it absent), but its
    parent directory must; any stale file with the same name is removed.
    """
    shared_dir = get_shared_folder()
    os.makedirs(str(shared_dir), exist_ok=True)
    candidate = shared_dir / f"{uuid.uuid4().hex}_init"
    if candidate.exists():
        os.remove(str(candidate))
    return candidate
class Trainer(object):
    """Picklable callable submitted to submitit; runs detection training on a node."""
    def __init__(self, args):
        self.args = args
    def __call__(self):
        # NOTE(review): the import is local so it is resolved on the worker
        # process after the Trainer is unpickled by submitit.
        import main as detection
        self._setup_gpu_args()
        detection.main(self.args)
    def checkpoint(self):
        # Called by submitit on preemption/timeout: resume from the latest
        # checkpoint (if any) and requeue a fresh Trainer with the same args.
        import os
        import submitit
        from pathlib import Path
        self.args.dist_url = get_init_file().as_uri()
        checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
        if os.path.exists(checkpoint_file):
            self.args.resume = checkpoint_file
        print("Requeuing ", self.args)
        empty_trainer = type(self)(self.args)
        return submitit.helpers.DelayedSubmission(empty_trainer)
    def _setup_gpu_args(self):
        # Fill in the distributed-training fields from the submitit job environment.
        import submitit
        from pathlib import Path
        job_env = submitit.JobEnvironment()
        # "%j" in the output dir is replaced by the actual job id.
        self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
        self.args.gpu = job_env.local_rank
        self.args.rank = job_env.global_rank
        self.args.world_size = job_env.num_tasks
        print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
    """Configure a submitit SLURM executor and submit the detection training job."""
    args = parse_args()
    if args.job_dir == "":
        args.job_dir = get_shared_folder() / "%j"
    # Note that the folder will depend on the job_id, to easily track experiments
    executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
    # cluster setup is defined by environment variables
    num_gpus_per_node = args.ngpus
    nodes = args.nodes
    timeout_min = args.timeout
    executor.update_parameters(
        mem_gb=40 * num_gpus_per_node,
        gpus_per_node=num_gpus_per_node,
        tasks_per_node=num_gpus_per_node,  # one task per GPU
        cpus_per_task=10,
        nodes=nodes,
        timeout_min=timeout_min,  # max is 60 * 72
    )
    executor.update_parameters(name="detr")
    args.dist_url = get_init_file().as_uri()
    args.output_dir = args.job_dir
    trainer = Trainer(args)
    job = executor.submit(trainer)
    print("Submitted job_id:", job.job_id)
# Script entry point: submit the training job to the cluster.
if __name__ == "__main__":
    main()
| 3,476
| 30.044643
| 102
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/test_all.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import io
import unittest
import torch
from torch import nn, Tensor
from typing import List
from models.matcher import HungarianMatcher
from models.position_encoding import PositionEmbeddingSine, PositionEmbeddingLearned
from models.backbone import Backbone, Joiner, BackboneBase
from util import box_ops
from util.misc import nested_tensor_from_tensor_list
from hubconf import detr_resnet50, detr_resnet50_panoptic
# onnxruntime requires python 3.5 or above
try:
import onnxruntime
except ImportError:
onnxruntime = None
class Tester(unittest.TestCase):
    """Functional tests for DETR components: box ops, matcher, and TorchScript export."""
    def test_box_cxcywh_to_xyxy(self):
        # round-tripping through both conversions must be (numerically) the identity
        t = torch.rand(10, 4)
        r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t))
        self.assertLess((t - r).abs().max(), 1e-5)
    @staticmethod
    def indices_torch2python(indices):
        """Convert matcher output (list of index-tensor pairs) to plain Python lists for comparison."""
        return [(i.tolist(), j.tolist()) for i, j in indices]
    def test_hungarian(self):
        """The matching must be independent of batching and handle empty targets."""
        n_queries, n_targets, n_classes = 100, 15, 91
        logits = torch.rand(1, n_queries, n_classes + 1)
        boxes = torch.rand(1, n_queries, 4)
        tgt_labels = torch.randint(high=n_classes, size=(n_targets,))
        tgt_boxes = torch.rand(n_targets, 4)
        matcher = HungarianMatcher()
        targets = [{'labels': tgt_labels, 'boxes': tgt_boxes}]
        indices_single = matcher({'pred_logits': logits, 'pred_boxes': boxes}, targets)
        indices_batched = matcher({'pred_logits': logits.repeat(2, 1, 1),
                                   'pred_boxes': boxes.repeat(2, 1, 1)}, targets * 2)
        self.assertEqual(len(indices_single[0][0]), n_targets)
        self.assertEqual(len(indices_single[0][1]), n_targets)
        # duplicating the same sample in a batch must reproduce the single-sample matching
        self.assertEqual(self.indices_torch2python(indices_single),
                         self.indices_torch2python([indices_batched[0]]))
        self.assertEqual(self.indices_torch2python(indices_single),
                         self.indices_torch2python([indices_batched[1]]))
        # test with empty targets
        tgt_labels_empty = torch.randint(high=n_classes, size=(0,))
        tgt_boxes_empty = torch.rand(0, 4)
        targets_empty = [{'labels': tgt_labels_empty, 'boxes': tgt_boxes_empty}]
        indices = matcher({'pred_logits': logits.repeat(2, 1, 1),
                           'pred_boxes': boxes.repeat(2, 1, 1)}, targets + targets_empty)
        self.assertEqual(len(indices[1][0]), 0)
        indices = matcher({'pred_logits': logits.repeat(2, 1, 1),
                           'pred_boxes': boxes.repeat(2, 1, 1)}, targets_empty * 2)
        self.assertEqual(len(indices[0][0]), 0)
    def test_position_encoding_script(self):
        # both position-embedding variants must be scriptable
        m1, m2 = PositionEmbeddingSine(), PositionEmbeddingLearned()
        mm1, mm2 = torch.jit.script(m1), torch.jit.script(m2)  # noqa
    def test_backbone_script(self):
        backbone = Backbone('resnet50', True, False, False)
        torch.jit.script(backbone)  # noqa
    def test_model_script_detection(self):
        """Scripted detection model must match eager outputs exactly."""
        model = detr_resnet50(pretrained=False).eval()
        scripted_model = torch.jit.script(model)
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = model(x)
        out_script = scripted_model(x)
        self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
        self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
    def test_model_script_panoptic(self):
        """Scripted panoptic model must match eager outputs exactly (incl. masks)."""
        model = detr_resnet50_panoptic(pretrained=False).eval()
        scripted_model = torch.jit.script(model)
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = model(x)
        out_script = scripted_model(x)
        self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
        self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
        self.assertTrue(out["pred_masks"].equal(out_script["pred_masks"]))
    def test_model_detection_different_inputs(self):
        """DETR.forward accepts NestedTensor, a 4-D batch tensor, or a list of CHW tensors."""
        model = detr_resnet50(pretrained=False).eval()
        # support NestedTensor
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = model(x)
        self.assertIn('pred_logits', out)
        # and 4d Tensor
        x = torch.rand(1, 3, 200, 200)
        out = model(x)
        self.assertIn('pred_logits', out)
        # and List[Tensor[C, H, W]]
        x = torch.rand(3, 200, 200)
        out = model([x])
        self.assertIn('pred_logits', out)
    def test_warpped_model_script_detection(self):
        # NOTE(review): "warpped" is a typo for "wrapped" kept from upstream
        class WrappedDETR(nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model
            def forward(self, inputs: List[Tensor]):
                sample = nested_tensor_from_tensor_list(inputs)
                return self.model(sample)
        model = detr_resnet50(pretrained=False)
        wrapped_model = WrappedDETR(model)
        wrapped_model.eval()
        scripted_model = torch.jit.script(wrapped_model)
        x = [torch.rand(3, 200, 200), torch.rand(3, 200, 250)]
        out = wrapped_model(x)
        out_script = scripted_model(x)
        self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
        self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
@unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable')
class ONNXExporterTester(unittest.TestCase):
    """Exports models to ONNX and cross-checks outputs against ONNX Runtime."""
    @classmethod
    def setUpClass(cls):
        # fixed seed so exported weights / inputs are reproducible across runs
        torch.manual_seed(123)
    def run_model(self, model, inputs_list, tolerate_small_mismatch=False, do_constant_folding=True, dynamic_axes=None,
                  output_names=None, input_names=None):
        """Export *model* with the first input of *inputs_list*, then validate against every input."""
        model.eval()
        onnx_io = io.BytesIO()
        # export to onnx with the first input
        torch.onnx.export(model, inputs_list[0], onnx_io,
                          do_constant_folding=do_constant_folding, opset_version=12,
                          dynamic_axes=dynamic_axes, input_names=input_names, output_names=output_names)
        # validate the exported model with onnx runtime
        for test_inputs in inputs_list:
            with torch.no_grad():
                if isinstance(test_inputs, torch.Tensor) or isinstance(test_inputs, list):
                    test_inputs = (nested_tensor_from_tensor_list(test_inputs),)
                # NOTE(review): "test_ouputs" is a typo for "test_outputs" (local-only, harmless)
                test_ouputs = model(*test_inputs)
                if isinstance(test_ouputs, torch.Tensor):
                    test_ouputs = (test_ouputs,)
            self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch)
    def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False):
        """Run the serialized ONNX model on *inputs* and compare against the eager *outputs*."""
        inputs, _ = torch.jit._flatten(inputs)
        outputs, _ = torch.jit._flatten(outputs)
        def to_numpy(tensor):
            if tensor.requires_grad:
                return tensor.detach().cpu().numpy()
            else:
                return tensor.cpu().numpy()
        inputs = list(map(to_numpy, inputs))
        outputs = list(map(to_numpy, outputs))
        ort_session = onnxruntime.InferenceSession(onnx_io.getvalue())
        # compute onnxruntime output prediction
        ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs))
        ort_outs = ort_session.run(None, ort_inputs)
        for i in range(0, len(outputs)):
            try:
                torch.testing.assert_allclose(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05)
            except AssertionError as error:
                if tolerate_small_mismatch:
                    # accept only a 0.00%-of-elements mismatch (rounding noise)
                    self.assertIn("(0.00%)", str(error), str(error))
                else:
                    raise
    def test_model_onnx_detection(self):
        model = detr_resnet50(pretrained=False).eval()
        dummy_image = torch.ones(1, 3, 800, 800) * 0.3
        model(dummy_image)
        # Test exported model on images of different size, or dummy input
        self.run_model(
            model,
            [(torch.rand(1, 3, 750, 800),)],
            input_names=["inputs"],
            output_names=["pred_logits", "pred_boxes"],
            tolerate_small_mismatch=True,
        )
    @unittest.skip("CI doesn't have enough memory")
    def test_model_onnx_detection_panoptic(self):
        model = detr_resnet50_panoptic(pretrained=False).eval()
        dummy_image = torch.ones(1, 3, 800, 800) * 0.3
        model(dummy_image)
        # Test exported model on images of different size, or dummy input
        self.run_model(
            model,
            [(torch.rand(1, 3, 750, 800),)],
            input_names=["inputs"],
            output_names=["pred_logits", "pred_boxes", "pred_masks"],
            tolerate_small_mismatch=True,
        )
if __name__ == '__main__':
unittest.main()
| 8,804
| 40.928571
| 119
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/models/detr.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from torch import nn
from util import box_ops
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from .backbone import build_backbone
from .matcher import build_matcher
from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,
dice_loss, sigmoid_focal_loss)
from .transformer import build_transformer
class DETR(nn.Module):
    """ This is the DETR module that performs object detection """
    def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):
        """ Initializes the model.
        Parameters:
            backbone: torch module of the backbone to be used. See backbone.py
            transformer: torch module of the transformer architecture. See transformer.py
            num_classes: number of object classes
            num_queries: number of object queries, ie detection slot. This is the maximal number of objects
                         DETR can detect in a single image. For COCO, we recommend 100 queries.
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
        """
        super().__init__()
        self.num_queries = num_queries
        self.transformer = transformer
        hidden_dim = transformer.d_model
        # +1 for the special "no object" class
        self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        self.query_embed = nn.Embedding(num_queries, hidden_dim)
        # 1x1 conv projecting backbone channels down to the transformer width
        self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
        self.backbone = backbone
        self.aux_loss = aux_loss
    def forward(self, samples: NestedTensor):
        """ The forward expects a NestedTensor, which consists of:
               - samples.tensor: batched images, of shape [batch_size x 3 x H x W]
               - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
            It returns a dict with the following elements:
               - "pred_logits": the classification logits (including no-object) for all queries.
                                Shape= [batch_size x num_queries x (num_classes + 1)]
               - "pred_boxes": The normalized boxes coordinates for all queries, represented as
                               (center_x, center_y, height, width). These values are normalized in [0, 1],
                               relative to the size of each individual image (disregarding possible padding).
                               See PostProcess for information on how to retrieve the unnormalized bounding box.
               - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
                                dictionnaries containing the two above keys for each decoder layer.
        """
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.backbone(samples)
        src, mask = features[-1].decompose()
        assert mask is not None
        # hs holds the decoder outputs of every layer; [-1] below is the final layer
        hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]
        outputs_class = self.class_embed(hs)
        outputs_coord = self.bbox_embed(hs).sigmoid()
        out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
        if self.aux_loss:
            out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
        return out
    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_boxes': b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
class SetCriterion(nn.Module):
    """ This class computes the loss for DETR.
    The process happens in two steps:
        1) we compute hungarian assignment between ground truth boxes and the outputs of the model
        2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
    """
    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            eos_coef: relative classification weight applied to the no-object category
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        # per-class CE weights: down-weight the no-object class (index num_classes) by eos_coef
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[-1] = self.eos_coef
        self.register_buffer('empty_weight', empty_weight)
    def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
        """Classification loss (NLL)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        """
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']
        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
        # unmatched queries get the no-object class (self.num_classes)
        target_classes = torch.full(src_logits.shape[:2], self.num_classes,
                                    dtype=torch.int64, device=src_logits.device)
        target_classes[idx] = target_classes_o
        loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
        losses = {'loss_ce': loss_ce}
        if log:
            # TODO this should probably be a separate loss, not hacked in this one here
            losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
        return losses
    @torch.no_grad()
    def loss_cardinality(self, outputs, targets, indices, num_boxes):
        """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
        This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
        """
        pred_logits = outputs['pred_logits']
        device = pred_logits.device
        tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
        # Count the number of predictions that are NOT "no-object" (which is the last class)
        card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
        card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
        losses = {'cardinality_error': card_err}
        return losses
    def loss_boxes(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
           targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        assert 'pred_boxes' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs['pred_boxes'][idx]
        target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
        losses = {}
        losses['loss_bbox'] = loss_bbox.sum() / num_boxes
        # only the diagonal pairs correspond to matched (prediction, target) couples
        loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
            box_ops.box_cxcywh_to_xyxy(src_boxes),
            box_ops.box_cxcywh_to_xyxy(target_boxes)))
        losses['loss_giou'] = loss_giou.sum() / num_boxes
        return losses
    def loss_masks(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the masks: the focal loss and the dice loss.
           targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
        """
        assert "pred_masks" in outputs
        src_idx = self._get_src_permutation_idx(indices)
        tgt_idx = self._get_tgt_permutation_idx(indices)
        src_masks = outputs["pred_masks"]
        src_masks = src_masks[src_idx]
        masks = [t["masks"] for t in targets]
        # TODO use valid to mask invalid areas due to padding in loss
        target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
        target_masks = target_masks.to(src_masks)
        target_masks = target_masks[tgt_idx]
        # upsample predictions to the target size
        src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
                                mode="bilinear", align_corners=False)
        src_masks = src_masks[:, 0].flatten(1)
        target_masks = target_masks.flatten(1)
        target_masks = target_masks.view(src_masks.shape)
        losses = {
            "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
            "loss_dice": dice_loss(src_masks, target_masks, num_boxes),
        }
        return losses
    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx
    def _get_tgt_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx
    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
        # dispatch table: loss name -> bound computation method
        loss_map = {
            'labels': self.loss_labels,
            'cardinality': self.loss_cardinality,
            'boxes': self.loss_boxes,
            'masks': self.loss_masks
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
    def forward(self, outputs, targets):
        """ This performs the loss computation.
        Parameters:
             outputs: dict of tensors, see the output specification of the model for the format
             targets: list of dicts, such that len(targets) == batch_size.
                      The expected keys in each dict depends on the losses applied, see each loss' doc
        """
        outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
        # Retrieve the matching between the outputs of the last layer and the targets
        indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes accross all nodes, for normalization purposes
        num_boxes = sum(len(t["labels"]) for t in targets)
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_boxes)
        # clamp to 1 so a batch with no targets anywhere does not divide by zero
        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if 'aux_outputs' in outputs:
            for i, aux_outputs in enumerate(outputs['aux_outputs']):
                indices = self.matcher(aux_outputs, targets)
                for loss in self.losses:
                    if loss == 'masks':
                        # Intermediate masks losses are too costly to compute, we ignore them.
                        continue
                    kwargs = {}
                    if loss == 'labels':
                        # Logging is enabled only for the last layer
                        kwargs = {'log': False}
                    l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
                    l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
                    losses.update(l_dict)
        return losses
class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""
    @torch.no_grad()
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
                          For evaluation, this must be the original image size (before any data augmentation)
                          For visualization, this should be the image size after data augment, but before padding
        """
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2
        prob = F.softmax(out_logits, -1)
        # best non-"no object" class per query (the last class is dropped by [..., :-1])
        scores, labels = prob[..., :-1].max(-1)
        # convert to [x0, y0, x1, y1] format
        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]
        results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
        return results
class MLP(nn.Module):
    """Simple feed-forward network (multi-layer perceptron, also called FFN).

    ``num_layers`` linear layers with ReLU between them; no activation
    after the final layer.
    """
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        # layer widths: input -> hidden (num_layers - 1 times) -> output
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(
            nn.Linear(dims[i], dims[i + 1]) for i in range(num_layers)
        )
    def forward(self, x):
        last = self.num_layers - 1
        for i, layer in enumerate(self.layers):
            x = layer(x)
            if i < last:
                x = F.relu(x)
        return x
def build(args):
    """Build the DETR model, its criterion, and the post-processors from parsed CLI args.

    Returns:
        (model, criterion, postprocessors) where postprocessors maps task name
        ('bbox', and optionally 'segm'/'panoptic') to a post-processing module.
    """
    # the `num_classes` naming here is somewhat misleading.
    # it indeed corresponds to `max_obj_id + 1`, where max_obj_id
    # is the maximum id for a class in your dataset. For example,
    # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.
    # As another example, for a dataset that has a single class with id 1,
    # you should pass `num_classes` to be 2 (max_obj_id + 1).
    # For more details on this, check the following discussion
    # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223
    num_classes = 20 if args.dataset_file != 'coco' else 91
    if args.dataset_file == "coco_panoptic":
        # for panoptic, we just add a num_classes that is large enough to hold
        # max_obj_id + 1, but the exact value doesn't really matter
        num_classes = 250
    device = torch.device(args.device)
    backbone = build_backbone(args)
    transformer = build_transformer(args)
    model = DETR(
        backbone,
        transformer,
        num_classes=num_classes,
        num_queries=args.num_queries,
        aux_loss=args.aux_loss,
    )
    if args.masks:
        model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
    matcher = build_matcher(args)
    weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
    weight_dict['loss_giou'] = args.giou_loss_coef
    if args.masks:
        weight_dict["loss_mask"] = args.mask_loss_coef
        weight_dict["loss_dice"] = args.dice_loss_coef
    # TODO this is a hack
    if args.aux_loss:
        # replicate each loss weight for every intermediate decoder layer (suffix _0.._{L-2})
        aux_weight_dict = {}
        for i in range(args.dec_layers - 1):
            aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)
    losses = ['labels', 'boxes', 'cardinality']
    if args.masks:
        losses += ["masks"]
    criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict,
                             eos_coef=args.eos_coef, losses=losses)
    criterion.to(device)
    postprocessors = {'bbox': PostProcess()}
    if args.masks:
        postprocessors['segm'] = PostProcessSegm()
        if args.dataset_file == "coco_panoptic":
            is_thing_map = {i: i <= 90 for i in range(201)}
            postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85)
    return model, criterion, postprocessors
| 17,088
| 46.469444
| 113
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/models/matcher.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
class HungarianMatcher(nn.Module):
    """This class computes an assignment between the targets and the predictions of the network
    For efficiency reasons, the targets don't include the no_object. Because of this, in general,
    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
    while the others are un-matched (and thus treated as non-objects).
    """
    def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
        """Creates the matcher
        Params:
            cost_class: This is the relative weight of the classification error in the matching cost
            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
        """
        super().__init__()
        self.cost_class = cost_class
        self.cost_bbox = cost_bbox
        self.cost_giou = cost_giou
        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
    @torch.no_grad()
    def forward(self, outputs, targets):
        """ Performs the matching
        Params:
            outputs: This is a dict that contains at least these entries:
                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
                 "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
                 "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
                           objects in the target) containing the class labels
                 "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
        Returns:
            A list of size batch_size, containing tuples of (index_i, index_j) where:
                - index_i is the indices of the selected predictions (in order)
                - index_j is the indices of the corresponding selected targets (in order)
            For each batch element, it holds:
                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        bs, num_queries = outputs["pred_logits"].shape[:2]
        # We flatten to compute the cost matrices in a batch
        out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1)  # [batch_size * num_queries, num_classes]
        out_bbox = outputs["pred_boxes"].flatten(0, 1)  # [batch_size * num_queries, 4]
        # Also concat the target labels and boxes
        tgt_ids = torch.cat([v["labels"] for v in targets])
        tgt_bbox = torch.cat([v["boxes"] for v in targets])
        # Compute the classification cost. Contrary to the loss, we don't use the NLL,
        # but approximate it in 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, it can be ommitted.
        cost_class = -out_prob[:, tgt_ids]
        # Compute the L1 cost between boxes
        cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
        # Compute the giou cost betwen boxes
        cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
        # Final cost matrix
        C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
        C = C.view(bs, num_queries, -1).cpu()
        # split the concatenated target dimension back per sample and solve each LSAP independently
        sizes = [len(v["boxes"]) for v in targets]
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
def build_matcher(args):
    """Construct a HungarianMatcher from the command-line cost weights."""
    return HungarianMatcher(
        cost_class=args.set_cost_class,
        cost_bbox=args.set_cost_bbox,
        cost_giou=args.set_cost_giou,
    )
| 4,250
| 47.862069
| 119
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/models/segmentation.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
import io
from collections import defaultdict
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from PIL import Image
import util.box_ops as box_ops
from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list
try:
from panopticapi.utils import id2rgb, rgb2id
except ImportError:
pass
class DETRsegm(nn.Module):
    """Wraps a DETR detection model with a mask head for segmentation/panoptic tasks."""
    def __init__(self, detr, freeze_detr=False):
        super().__init__()
        self.detr = detr
        if freeze_detr:
            # train only the segmentation head; keep the detector weights fixed
            for p in self.parameters():
                p.requires_grad_(False)
        hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
        self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)
        self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)
    def forward(self, samples: NestedTensor):
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.detr.backbone(samples)
        bs = features[-1].tensors.shape[0]
        src, mask = features[-1].decompose()
        assert mask is not None
        src_proj = self.detr.input_proj(src)
        hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])
        outputs_class = self.detr.class_embed(hs)
        outputs_coord = self.detr.bbox_embed(hs).sigmoid()
        out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
        if self.detr.aux_loss:
            out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)
        # FIXME h_boxes takes the last one computed, keep this in mind
        bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)
        # FPN-style mask head, fed coarse-to-fine backbone features
        seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])
        outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])
        out["pred_masks"] = outputs_seg_masks
        return out
def _expand(tensor, length: int):
return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)
class MaskHeadSmallConv(nn.Module):
    """
    Simple convolutional head, using group norm.
    Upsampling is done using a FPN approach
    """
    def __init__(self, dim, fpn_dims, context_dim):
        super().__init__()
        # channel widths halve (then shrink further) at each upsampling stage
        inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64]
        self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
        self.gn1 = torch.nn.GroupNorm(8, dim)
        self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
        self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
        self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
        self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
        self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
        self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
        self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
        self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
        # final 1-channel mask logit map
        self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)
        self.dim = dim
        # 1x1 adapters projecting each FPN level onto the matching stage width
        self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
        self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
        self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]):
        # concat per-query attention maps with the (query-expanded) projected features
        x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
        x = self.lay1(x)
        x = self.gn1(x)
        x = F.relu(x)
        x = self.lay2(x)
        x = self.gn2(x)
        x = F.relu(x)
        cur_fpn = self.adapter1(fpns[0])
        if cur_fpn.size(0) != x.size(0):
            # FPN features are per-image; expand to per-(image, query)
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay3(x)
        x = self.gn3(x)
        x = F.relu(x)
        cur_fpn = self.adapter2(fpns[1])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay4(x)
        x = self.gn4(x)
        x = F.relu(x)
        cur_fpn = self.adapter3(fpns[2])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay5(x)
        x = self.gn5(x)
        x = F.relu(x)
        x = self.out_lay(x)
        return x
class MHAttentionMap(nn.Module):
    """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
    def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True):
        super().__init__()
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.dropout = nn.Dropout(dropout)
        self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
        self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
        nn.init.zeros_(self.k_linear.bias)
        nn.init.zeros_(self.q_linear.bias)
        nn.init.xavier_uniform_(self.k_linear.weight)
        nn.init.xavier_uniform_(self.q_linear.weight)
        # 1/sqrt(head_dim) scaling, as in standard scaled dot-product attention
        self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
    def forward(self, q, k, mask: Optional[Tensor] = None):
        q = self.q_linear(q)
        # apply k_linear as a 1x1 conv since k is a spatial feature map
        k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)
        qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads)
        kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1])
        # per-head dot product of each query with every spatial location
        weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)
        if mask is not None:
            # padded pixels get -inf so the softmax assigns them zero attention
            weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf"))
        # softmax over the flattened (h*w) spatial positions
        weights = F.softmax(weights.flatten(2), dim=-1).view(weights.size())
        weights = self.dropout(weights)
        return weights
def dice_loss(inputs, targets, num_boxes):
    """
    Compute the DICE loss, similar to generalized IOU for masks
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions (logits) for each example.
        targets: A float tensor with the same number of elements per example as
                 inputs. Stores the binary classification label for each element
                 in inputs (0 for the negative class and 1 for the positive class).
        num_boxes: normalization constant (number of boxes across the batch).
    Returns:
        Scalar tensor: the summed per-mask dice loss divided by ``num_boxes``.
    """
    inputs = inputs.sigmoid()
    inputs = inputs.flatten(1)
    # Fix: flatten targets as well, so callers may pass either pre-flattened
    # [N, H*W] masks (identity here, as SetCriterion.loss_masks does) or raw
    # [N, H, W] masks; previously only inputs were flattened.
    targets = targets.flatten(1)
    numerator = 2 * (inputs * targets).sum(1)
    denominator = inputs.sum(-1) + targets.sum(-1)
    # +1 smoothing keeps the loss finite (and 0) for empty masks
    loss = 1 - (numerator + 1) / (denominator + 1)
    return loss.sum() / num_boxes
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.

    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
    Returns:
        Loss tensor
    """
    p = torch.sigmoid(inputs)
    bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # p_t is the probability assigned to the true class of each element.
    p_t = p * targets + (1 - p) * (1 - targets)
    # Down-weight easy examples by (1 - p_t)^gamma.
    focal = bce * ((1 - p_t) ** gamma)
    if alpha >= 0:
        # Class-balancing weight: alpha for positives, (1-alpha) for negatives.
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        focal = alpha_t * focal
    return focal.mean(1).sum() / num_boxes
class PostProcessSegm(nn.Module):
    """Post-processor that upsamples, binarizes and crops predicted masks to the original image sizes."""

    def __init__(self, threshold=0.5):
        super().__init__()
        # sigmoid probability above which a pixel is considered part of the mask
        self.threshold = threshold

    @torch.no_grad()
    def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
        # results: list of per-image dicts, augmented in place with a "masks" entry.
        # outputs: model output dict containing "pred_masks".
        # orig_target_sizes / max_target_sizes: per-image (h, w) tensors — original
        # requested size and size after augmentation/padding, respectively.
        assert len(orig_target_sizes) == len(max_target_sizes)
        max_h, max_w = max_target_sizes.max(0)[0].tolist()
        outputs_masks = outputs["pred_masks"].squeeze(2)
        # upsample every mask to the padded batch resolution in a single call
        outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False)
        outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()
        for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
            img_h, img_w = t[0], t[1]
            # crop away the batch padding, then resize to the originally requested size
            results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
            results[i]["masks"] = F.interpolate(
                results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
            ).byte()
        return results
class PostProcessPanoptic(nn.Module):
    """This class converts the output of the model to the final panoptic result, in the format expected by the
    coco panoptic API """

    def __init__(self, is_thing_map, threshold=0.85):
        """
        Parameters:
            is_thing_map: dictionary whose keys are the class ids, and the values a boolean indicating whether
                          the class is a thing (True) or a stuff (False) class
            threshold: confidence threshold: segments with confidence lower than this will be deleted
        """
        super().__init__()
        self.threshold = threshold
        self.is_thing_map = is_thing_map

    def forward(self, outputs, processed_sizes, target_sizes=None):
        """ This function computes the panoptic prediction from the model's predictions.
        Parameters:
            outputs: This is a dict coming directly from the model. See the model doc for the content.
            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
                             model, ie the size after data augmentation but before batching.
            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
                          of each prediction. If left to None, it will default to the processed_sizes
        """
        if target_sizes is None:
            target_sizes = processed_sizes
        assert len(processed_sizes) == len(target_sizes)
        out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
        assert len(out_logits) == len(raw_masks) == len(target_sizes)
        preds = []

        def to_tuple(tup):
            # normalize sizes to plain (h, w) tuples, whether given as tuples or tensors
            if isinstance(tup, tuple):
                return tup
            return tuple(tup.cpu().tolist())

        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
        ):
            # we filter empty queries and detection below threshold
            # (the last logit index is the "no object" class)
            scores, labels = cur_logits.softmax(-1).max(-1)
            keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
            cur_scores = cur_scores[keep]
            cur_classes = cur_classes[keep]
            cur_masks = cur_masks[keep]
            cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
            h, w = cur_masks.shape[-2:]
            assert len(cur_boxes) == len(cur_classes)
            # It may be that we have several predicted masks for the same stuff class.
            # In the following, we track the list of masks ids for each stuff class (they are merged later on)
            cur_masks = cur_masks.flatten(1)
            stuff_equiv_classes = defaultdict(lambda: [])
            for k, label in enumerate(cur_classes):
                if not self.is_thing_map[label.item()]:
                    stuff_equiv_classes[label.item()].append(k)

            def get_ids_area(masks, scores, dedup=False):
                # This helper function creates the final panoptic segmentation image
                # It also returns the area of the masks that appears on the image
                m_id = masks.transpose(0, 1).softmax(-1)
                if m_id.shape[-1] == 0:
                    # We didn't detect any mask :(
                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
                else:
                    # each pixel is assigned to the mask with the highest probability
                    m_id = m_id.argmax(-1).view(h, w)
                if dedup:
                    # Merge the masks corresponding to the same stuff class
                    for equiv in stuff_equiv_classes.values():
                        if len(equiv) > 1:
                            for eq_id in equiv:
                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
                final_h, final_w = to_tuple(target_size)
                # round-trip through a PIL image so the id map is resized with nearest-neighbor
                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
                np_seg_img = (
                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
                )
                m_id = torch.from_numpy(rgb2id(np_seg_img))
                area = []
                for i in range(len(scores)):
                    area.append(m_id.eq(i).sum().item())
                return area, seg_img

            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
            if cur_classes.numel() > 0:
                # We now filter empty masks as long as we find some
                while True:
                    filtered_small = torch.as_tensor(
                        [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
                    )
                    if filtered_small.any().item():
                        cur_scores = cur_scores[~filtered_small]
                        cur_classes = cur_classes[~filtered_small]
                        cur_masks = cur_masks[~filtered_small]
                        area, seg_img = get_ids_area(cur_masks, cur_scores)
                    else:
                        break
            else:
                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
            segments_info = []
            for i, a in enumerate(area):
                cat = cur_classes[i].item()
                segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
            del cur_classes

            with io.BytesIO() as out:
                seg_img.save(out, format="PNG")
                predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
            preds.append(predictions)
        return preds
| 15,573
| 41.785714
| 120
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/models/position_encoding.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from util.misc import NestedTensor
class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        # default scale covers one full period over the image
        self.scale = 2 * math.pi if scale is None else scale

    def forward(self, tensor_list: "NestedTensor"):
        feats = tensor_list.tensors
        mask = tensor_list.mask
        assert mask is not None
        valid = ~mask
        # cumulative counts of valid pixels give the (1-based) row/column positions
        y_embed = valid.cumsum(1, dtype=torch.float32)
        x_embed = valid.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=feats.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # interleave sin on even channels, cos on odd channels
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        return torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
class PositionEmbeddingLearned(nn.Module):
    """
    Absolute pos embedding, learned.
    """

    def __init__(self, num_pos_feats=256):
        super().__init__()
        # one learned vector per row/column index, up to 50 of each
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, tensor_list: "NestedTensor"):
        feats = tensor_list.tensors
        h, w = feats.shape[-2:]
        col_idx = torch.arange(w, device=feats.device)
        row_idx = torch.arange(h, device=feats.device)
        x_emb = self.col_embed(col_idx)
        y_emb = self.row_embed(row_idx)
        # broadcast column embeddings down the rows and row embeddings across the
        # columns, concatenate on the channel axis, then tile over the batch
        grid = torch.cat(
            [x_emb.unsqueeze(0).repeat(h, 1, 1), y_emb.unsqueeze(1).repeat(1, w, 1)],
            dim=-1,
        )
        return grid.permute(2, 0, 1).unsqueeze(0).repeat(feats.shape[0], 1, 1, 1)
def build_position_encoding(args):
    """Select and build the positional encoding requested in *args*."""
    N_steps = args.hidden_dim // 2
    kind = args.position_embedding
    if kind in ('v2', 'sine'):
        # TODO find a better way of exposing other arguments
        return PositionEmbeddingSine(N_steps, normalize=True)
    if kind in ('v3', 'learned'):
        return PositionEmbeddingLearned(N_steps)
    raise ValueError(f"not supported {args.position_embedding}")
| 3,336
| 36.077778
| 103
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/models/backbone.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rqsrt,
    without which any other models than torchvision.models.resnet[18,34,50,101]
    produce nans.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # identity affine transform and unit statistics by default
        for name, factory in (("weight", torch.ones), ("bias", torch.zeros),
                              ("running_mean", torch.zeros), ("running_var", torch.ones)):
            self.register_buffer(name, factory(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # regular BatchNorm checkpoints carry a tracking counter we don't have
        state_dict.pop(prefix + 'num_batches_tracked', None)
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # fold everything into a single scale/bias pair, reshaped up front
        # so the expression stays fuser-friendly
        eps = 1e-5
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        scale = w * (rv + eps).rsqrt()
        return x * scale + (b - rm * scale)
class BackboneBase(nn.Module):
    """Wraps a torchvision backbone: freezes parameters, extracts intermediate feature maps
    and attaches a downsampled padding mask to each one."""

    def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
        super().__init__()
        for name, parameter in backbone.named_parameters():
            # Freeze everything outside layer2-4; when train_backbone is False,
            # freeze those too (note `or` binds looser than `and` here).
            if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
                parameter.requires_grad_(False)
        if return_interm_layers:
            return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
        else:
            return_layers = {'layer4': "0"}
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        self.num_channels = num_channels

    def forward(self, tensor_list: NestedTensor):
        xs = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, x in xs.items():
            m = tensor_list.mask
            assert m is not None
            # resample the padding mask to this feature map's resolution
            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(x, mask)
        return out
class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""

    def __init__(self, name: str,
                 train_backbone: bool,
                 return_interm_layers: bool,
                 dilation: bool):
        # name: torchvision resnet variant, e.g. "resnet50".
        # dilation: replace stride with dilation in the last stage (the "DC5" variant).
        backbone = getattr(torchvision.models, name)(
            replace_stride_with_dilation=[False, False, dilation],
            pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
        # resnet18/34 end with 512 channels; the deeper variants end with 2048
        num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
        super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
    """Pairs a backbone (index 0) with a positional-encoding module (index 1)
    and returns the features alongside their position encodings."""

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def forward(self, tensor_list: "NestedTensor"):
        features = self[0](tensor_list)
        outs = []
        encodings = []
        for feature in features.values():
            outs.append(feature)
            # position encoding, cast to match the feature dtype
            encodings.append(self[1](feature).to(feature.tensors.dtype))
        return outs, encodings
def build_backbone(args):
    """Assemble the backbone + positional encoding joiner from parsed arguments."""
    pos_enc = build_position_encoding(args)
    net = Backbone(args.backbone, args.lr_backbone > 0, args.masks, args.dilation)
    model = Joiner(net, pos_enc)
    # expose the backbone's channel count on the joined model
    model.num_channels = net.num_channels
    return model
| 4,437
| 35.983333
| 113
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/models/transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class Transformer(nn.Module):
    """DETR encoder-decoder transformer.

    Differences from torch.nn.Transformer: positional encodings are passed into
    every attention block, the encoder's final LayerNorm is only used in the
    pre-norm variant, and the decoder can return all intermediate activations.
    """

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False):
        super().__init__()
        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        # pre-norm stacks need a final LayerNorm; post-norm stacks do not
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                          return_intermediate=return_intermediate_dec)
        self._reset_parameters()
        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier-initialize every matrix parameter; 1-D params keep their defaults
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, query_embed, pos_embed):
        # flatten NxCxHxW to HWxNxC
        bs, c, h, w = src.shape
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        # one copy of the learned object queries per batch element
        query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
        mask = mask.flatten(1)
        # the decoder input starts at zero; the queries enter via query_pos
        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,
                          pos=pos_embed, query_pos=query_embed)
        return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
class TransformerEncoder(nn.Module):
    """A stack of cloned encoder layers with an optional final normalization."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        # thread the activation through each layer in turn
        hidden = src
        for layer in self.layers:
            hidden = layer(hidden, src_mask=mask,
                           src_key_padding_mask=src_key_padding_mask, pos=pos)
        if self.norm is None:
            return hidden
        return self.norm(hidden)
class TransformerDecoder(nn.Module):
    """A stack of cloned decoder layers; can return activations from every layer."""

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        hidden = tgt
        intermediate = []
        for layer in self.layers:
            hidden = layer(hidden, memory, tgt_mask=tgt_mask,
                           memory_mask=memory_mask,
                           tgt_key_padding_mask=tgt_key_padding_mask,
                           memory_key_padding_mask=memory_key_padding_mask,
                           pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                # record the normed activation after every layer
                intermediate.append(self.norm(hidden))
        if self.norm is not None:
            hidden = self.norm(hidden)
            if self.return_intermediate:
                # replace the last stored entry with the final normed output
                intermediate.pop()
                intermediate.append(hidden)
        if self.return_intermediate:
            return torch.stack(intermediate)
        return hidden.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
    """One encoder layer (self-attention + feed-forward), in post-norm or
    pre-norm form selected by ``normalize_before``."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # positional encodings are added to queries/keys only, never to values
        return tensor if pos is None else tensor + pos

    def forward_post(self,
                     src,
                     src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        # post-norm: attention -> residual -> norm, then FFN -> residual -> norm
        q = k = self.with_pos_embed(src, pos)
        src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        # pre-norm: normalize first, then attention/FFN with residual connections
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
    """One decoder layer (self-attention, cross-attention over the encoder
    memory, then feed-forward), in post-norm or pre-norm form selected by
    ``normalize_before``."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # positional encodings are added to queries/keys only, never to values
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        # self-attention among the object queries
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        # cross-attention: queries attend to the encoder memory
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        # pre-norm variant: normalize before each sub-block
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
                                    tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask,
                                 tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
    """Construct the DETR Transformer from parsed command-line arguments."""
    return Transformer(
        d_model=args.hidden_dim,
        nhead=args.nheads,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        dim_feedforward=args.dim_feedforward,
        dropout=args.dropout,
        normalize_before=args.pre_norm,
        # auxiliary losses need the activations of every decoder layer
        return_intermediate_dec=True,
    )
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 12,162
| 39.815436
| 98
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/models/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .detr import build
def build_model(args):
    """Thin public entry point: delegates to the DETR builder in ``models.detr``."""
    return build(args)
| 143
| 19.571429
| 70
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/d2/converter.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Helper script to convert models trained with the main version of DETR to be used with the Detectron2 version.
"""
import json
import argparse
import numpy as np
import torch
def parse_args():
    """Parse the converter's command-line options (source and output paths)."""
    parser = argparse.ArgumentParser("D2 model converter")
    options = {
        "--source_model": "Path or url to the DETR model to convert",
        "--output_model": "Path where to save the converted model",
    }
    for flag, help_text in options.items():
        parser.add_argument(flag, default="", type=str, help=help_text)
    return parser.parse_args()
def main():
    """Convert a DETR checkpoint into a Detectron2-compatible state dict."""
    args = parse_args()

    # D2 expects contiguous classes, so we need to remap the 92 classes from DETR
    # fmt: off
    coco_idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
                27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51,
                52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77,
                78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91]
    # fmt: on

    coco_idx = np.array(coco_idx)

    if args.source_model.startswith("https"):
        checkpoint = torch.hub.load_state_dict_from_url(args.source_model, map_location="cpu", check_hash=True)
    else:
        checkpoint = torch.load(args.source_model, map_location="cpu")
    model_to_convert = checkpoint["model"]

    model_converted = {}
    for k in model_to_convert.keys():
        old_k = k
        if "backbone" in k:
            # rename torchvision-style backbone keys to detectron2's naming scheme
            k = k.replace("backbone.0.body.", "")
            if "layer" not in k:
                k = "stem." + k
            for t in [1, 2, 3, 4]:
                k = k.replace(f"layer{t}", f"res{t + 1}")
            for t in [1, 2, 3]:
                k = k.replace(f"bn{t}", f"conv{t}.norm")
            k = k.replace("downsample.0", "shortcut")
            k = k.replace("downsample.1", "shortcut.norm")
            k = "backbone.0.backbone." + k
        k = "detr." + k
        print(old_k, "->", k)
        if "class_embed" in old_k:
            v = model_to_convert[old_k].detach()
            # remap the 92-way classification head rows onto D2's contiguous ids
            if v.shape[0] == 92:
                shape_old = v.shape
                model_converted[k] = v[coco_idx]
                print("Head conversion: changing shape from {} to {}".format(shape_old, model_converted[k].shape))
                continue
        model_converted[k] = model_to_convert[old_k].detach()

    model_to_save = {"model": model_converted}
    torch.save(model_to_save, args.output_model)
if __name__ == "__main__":
    # Run the checkpoint conversion when invoked as a script.
    main()
| 2,590
| 36.014286
| 114
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/d2/train_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import sys
import itertools
# fmt: off
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# fmt: on
import time
from typing import Any, Dict, List, Set
import torch
import detectron2.utils.comm as comm
from d2.detr import DetrDatasetMapper, add_detr_config
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from detectron2.solver.build import maybe_add_gradient_clipping
class Trainer(DefaultTrainer):
    """
    Extension of the Trainer class adapted to DETR.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return COCOEvaluator(dataset_name, cfg, True, output_folder)

    @classmethod
    def build_train_loader(cls, cfg):
        # DETR uses its own dataset mapper; other architectures fall back to
        # detectron2's default mapper (mapper=None).
        if "Detr" == cfg.MODEL.META_ARCHITECTURE:
            mapper = DetrDatasetMapper(cfg, True)
        else:
            mapper = None
        return build_detection_train_loader(cfg, mapper=mapper)

    @classmethod
    def build_optimizer(cls, cfg, model):
        # Build one parameter group per parameter so the backbone can get a
        # scaled learning rate (BACKBONE_MULTIPLIER).
        params: List[Dict[str, Any]] = []
        memo: Set[torch.nn.parameter.Parameter] = set()
        for key, value in model.named_parameters(recurse=True):
            if not value.requires_grad:
                continue
            # Avoid duplicating parameters
            if value in memo:
                continue
            memo.add(value)
            lr = cfg.SOLVER.BASE_LR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY
            if "backbone" in key:
                lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER
            params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]

        def maybe_add_full_model_gradient_clipping(optim):  # optim: the optimizer class
            # detectron2 doesn't have full model gradient clipping now
            clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
            enable = (
                cfg.SOLVER.CLIP_GRADIENTS.ENABLED
                and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
                and clip_norm_val > 0.0
            )

            class FullModelGradientClippingOptimizer(optim):
                def step(self, closure=None):
                    # clip the gradient norm of the whole model before stepping
                    all_params = itertools.chain(*[x["params"] for x in self.param_groups])
                    torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
                    super().step(closure=closure)

            return FullModelGradientClippingOptimizer if enable else optim

        optimizer_type = cfg.SOLVER.OPTIMIZER
        if optimizer_type == "SGD":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
                params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
            )
        elif optimizer_type == "ADAMW":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
                params, cfg.SOLVER.BASE_LR
            )
        else:
            raise NotImplementedError(f"no optimizer type {optimizer_type}")
        # per-parameter clipping is handled by detectron2's helper instead
        if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
            optimizer = maybe_add_gradient_clipping(cfg, optimizer)
        return optimizer
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    add_detr_config(cfg)  # register DETR-specific config keys
    cfg.merge_from_file(args.config_file)  # options from the config file
    cfg.merge_from_list(args.opts)  # command-line overrides take precedence
    cfg.freeze()
    default_setup(cfg, args)  # logging, seeding, output dir, etc.
    return cfg
def main(args):
    """Entry point: run evaluation only (``--eval-only``) or full training."""
    cfg = setup(args)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        # load the requested weights (or resume from the last checkpoint)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # Launch (possibly multi-machine, multi-GPU) training via detectron2's launcher.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 4,999
| 33.246575
| 115
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/d2/detr/detr.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
from typing import List
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from detectron2.layers import ShapeSpec
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks, PolygonMasks
from detectron2.utils.logger import log_first_n
from fvcore.nn import giou_loss, smooth_l1_loss
from models.backbone import Joiner
from models.detr import DETR, SetCriterion
from models.matcher import HungarianMatcher
from models.position_encoding import PositionEmbeddingSine
from models.transformer import Transformer
from models.segmentation import DETRsegm, PostProcessPanoptic, PostProcessSegm
from util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh
from util.misc import NestedTensor
from datasets.coco import convert_coco_poly_to_mask
__all__ = ["Detr"]
class MaskedBackbone(nn.Module):
    """ This is a thin wrapper around D2's backbone to provide padding masking"""

    def __init__(self, cfg):
        super().__init__()
        self.backbone = build_backbone(cfg)
        shapes = self.backbone.output_shape()
        # stride of every returned feature level, in backbone output order
        self.feature_strides = [shapes[name].stride for name in shapes.keys()]
        self.num_channels = shapes[list(shapes.keys())[-1]].channels

    def forward(self, images):
        features = self.backbone(images.tensor)
        masks = self.mask_out_padding(
            [feat.shape for feat in features.values()],
            images.image_sizes,
            images.tensor.device,
        )
        assert len(features) == len(masks)
        for level, key in enumerate(features.keys()):
            features[key] = NestedTensor(features[key], masks[level])
        return features

    def mask_out_padding(self, feature_shapes, image_sizes, device):
        # Build a boolean mask per feature level: True marks padded locations,
        # False marks pixels that come from the actual image.
        assert len(feature_shapes) == len(self.feature_strides)
        masks = []
        for level, shape in enumerate(feature_shapes):
            N, _, H, W = shape
            level_mask = torch.ones((N, H, W), dtype=torch.bool, device=device)
            stride = self.feature_strides[level]
            for img_idx, (h, w) in enumerate(image_sizes):
                valid_h = int(np.ceil(float(h) / stride))
                valid_w = int(np.ceil(float(w) / stride))
                level_mask[img_idx, :valid_h, :valid_w] = 0
            masks.append(level_mask)
        return masks
@META_ARCH_REGISTRY.register()
class Detr(nn.Module):
    """
    Implement Detr
    """

    def __init__(self, cfg):
        super().__init__()

        self.device = torch.device(cfg.MODEL.DEVICE)

        self.num_classes = cfg.MODEL.DETR.NUM_CLASSES
        self.mask_on = cfg.MODEL.MASK_ON
        hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM
        num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES
        # Transformer parameters:
        nheads = cfg.MODEL.DETR.NHEADS
        dropout = cfg.MODEL.DETR.DROPOUT
        dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD
        enc_layers = cfg.MODEL.DETR.ENC_LAYERS
        dec_layers = cfg.MODEL.DETR.DEC_LAYERS
        pre_norm = cfg.MODEL.DETR.PRE_NORM

        # Loss parameters:
        giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT
        l1_weight = cfg.MODEL.DETR.L1_WEIGHT
        deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
        no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT

        # Half of the hidden dim goes to the x-axis and half to the y-axis of
        # the sine position embedding.
        N_steps = hidden_dim // 2
        d2_backbone = MaskedBackbone(cfg)
        backbone = Joiner(d2_backbone, PositionEmbeddingSine(N_steps, normalize=True))
        backbone.num_channels = d2_backbone.num_channels

        transformer = Transformer(
            d_model=hidden_dim,
            dropout=dropout,
            nhead=nheads,
            dim_feedforward=dim_feedforward,
            num_encoder_layers=enc_layers,
            num_decoder_layers=dec_layers,
            normalize_before=pre_norm,
            return_intermediate_dec=deep_supervision,
        )

        self.detr = DETR(
            backbone, transformer, num_classes=self.num_classes, num_queries=num_queries, aux_loss=deep_supervision
        )
        if self.mask_on:
            frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS
            if frozen_weights != '':
                print("LOAD pre-trained weights")
                weight = torch.load(frozen_weights, map_location=lambda storage, loc: storage)['model']
                # Strip the 'detr.' prefix so checkpoint keys match self.detr.
                new_weight = {}
                for k, v in weight.items():
                    if 'detr.' in k:
                        new_weight[k.replace('detr.', '')] = v
                    else:
                        print(f"Skipping loading weight {k} from frozen model")
                del weight
                self.detr.load_state_dict(new_weight)
                del new_weight
            # Wrap with the segmentation head; freeze the box model only when
            # it was loaded from frozen weights.
            self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != ''))
            self.seg_postprocess = PostProcessSegm

        self.detr.to(self.device)

        # building criterion
        matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight, cost_giou=giou_weight)
        weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight}
        weight_dict["loss_giou"] = giou_weight
        if deep_supervision:
            # Replicate the loss weights for every intermediate decoder layer.
            aux_weight_dict = {}
            for i in range(dec_layers - 1):
                aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
            weight_dict.update(aux_weight_dict)
        losses = ["labels", "boxes", "cardinality"]
        if self.mask_on:
            losses += ["masks"]
        self.criterion = SetCriterion(
            self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses,
        )
        self.criterion.to(self.device)

        pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
        pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
        self.normalizer = lambda x: (x - pixel_mean) / pixel_std
        self.to(self.device)

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:

                * image: Tensor, image in (C, H, W) format.
                * instances: Instances

                Other information that's included in the original dicts, such as:

                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.
        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        images = self.preprocess_image(batched_inputs)
        output = self.detr(images)

        if self.training:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]

            targets = self.prepare_targets(gt_instances)
            loss_dict = self.criterion(output, targets)
            weight_dict = self.criterion.weight_dict
            # Scale each loss term by its configured weight before returning.
            for k in loss_dict.keys():
                if k in weight_dict:
                    loss_dict[k] *= weight_dict[k]
            return loss_dict
        else:
            box_cls = output["pred_logits"]
            box_pred = output["pred_boxes"]
            mask_pred = output["pred_masks"] if self.mask_on else None
            results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes)
            processed_results = []
            # Rescale predictions to each image's requested output resolution.
            for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                r = detector_postprocess(results_per_image, height, width)
                processed_results.append({"instances": r})
            return processed_results

    def prepare_targets(self, targets):
        """Convert detectron2 Instances into DETR targets (normalized cxcywh boxes)."""
        new_targets = []
        for targets_per_image in targets:
            h, w = targets_per_image.image_size
            image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)
            gt_classes = targets_per_image.gt_classes
            # Normalize absolute xyxy boxes to [0, 1], then convert to cxcywh.
            gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy
            gt_boxes = box_xyxy_to_cxcywh(gt_boxes)
            new_targets.append({"labels": gt_classes, "boxes": gt_boxes})
            if self.mask_on and hasattr(targets_per_image, 'gt_masks'):
                gt_masks = targets_per_image.gt_masks
                gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)
                new_targets[-1].update({'masks': gt_masks})
        return new_targets

    def inference(self, box_cls, box_pred, mask_pred, image_sizes):
        """
        Arguments:
            box_cls (Tensor): tensor of shape (batch_size, num_queries, K).
                The tensor predicts the classification probability for each query.
            box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).
                The tensor predicts 4-vector (x,y,w,h) box
                regression values for every query
            image_sizes (List[torch.Size]): the input image sizes

        Returns:
            results (List[Instances]): a list of #images elements.
        """
        assert len(box_cls) == len(image_sizes)
        results = []

        # For each query take the best class, excluding the trailing
        # `no_object` logit (hence the [:, :, :-1] slice).
        scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)

        for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(zip(
            scores, labels, box_pred, image_sizes
        )):
            result = Instances(image_size)
            result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image))

            # Boxes are predicted in normalized coords; scale to pixels.
            result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])
            if self.mask_on:
                mask = F.interpolate(mask_pred[i].unsqueeze(0), size=image_size, mode='bilinear', align_corners=False)
                mask = mask[0].sigmoid() > 0.5
                B, N, H, W = mask_pred.shape
                mask = BitMasks(mask.cpu()).crop_and_resize(result.pred_boxes.tensor.cpu(), 32)
                result.pred_masks = mask.unsqueeze(1).to(mask_pred[0].device)

            result.scores = scores_per_image
            result.pred_classes = labels_per_image
            results.append(result)
        return results

    def preprocess_image(self, batched_inputs):
        """
        Normalize, pad and batch the input images.
        """
        images = [self.normalizer(x["image"].to(self.device)) for x in batched_inputs]
        images = ImageList.from_tensors(images)
        return images
| 11,143
| 41.534351
| 118
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/d2/detr/dataset_mapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
__all__ = ["DetrDatasetMapper"]
def build_transform_gen(cfg, is_train):
    """
    Create a list of :class:`TransformGen` from config.
    Returns:
        list[TransformGen]
    """
    if is_train:
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    else:
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        sample_style = "choice"

    if sample_style == "range":
        assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))

    logger = logging.getLogger(__name__)

    # Training additionally gets a random horizontal flip before the resize.
    tfm_gens = [T.RandomFlip()] if is_train else []
    tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
    if is_train:
        logger.info("TransformGens used in training: " + str(tfm_gens))
    return tfm_gens
class DetrDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and map it into a format used by DETR.

    The callable currently does the following:

    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    def __init__(self, cfg, is_train=True):
        if cfg.INPUT.CROP.ENABLED and is_train:
            # Cropping is preceded by its own resize so crops vary in scale.
            self.crop_gen = [
                T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
                T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
            ]
        else:
            self.crop_gen = None

        self.mask_on = cfg.MODEL.MASK_ON
        self.tfm_gens = build_transform_gen(cfg, is_train)
        logging.getLogger(__name__).info(
            "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
        )

        self.img_format = cfg.INPUT.FORMAT
        self.is_train = is_train

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        if self.crop_gen is None:
            image, transforms = T.apply_transform_gens(self.tfm_gens, image)
        else:
            # With cropping enabled, apply it on a random half of the images,
            # inserting the crop just before the final resize transform.
            if np.random.rand() > 0.5:
                image, transforms = T.apply_transform_gens(self.tfm_gens, image)
            else:
                image, transforms = T.apply_transform_gens(
                    self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
                )

        image_shape = image.shape[:2]  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))

        if not self.is_train:
            # USER: Modify this if you want to keep them for some reason.
            dataset_dict.pop("annotations", None)
            return dataset_dict

        if "annotations" in dataset_dict:
            # USER: Modify this if you want to keep them for some reason.
            for anno in dataset_dict["annotations"]:
                if not self.mask_on:
                    anno.pop("segmentation", None)
                anno.pop("keypoints", None)

            # USER: Implement additional transformations if you have other types of data
            annos = [
                utils.transform_instance_annotations(obj, transforms, image_shape)
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]
            instances = utils.annotations_to_instances(annos, image_shape)
            dataset_dict["instances"] = utils.filter_empty_instances(instances)
        return dataset_dict
| 4,570
| 36.162602
| 111
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/d2/detr/config.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.config import CfgNode as CN
def add_detr_config(cfg):
    """
    Add config for DETR.
    """
    cfg.MODEL.DETR = CN()
    cfg.MODEL.DETR.NUM_CLASSES = 80

    # For Segmentation
    cfg.MODEL.DETR.FROZEN_WEIGHTS = ''

    # LOSS
    cfg.MODEL.DETR.GIOU_WEIGHT = 2.0
    cfg.MODEL.DETR.L1_WEIGHT = 5.0
    cfg.MODEL.DETR.DEEP_SUPERVISION = True
    cfg.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1

    # TRANSFORMER
    cfg.MODEL.DETR.NHEADS = 8
    cfg.MODEL.DETR.DROPOUT = 0.1
    cfg.MODEL.DETR.DIM_FEEDFORWARD = 2048
    cfg.MODEL.DETR.ENC_LAYERS = 6
    cfg.MODEL.DETR.DEC_LAYERS = 6
    cfg.MODEL.DETR.PRE_NORM = False

    cfg.MODEL.DETR.HIDDEN_DIM = 256
    cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 100

    # Optimizer settings specific to the DETR training recipe.
    cfg.SOLVER.OPTIMIZER = "ADAMW"
    cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
| 888
| 24.4
| 70
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/d2/detr/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import add_detr_config
from .detr import Detr
from .dataset_mapper import DetrDatasetMapper
| 176
| 34.4
| 70
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/util/plot_utils.py
|
"""
Plotting utilities to visualize training logs.
"""
import torch
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path, PurePath
def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
    '''
    Function to plot specific fields from training log(s). Plots both training and test results.

    :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
              - fields = which results to plot from each log file - plots both training and test for each field.
              - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
              - log_name = optional, name of log file if different than default 'log.txt'.

    :: Outputs - matplotlib plots of results in fields, color coded for each log file.
               - solid lines are training results, dashed lines are test results.
    '''
    func_name = "plot_utils.py::plot_logs"

    # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
    # convert single Path to list to avoid 'not iterable' error
    if not isinstance(logs, list):
        if isinstance(logs, PurePath):
            logs = [logs]
            print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
        else:
            raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
            Expect list[Path] or single Path obj, received {type(logs)}")

    # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir
    for i, dir in enumerate(logs):
        if not isinstance(dir, PurePath):
            raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}")
        if not dir.exists():
            raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
        # verify log_name exists
        fn = Path(dir / log_name)
        if not fn.exists():
            print(f"-> missing {log_name}. Have you gotten to Epoch 1 in training?")
            print(f"--> full path of missing log file: {fn}")
            return

    # load log file(s) and plot
    dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]

    fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))

    for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
        for j, field in enumerate(fields):
            if field == 'mAP':
                # test_coco_eval_bbox rows hold COCO AP arrays; index 1 is the
                # column plotted as mAP here -- presumably AP50; TODO confirm
                # against the log-writing code.
                coco_eval = pd.DataFrame(
                    np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1]
                ).ewm(com=ewm_col).mean()
                axs[j].plot(coco_eval, c=color)
            else:
                # Plot train (solid) and test (dashed) series in one color.
                df.interpolate().ewm(com=ewm_col).mean().plot(
                    y=[f'train_{field}', f'test_{field}'],
                    ax=axs[j],
                    color=[color] * 2,
                    style=['-', '--']
                )
    for ax, field in zip(axs, fields):
        ax.legend([Path(p).name for p in logs])
        ax.set_title(field)
def plot_precision_recall(files, naming_scheme='iter'):
    """Plot precision/recall and score/recall curves from saved COCO-eval dumps.

    files: paths to torch-saved eval dicts with 'precision', 'scores',
    'recall' and 'params' entries; returns the matplotlib (fig, axs).
    """
    if naming_scheme == 'exp_id':
        # name becomes exp_id
        names = [f.parts[-3] for f in files]
    elif naming_scheme == 'iter':
        names = [f.stem for f in files]
    else:
        raise ValueError(f'not supported {naming_scheme}')
    fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
    for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
        data = torch.load(f)
        # precision is n_iou, n_points, n_cat, n_area, max_det
        precision = data['precision']
        recall = data['params'].recThrs
        scores = data['scores']
        # take precision for all classes, all areas and 100 detections
        # (IoU index 0 -- presumably IoU=0.5 given the mAP@50 label; confirm)
        precision = precision[0, :, :, 0, -1].mean(1)
        scores = scores[0, :, :, 0, -1].mean(1)
        prec = precision.mean()
        rec = data['recall'][0, :, 0, -1].mean()
        print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +
              f'score={scores.mean():0.3f}, ' +
              f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'
              )
        axs[0].plot(recall, precision, c=color)
        axs[1].plot(recall, scores, c=color)

    axs[0].set_title('Precision / Recall')
    axs[0].legend(names)
    axs[1].set_title('Scores / Recall')
    axs[1].legend(names)
    return fig, axs
| 4,514
| 40.805556
| 120
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/util/misc.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if float(torchvision.__version__.split(".")[1]) < 7.0:
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        # The deque keeps only the most recent `window_size` raw values,
        # while total/count accumulate over the whole series.
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        # n is the number of samples this value represents (e.g. batch size).
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        # Sum count/total across all workers so global_avg agrees everywhere.
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        # Median over the recent window only.
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        # Mean over the recent window only.
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        # Count-weighted mean over the entire series.
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        # Most recently recorded value.
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # Strip the padding before unpickling each rank's payload.
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # Reduce all values in a single collective call.
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
class MetricLogger(object):
    """Collect named SmoothedValue meters and periodically print progress."""

    def __init__(self, delimiter="\t"):
        # Unknown meter names are created lazily with default smoothing.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        # Record one scalar per named meter; tensors are unwrapped to floats.
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Allow `logger.loss`-style attribute access to meters.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing stats every `print_freq` steps."""
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the step counter to the width of the final step count.
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # data_time = time waiting on the loader; iter_time = full step.
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
def get_sha():
    """Best-effort description of the current git state (sha/status/branch).

    Falls back to 'N/A'/'clean' defaults when this is not a git checkout
    or git is unavailable.
    """
    cwd = os.path.dirname(os.path.abspath(__file__))

    def _run(command):
        return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()

    sha, diff, branch = 'N/A', "clean", 'N/A'
    try:
        sha = _run(['git', 'rev-parse', 'HEAD'])
        subprocess.check_output(['git', 'diff'], cwd=cwd)
        diff = "has uncommited changes" if _run(['git', 'diff-index', 'HEAD']) else "clean"
        branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    except Exception:
        # Not a git repo (or git missing) -- keep the defaults.
        pass
    return f"sha: {sha}, status: {diff}, branch: {branch}"
def collate_fn(batch):
    # Transpose the batch of (image, target) pairs, then pad-and-batch the
    # images into a single NestedTensor; targets stay a plain tuple.
    batch = list(zip(*batch))
    batch[0] = nested_tensor_from_tensor_list(batch[0])
    return tuple(batch)
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
    """A padded batch tensor bundled with its padding mask.

    `mask` is a boolean tensor (or None) where True marks padded locations.
    """

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        """Return a new NestedTensor with both parts moved to `device`."""
        moved_tensors = self.tensors.to(device)
        moved_mask = None if self.mask is None else self.mask.to(device)
        return NestedTensor(moved_tensors, moved_mask)

    def decompose(self):
        """Return the underlying (tensors, mask) pair."""
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    """Pad a list of (C, H, W) images to a common size and batch them.

    Returns a NestedTensor whose mask is True on padded pixels.
    """
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)

        # TODO make it support different-sized images
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        # Zero-pad every image into the common shape; mask starts all-True and
        # is cleared over each image's valid region.
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], :img.shape[2]] = False
    else:
        raise ValueError('not supported')
    return NestedTensor(tensor, mask)
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
    """ONNX-traceable variant of nested_tensor_from_tensor_list."""
    max_size = []
    for i in range(tensor_list[0].dim()):
        max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
        max_size.append(max_size_i)
    max_size = tuple(max_size)

    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        # Pad each image (and a zero mask) out to the common max size; the
        # mask padding value 1 marks padded pixels.
        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
        padded_imgs.append(padded_img)

        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
        padded_masks.append(padded_mask.to(torch.bool))

    tensor = torch.stack(padded_imgs)
    mask = torch.stack(padded_masks)

    return NestedTensor(tensor, mask=mask)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # Passing force=True lets any rank print when explicitly requested.
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    # Globally replace the builtin; affects every module in this process.
    __builtin__.print = print
def is_dist_avail_and_initialized():
    """True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of distributed processes, or 1 when not running distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """Rank of this process, or 0 when not running distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """Whether this process is rank 0 (the one allowed to write artifacts)."""
    rank = get_rank()
    return rank == 0
def save_on_master(*args, **kwargs):
    """torch.save, executed only on the main process to avoid duplicate writes."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from environment variables.

    Supports torchrun-style (RANK/WORLD_SIZE/LOCAL_RANK) and SLURM launches;
    falls back to single-process mode otherwise. Mutates `args` in place.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # Silence print() on non-master ranks.
    setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: [batch, num_classes] prediction scores.
        target: [batch] ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        list of scalar tensors, one accuracy percentage per requested k
        (a single zero tensor when target is empty).
    """
    if target.numel() == 0:
        return [torch.zeros([], device=output.device)]
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # Use .reshape, not .view: slicing the transposed `correct` tensor is
        # not guaranteed to be contiguous, and .view raises on newer PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, and this
    class can go away.
    """
    # NOTE(review): the version check compares only the minor component as a
    # float ("0.7" -> 7.0) and will misbehave on suffixed versions like
    # "0.8.0a0" -- confirm against supported torchvision versions.
    if float(torchvision.__version__.split(".")[1]) < 7.0:
        if input.numel() > 0:
            return torch.nn.functional.interpolate(
                input, size, scale_factor, mode, align_corners
            )

        # Empty batch: compute the output shape ourselves and return an
        # empty tensor of that shape.
        output_shape = _output_size(2, input, size, scale_factor)
        output_shape = list(input.shape[:-2]) + list(output_shape)
        return _new_empty_tensor(input, output_shape)
    else:
        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
| 15,304
| 31.702991
| 116
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/util/box_ops.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x0, y0, x1, y1) format."""
    cx, cy, w, h = x.unbind(-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
    return torch.stack(corners, dim=-1)
def box_xyxy_to_cxcywh(x):
    """Convert boxes from (x0, y0, x1, y1) to (cx, cy, w, h) format."""
    x0, y0, x1, y1 = x.unbind(-1)
    center_x = (x0 + x1) / 2
    center_y = (y0 + y1) / 2
    return torch.stack([center_x, center_y, x1 - x0, y1 - y0], dim=-1)
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
    """Pairwise IoU between two sets of xyxy boxes.

    Returns (iou, union), both [N, M]; union is also needed by
    generalized_box_iou, hence the second return value.
    """
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])      # [N,M,2]
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]

    extent = (bottom_right - top_left).clamp(min=0)  # [N,M,2]
    inter = extent[..., 0] * extent[..., 1]          # [N,M]

    union = area1[:, None] + area2 - inter
    return inter / union, union
def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/
    The boxes should be in [x0, y0, x1, y1] format
    Returns a [N, M] pairwise matrix, where N = len(boxes1)
    and M = len(boxes2)
    """
    # degenerate boxes gives inf / nan results
    # so do an early check
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)

    # Smallest axis-aligned box enclosing each pair.
    enclosing_tl = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    enclosing_br = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    enclosing_wh = (enclosing_br - enclosing_tl).clamp(min=0)  # [N,M,2]
    enclosing_area = enclosing_wh[..., 0] * enclosing_wh[..., 1]

    return iou - (enclosing_area - union) / enclosing_area
def masks_to_boxes(masks):
    """Compute the bounding boxes around the provided masks.

    masks: [N, H, W] binary masks. Returns an [N, 4] tensor of xyxy boxes.
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)

    h, w = masks.shape[-2:]
    ys = torch.arange(0, h, dtype=torch.float)
    xs = torch.arange(0, w, dtype=torch.float)
    ys, xs = torch.meshgrid(ys, xs)

    # Max of coordinate*mask gives the largest covered index; for the min,
    # fill the background with a huge value first so it never wins.
    background = ~(masks.bool())

    weighted_x = masks * xs.unsqueeze(0)
    x_max = weighted_x.flatten(1).max(-1)[0]
    x_min = weighted_x.masked_fill(background, 1e8).flatten(1).min(-1)[0]

    weighted_y = masks * ys.unsqueeze(0)
    y_max = weighted_y.flatten(1).max(-1)[0]
    y_min = weighted_y.masked_fill(background, 1e8).flatten(1).min(-1)[0]

    return torch.stack([x_min, y_min, x_max, y_max], 1)
| 2,561
| 27.786517
| 110
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/util/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 71
| 35
| 70
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.utils.data
import torchvision
from .coco import build as build_coco
def get_coco_api_from_dataset(dataset):
    """Unwrap nested torch Subset wrappers and return the underlying COCO API.

    Returns None implicitly when the base dataset is not a CocoDetection.
    """
    # Bounded unwrap depth guards against pathological nesting.
    for _ in range(10):
        # if isinstance(dataset, torchvision.datasets.CocoDetection):
        #     break
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
    if isinstance(dataset, torchvision.datasets.CocoDetection):
        return dataset.coco
def build_dataset(image_set, args):
    """Build the dataset named by args.dataset_file for the given split."""
    if args.dataset_file == 'coco':
        return build_coco(image_set, args)
    if args.dataset_file == 'coco_panoptic':
        # to avoid making panopticapi required for coco
        from .coco_panoptic import build as build_coco_panoptic
        return build_coco_panoptic(image_set, args)
    raise ValueError(f'dataset {args.dataset_file} not supported')
| 897
| 33.538462
| 70
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/datasets/coco_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO evaluator that works in distributed mode.
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
The difference is that there is less copy-pasting from pycocotools
in the end of the file, as python3 can suppress prints with contextlib
"""
import os
import contextlib
import copy
import numpy as np
import torch
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from util.misc import all_gather
class CocoEvaluator(object):
    """Accumulates per-image COCO evaluation results across batches.

    One ``COCOeval`` instance is kept per IoU type ("bbox", "segm",
    "keypoints"); ``update`` feeds predictions, ``synchronize_between_processes``
    merges results gathered from all distributed workers, and
    ``accumulate``/``summarize`` produce the final metrics.
    """
    def __init__(self, coco_gt, iou_types):
        """Build one COCOeval per requested IoU type against ground truth ``coco_gt``."""
        assert isinstance(iou_types, (list, tuple))
        # deep-copied so later mutations here never touch the caller's object
        coco_gt = copy.deepcopy(coco_gt)
        self.coco_gt = coco_gt
        self.iou_types = iou_types
        self.coco_eval = {}
        for iou_type in iou_types:
            self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
        self.img_ids = []
        self.eval_imgs = {k: [] for k in iou_types}
    def update(self, predictions):
        """Evaluate one batch of ``{image_id: prediction_dict}`` predictions."""
        img_ids = list(np.unique(list(predictions.keys())))
        self.img_ids.extend(img_ids)
        for iou_type in self.iou_types:
            results = self.prepare(predictions, iou_type)
            # suppress pycocotools prints
            with open(os.devnull, 'w') as devnull:
                with contextlib.redirect_stdout(devnull):
                    coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
                    coco_eval = self.coco_eval[iou_type]
                    coco_eval.cocoDt = coco_dt
                    coco_eval.params.imgIds = list(img_ids)
                    # module-level evaluate() returns the processed ids and eval array
                    img_ids, eval_imgs = evaluate(coco_eval)
                    self.eval_imgs[iou_type].append(eval_imgs)
    def synchronize_between_processes(self):
        """Concatenate this worker's results and merge with all other workers."""
        for iou_type in self.iou_types:
            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
            create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
    def accumulate(self):
        """Run COCOeval.accumulate for every IoU type."""
        for coco_eval in self.coco_eval.values():
            coco_eval.accumulate()
    def summarize(self):
        """Print the standard COCO summary table for every IoU type."""
        for iou_type, coco_eval in self.coco_eval.items():
            print("IoU metric: {}".format(iou_type))
            coco_eval.summarize()
    def prepare(self, predictions, iou_type):
        """Dispatch to the per-IoU-type result formatting routine."""
        if iou_type == "bbox":
            return self.prepare_for_coco_detection(predictions)
        elif iou_type == "segm":
            return self.prepare_for_coco_segmentation(predictions)
        elif iou_type == "keypoints":
            return self.prepare_for_coco_keypoint(predictions)
        else:
            raise ValueError("Unknown iou type {}".format(iou_type))
    def prepare_for_coco_detection(self, predictions):
        """Flatten predictions into the COCO detection-results list format."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            boxes = prediction["boxes"]
            # COCO expects [x, y, w, h] boxes
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "bbox": box,
                        "score": scores[k],
                    }
                    for k, box in enumerate(boxes)
                ]
            )
        return coco_results
    def prepare_for_coco_segmentation(self, predictions):
        """Flatten predictions into the COCO segmentation-results list format (RLE masks)."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            scores = prediction["scores"]
            labels = prediction["labels"]
            masks = prediction["masks"]
            # binarize soft masks at 0.5 before RLE encoding
            masks = masks > 0.5
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            rles = [
                mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
                for mask in masks
            ]
            for rle in rles:
                # RLE counts come back as bytes; JSON serialization needs str
                rle["counts"] = rle["counts"].decode("utf-8")
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "segmentation": rle,
                        "score": scores[k],
                    }
                    for k, rle in enumerate(rles)
                ]
            )
        return coco_results
    def prepare_for_coco_keypoint(self, predictions):
        """Flatten predictions into the COCO keypoint-results list format."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            keypoints = prediction["keypoints"]
            # one flat [x1, y1, v1, x2, y2, v2, ...] list per instance
            keypoints = keypoints.flatten(start_dim=1).tolist()
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        'keypoints': keypoint,
                        "score": scores[k],
                    }
                    for k, keypoint in enumerate(keypoints)
                ]
            )
        return coco_results
def convert_to_xywh(boxes):
    """Convert ``[x0, y0, x1, y1]`` corner boxes to COCO ``[x, y, w, h]``."""
    x0, y0, x1, y1 = boxes.unbind(1)
    return torch.stack((x0, y0, x1 - x0, y1 - y0), dim=1)
def merge(img_ids, eval_imgs):
    """Gather per-process image ids / eval arrays and deduplicate them.

    Returns the sorted unique image ids and the matching slice of the
    concatenated evaluation array (axis 2 onwards indexes images).
    """
    gathered_ids = all_gather(img_ids)
    gathered_evals = all_gather(eval_imgs)
    flat_ids = np.array([i for chunk in gathered_ids for i in chunk])
    stacked_evals = np.concatenate(list(gathered_evals), 2)
    # keep only unique (and in sorted order) images
    unique_ids, first_idx = np.unique(flat_ids, return_index=True)
    return unique_ids, stacked_evals[..., first_idx]
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
    """Install the merged, deduplicated results onto ``coco_eval`` in place."""
    merged_ids, merged_evals = merge(img_ids, eval_imgs)
    coco_eval.evalImgs = list(merged_evals.flatten())
    coco_eval.params.imgIds = list(merged_ids)
    # snapshot of the params used, as COCOeval.accumulate expects
    coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
def evaluate(self):
    '''
    Run per image evaluation on given images and store results (a list of dict) in self.evalImgs.
    Copied from pycocotools COCOeval.evaluate with the prints removed; unlike the
    original, it also RETURNS ``(p.imgIds, evalImgs)`` where ``evalImgs`` is reshaped
    to (num categories, num area ranges, num images).
    '''
    # tic = time.time()
    # print('Running per image evaluation...')
    p = self.params
    # add backward compatibility if useSegm is specified in params
    if p.useSegm is not None:
        p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
        print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
    # print('Evaluate annotation type *{}*'.format(p.iouType))
    p.imgIds = list(np.unique(p.imgIds))
    if p.useCats:
        p.catIds = list(np.unique(p.catIds))
    p.maxDets = sorted(p.maxDets)
    self.params = p
    self._prepare()
    # loop through images, area range, max detection number
    catIds = p.catIds if p.useCats else [-1]
    # pick the IoU routine: boxes/masks use IoU, keypoints use OKS
    if p.iouType == 'segm' or p.iouType == 'bbox':
        computeIoU = self.computeIoU
    elif p.iouType == 'keypoints':
        computeIoU = self.computeOks
    self.ious = {
        (imgId, catId): computeIoU(imgId, catId)
        for imgId in p.imgIds
        for catId in catIds}
    evaluateImg = self.evaluateImg
    maxDet = p.maxDets[-1]
    evalImgs = [
        evaluateImg(imgId, catId, areaRng, maxDet)
        for catId in catIds
        for areaRng in p.areaRng
        for imgId in p.imgIds
    ]
    # this is NOT in the pycocotools code, but could be done outside
    evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
    self._paramsEval = copy.deepcopy(self.params)
    # toc = time.time()
    # print('DONE (t={:0.2f}s).'.format(toc-tic))
    return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
#################################################################
| 8,735
| 32.860465
| 103
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/datasets/coco_panoptic.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
from util.box_ops import masks_to_boxes
from .coco import make_coco_transforms
class CocoPanoptic:
    """COCO panoptic dataset: yields (PIL image, target dict) pairs.

    Annotations are stored as PNG id-maps in ``ann_folder``; segment metadata
    comes from the JSON ``ann_file``.
    """
    def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True):
        with open(ann_file, 'r') as f:
            self.coco = json.load(f)
        # sort 'images' field so that they are aligned with 'annotations'
        # i.e., in alphabetical order
        self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id'])
        # sanity check
        if "annotations" in self.coco:
            for img, ann in zip(self.coco['images'], self.coco['annotations']):
                assert img['file_name'][:-4] == ann['file_name'][:-4]
        self.img_folder = img_folder
        self.ann_folder = ann_folder
        self.ann_file = ann_file
        self.transforms = transforms
        self.return_masks = return_masks
    def __getitem__(self, idx):
        """Load image ``idx`` and build its panoptic target (masks, labels, boxes)."""
        ann_info = self.coco['annotations'][idx] if "annotations" in self.coco else self.coco['images'][idx]
        # annotation file names are .png; the actual images are .jpg
        img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg')
        ann_path = Path(self.ann_folder) / ann_info['file_name']
        img = Image.open(img_path).convert('RGB')
        w, h = img.size
        if "segments_info" in ann_info:
            masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
            # the RGB-encoded PNG is decoded to a per-pixel segment-id map
            masks = rgb2id(masks)
            ids = np.array([ann['id'] for ann in ann_info['segments_info']])
            # one boolean mask per segment id
            masks = masks == ids[:, None, None]
            masks = torch.as_tensor(masks, dtype=torch.uint8)
            labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']], dtype=torch.int64)
        # NOTE(review): if 'segments_info' is absent, `masks`/`labels` below are
        # undefined and this raises NameError — presumably every entry carries
        # segments_info in practice; confirm against the annotation files.
        target = {}
        target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]])
        if self.return_masks:
            target['masks'] = masks
        target['labels'] = labels
        target["boxes"] = masks_to_boxes(masks)
        # sizes stored as (h, w)
        target['size'] = torch.as_tensor([int(h), int(w)])
        target['orig_size'] = torch.as_tensor([int(h), int(w)])
        if "segments_info" in ann_info:
            for name in ['iscrowd', 'area']:
                target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']])
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target
    def __len__(self):
        return len(self.coco['images'])
    def get_height_and_width(self, idx):
        """Return the (height, width) recorded in the JSON for image ``idx``."""
        img_info = self.coco['images'][idx]
        height = img_info['height']
        width = img_info['width']
        return height, width
def build(image_set, args):
    """Build the COCO panoptic dataset for ``image_set`` ('train' or 'val').

    Expects ``args.coco_path`` (images) and ``args.coco_panoptic_path``
    (PNG id-maps + JSON annotations) to exist; unknown splits raise KeyError.
    """
    img_folder_root = Path(args.coco_path)
    ann_folder_root = Path(args.coco_panoptic_path)
    assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist'
    assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist'
    mode = 'panoptic'
    split_names = {"train": "train2017", "val": "val2017"}
    split = split_names[image_set]
    img_folder_path = img_folder_root / split
    ann_folder = ann_folder_root / f'{mode}_{split}'
    ann_file = ann_folder_root / "annotations" / f'{mode}_{split}.json'
    return CocoPanoptic(img_folder_path, ann_folder, ann_file,
                        transforms=make_coco_transforms(image_set),
                        return_masks=args.masks)
| 3,723
| 36.24
| 111
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/datasets/coco.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO dataset which returns image_id for evaluation.
Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py
"""
from pathlib import Path
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
import datasets.transforms as T
class CocoDetection(torchvision.datasets.CocoDetection):
    """torchvision COCO detection dataset with DETR-style target conversion.

    Raw COCO annotations are converted by ``ConvertCocoPolysToMask`` before
    the (optional) augmentation pipeline is applied.
    """
    def __init__(self, img_folder, ann_file, transforms, return_masks):
        super(CocoDetection, self).__init__(img_folder, ann_file)
        # stored under a private name so it doesn't clash with the parent's attribute
        self._transforms = transforms
        self.prepare = ConvertCocoPolysToMask(return_masks)
    def __getitem__(self, idx):
        """Return (image, target) with the target converted to tensor form."""
        img, target = super(CocoDetection, self).__getitem__(idx)
        image_id = self.ids[idx]
        target = {'image_id': image_id, 'annotations': target}
        img, target = self.prepare(img, target)
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target
def convert_coco_poly_to_mask(segmentations, height, width):
    """Rasterize COCO polygon segmentations into an (N, H, W) uint8 mask tensor.

    An empty ``segmentations`` list yields an empty (0, H, W) tensor.
    """
    per_instance = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        decoded = coco_mask.decode(rles)
        # decode() drops the last axis when there is a single RLE
        if decoded.ndim < 3:
            decoded = decoded[..., None]
        # merge all polygon parts of one instance into a single mask
        merged = torch.as_tensor(decoded, dtype=torch.uint8).any(dim=2)
        per_instance.append(merged)
    if not per_instance:
        return torch.zeros((0, height, width), dtype=torch.uint8)
    return torch.stack(per_instance, dim=0)
class ConvertCocoPolysToMask(object):
    """Convert raw COCO annotation dicts into DETR's tensor target format.

    Drops crowd annotations, converts boxes from xywh to clamped xyxy,
    optionally rasterizes polygon masks, and filters out degenerate boxes.
    """
    def __init__(self, return_masks=False):
        self.return_masks = return_masks
    def __call__(self, image, target):
        """Transform ``{'image_id', 'annotations'}`` into a tensor target dict."""
        w, h = image.size
        image_id = target["image_id"]
        image_id = torch.tensor([image_id])
        anno = target["annotations"]
        # crowd annotations (iscrowd == 1) are excluded from training targets
        anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0]
        boxes = [obj["bbox"] for obj in anno]
        # guard against no boxes via resizing
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
        # xywh -> xyxy, then clamp into the image
        boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)
        classes = [obj["category_id"] for obj in anno]
        classes = torch.tensor(classes, dtype=torch.int64)
        if self.return_masks:
            segmentations = [obj["segmentation"] for obj in anno]
            masks = convert_coco_poly_to_mask(segmentations, h, w)
        keypoints = None
        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
            num_keypoints = keypoints.shape[0]
            if num_keypoints:
                # reshape flat [x, y, v, ...] triplets into (N, K, 3)
                keypoints = keypoints.view(num_keypoints, -1, 3)
        # keep only boxes with strictly positive width and height after clamping
        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
        boxes = boxes[keep]
        classes = classes[keep]
        if self.return_masks:
            masks = masks[keep]
        if keypoints is not None:
            keypoints = keypoints[keep]
        target = {}
        target["boxes"] = boxes
        target["labels"] = classes
        if self.return_masks:
            target["masks"] = masks
        target["image_id"] = image_id
        if keypoints is not None:
            target["keypoints"] = keypoints
        # for conversion to coco api
        area = torch.tensor([obj["area"] for obj in anno])
        iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
        target["area"] = area[keep]
        target["iscrowd"] = iscrowd[keep]
        # sizes stored as (h, w)
        target["orig_size"] = torch.as_tensor([int(h), int(w)])
        target["size"] = torch.as_tensor([int(h), int(w)])
        return image, target
def make_coco_transforms(image_set):
    """Return the train/val augmentation pipeline for COCO.

    Train: random flip, then either a plain multi-scale resize or a
    resize + random crop + resize branch, then normalization.
    Val: fixed-scale resize + normalization.
    """
    normalize = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
    if image_set == 'train':
        crop_branch = T.Compose([
            T.RandomResize([400, 500, 600]),
            T.RandomSizeCrop(384, 600),
            T.RandomResize(scales, max_size=1333),
        ])
        return T.Compose([
            T.RandomHorizontalFlip(),
            T.RandomSelect(
                T.RandomResize(scales, max_size=1333),
                crop_branch,
            ),
            normalize,
        ])
    if image_set == 'val':
        return T.Compose([
            T.RandomResize([800], max_size=1333),
            normalize,
        ])
    raise ValueError(f'unknown {image_set}')
def build(image_set, args):
    """Build the COCO detection dataset ('train' or 'val') from ``args.coco_path``.

    Raises ``AssertionError`` when the path does not exist and ``KeyError``
    for an unknown split name.
    """
    root = Path(args.coco_path)
    assert root.exists(), f'provided COCO path {root} does not exist'
    mode = 'instances'
    split_dirs = {"train": "train2017", "val": "val2017"}
    split = split_dirs[image_set]
    img_folder = root / split
    ann_file = root / "annotations" / f'{mode}_{split}.json'
    return CocoDetection(img_folder, ann_file,
                         transforms=make_coco_transforms(image_set),
                         return_masks=args.masks)
| 5,253
| 32.044025
| 118
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/datasets/panoptic_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import os
import util.misc as utils
try:
from panopticapi.evaluation import pq_compute
except ImportError:
pass
class PanopticEvaluator(object):
    """Collects panoptic predictions across processes and computes PQ.

    Each prediction carries a PNG id-map (written to ``output_dir``) plus
    JSON metadata; ``summarize`` runs panopticapi's ``pq_compute`` on the
    main process only.
    """
    def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"):
        self.gt_json = ann_file
        self.gt_folder = ann_folder
        # only the main process creates the output directory
        if utils.is_main_process():
            if not os.path.exists(output_dir):
                os.mkdir(output_dir)
        self.output_dir = output_dir
        self.predictions = []
    def update(self, predictions):
        """Write each prediction's PNG to disk and buffer its metadata."""
        for p in predictions:
            # pop the raw bytes so only JSON-serializable metadata remains
            with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f:
                f.write(p.pop("png_string"))
        self.predictions += predictions
    def synchronize_between_processes(self):
        """Gather buffered predictions from all distributed workers."""
        all_predictions = utils.all_gather(self.predictions)
        merged_predictions = []
        for p in all_predictions:
            merged_predictions += p
        self.predictions = merged_predictions
    def summarize(self):
        """Dump predictions to JSON and return pq_compute results (main process only)."""
        if utils.is_main_process():
            json_data = {"annotations": self.predictions}
            predictions_json = os.path.join(self.output_dir, "predictions.json")
            with open(predictions_json, "w") as f:
                f.write(json.dumps(json_data))
            return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir)
        # non-main workers contribute nothing to the summary
        return None
| 1,493
| 32.2
| 116
|
py
|
pmb-nll
|
pmb-nll-main/src/detr/datasets/transforms.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Transforms and data augmentation for both image + bbox.
"""
import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh
from util.misc import interpolate
def crop(image, target, region):
    """Crop the image to ``region = (top, left, height, width)`` and update the target.

    Boxes are translated/clipped to the crop window, masks are sliced, and
    annotations whose box (or mask) vanishes inside the crop are removed.
    """
    cropped_image = F.crop(image, *region)
    target = target.copy()
    i, j, h, w = region
    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])
    # fields that must be filtered together with boxes/masks below
    fields = ["labels", "area", "iscrowd"]
    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # shift boxes into crop coordinates, then clip each corner to the window
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")
    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target['masks'] = target['masks'][:, i:i + h, j:j + w]
        fields.append("masks")
    # remove elements for which the boxes or masks that have zero area
    if "boxes" in target or "masks" in target:
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target['boxes'].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target['masks'].flatten(1).any(1)
        for field in fields:
            target[field] = target[field][keep]
    return cropped_image, target
def hflip(image, target):
    """Horizontally flip the image and mirror boxes/masks accordingly."""
    flipped_image = F.hflip(image)
    w, h = image.size
    target = target.copy()
    if "boxes" in target:
        x0, y0, x1, y1 = target["boxes"].unbind(1)
        # x coordinates are mirrored around the image width; xyxy order is kept
        target["boxes"] = torch.stack([w - x1, y0, w - x0, y1], dim=1)
    if "masks" in target:
        target['masks'] = target['masks'].flip(-1)
    return flipped_image, target
def resize(image, target, size, max_size=None):
    """Resize the image (and scale its target) to ``size``.

    ``size`` is either a scalar (shorter side, aspect ratio preserved,
    longer side capped by ``max_size``) or a ``(w, h)`` pair used as-is.
    """
    # size can be min_size (scalar) or (w, h) tuple
    def get_size_with_aspect_ratio(image_size, size, max_size=None):
        # shrink the requested shorter side so the longer side fits max_size
        w, h = image_size
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(round(max_size * min_original_size / max_original_size))
        if (w <= h and w == size) or (h <= w and h == size):
            return (h, w)
        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)
        return (oh, ow)
    def get_size(image_size, size, max_size=None):
        # tuples are interpreted as (w, h) and returned as (h, w)
        if isinstance(size, (list, tuple)):
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)
    size = get_size(image.size, size, max_size)
    rescaled_image = F.resize(image, size)
    if target is None:
        return rescaled_image, None
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
    ratio_width, ratio_height = ratios
    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
        target["boxes"] = scaled_boxes
    if "area" in target:
        area = target["area"]
        scaled_area = area * (ratio_width * ratio_height)
        target["area"] = scaled_area
    h, w = size
    target["size"] = torch.tensor([h, w])
    if "masks" in target:
        # nearest-neighbour resize of masks, then re-binarize
        target['masks'] = interpolate(
            target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5
    return rescaled_image, target
def pad(image, target, padding):
    """Pad the image's bottom/right sides by ``padding = (pad_x, pad_y)``."""
    pad_x, pad_y = padding[0], padding[1]
    # assumes that we only pad on the bottom right corners
    padded_image = F.pad(image, (0, 0, pad_x, pad_y))
    if target is None:
        return padded_image, None
    target = target.copy()
    # PIL size is (w, h); the target "size" entry is stored as (h, w)
    target["size"] = torch.tensor(padded_image.size[::-1])
    if "masks" in target:
        target['masks'] = torch.nn.functional.pad(target['masks'], (0, pad_x, 0, pad_y))
    return padded_image, target
class RandomCrop(object):
    """Crop a random fixed-size region; image and target stay in sync."""
    def __init__(self, size):
        self.size = size
    def __call__(self, img, target):
        # delegate region sampling to torchvision, then crop both together
        sampled_region = T.RandomCrop.get_params(img, self.size)
        return crop(img, target, sampled_region)
class RandomSizeCrop(object):
    """Crop a region whose width/height are sampled in [min_size, max_size]."""
    def __init__(self, min_size: int, max_size: int):
        self.min_size = min_size
        self.max_size = max_size
    def __call__(self, img: PIL.Image.Image, target: dict):
        # sampled extent is capped by the image itself
        crop_w = random.randint(self.min_size, min(img.width, self.max_size))
        crop_h = random.randint(self.min_size, min(img.height, self.max_size))
        region = T.RandomCrop.get_params(img, [crop_h, crop_w])
        return crop(img, target, region)
class CenterCrop(object):
    """Deterministically crop the central ``size = (h, w)`` region."""
    def __init__(self, size):
        self.size = size
    def __call__(self, img, target):
        img_w, img_h = img.size
        out_h, out_w = self.size
        # centre the crop window; half-pixel offsets are rounded to ints
        top = int(round((img_h - out_h) / 2.))
        left = int(round((img_w - out_w) / 2.))
        return crop(img, target, (top, left, out_h, out_w))
class RandomHorizontalFlip(object):
    """Apply ``hflip`` to (img, target) with probability ``p``."""
    def __init__(self, p=0.5):
        self.p = p
    def __call__(self, img, target):
        flip_now = random.random() < self.p
        if flip_now:
            return hflip(img, target)
        return img, target
class RandomResize(object):
    """Resize to a size chosen uniformly from ``sizes`` (optionally capped by ``max_size``)."""
    def __init__(self, sizes, max_size=None):
        assert isinstance(sizes, (list, tuple))
        self.sizes = sizes
        self.max_size = max_size
    def __call__(self, img, target=None):
        chosen = random.choice(self.sizes)
        return resize(img, target, chosen, self.max_size)
class RandomPad(object):
    """Pad bottom/right by independent uniform amounts in [0, max_pad]."""
    def __init__(self, max_pad):
        self.max_pad = max_pad
    def __call__(self, img, target):
        # x offset drawn first, then y — keeps the RNG call order stable
        offsets = (random.randint(0, self.max_pad), random.randint(0, self.max_pad))
        return pad(img, target, offsets)
class RandomSelect(object):
    """
    Randomly selects between transforms1 and transforms2,
    with probability p for transforms1 and (1 - p) for transforms2
    """
    def __init__(self, transforms1, transforms2, p=0.5):
        self.transforms1 = transforms1
        self.transforms2 = transforms2
        self.p = p
    def __call__(self, img, target):
        chosen = self.transforms1 if random.random() < self.p else self.transforms2
        return chosen(img, target)
class ToTensor(object):
    """Convert the PIL image to a ``torch.Tensor``; the target passes through unchanged."""
    def __call__(self, img, target):
        tensor_img = F.to_tensor(img)
        return tensor_img, target
class RandomErasing(object):
    """Wrap ``torchvision.transforms.RandomErasing`` so it forwards the target untouched."""
    def __init__(self, *args, **kwargs):
        self.eraser = T.RandomErasing(*args, **kwargs)
    def __call__(self, img, target):
        erased = self.eraser(img)
        return erased, target
class Normalize(object):
    """Normalize image channels and convert boxes to normalized cxcywh."""
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def __call__(self, image, target=None):
        image = F.normalize(image, mean=self.mean, std=self.std)
        if target is None:
            return image, None
        target = target.copy()
        h, w = image.shape[-2:]
        if "boxes" in target:
            converted = box_xyxy_to_cxcywh(target["boxes"])
            # scale to [0, 1] relative coordinates
            target["boxes"] = converted / torch.tensor([w, h, w, h], dtype=torch.float32)
        return image, target
class Compose(object):
    """Chain several (image, target) transforms, applying them in order."""
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, image, target):
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target
    def __repr__(self):
        # one indented line per contained transform
        lines = [self.__class__.__name__ + "("]
        lines.extend("    {0}".format(t) for t in self.transforms)
        return "\n".join(lines) + "\n)"
| 8,524
| 29.776173
| 104
|
py
|
pmb-nll
|
pmb-nll-main/src/offline_evaluation/compute_probabilistic_metrics.py
|
import json
import os
import pickle
from collections import defaultdict
import numpy as np
import torch
import torch.distributions as distributions
import tqdm
# Project imports
from core.evaluation_tools import evaluation_utils, scoring_rules
from core.evaluation_tools.evaluation_utils import (
calculate_iou,
get_test_thing_dataset_id_to_train_contiguous_id_dict,
)
from core.setup import setup_arg_parser, setup_config
from detectron2.checkpoint import DetectionCheckpointer
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.engine import launch
from detectron2.modeling import build_model
from matplotlib import image
from matplotlib import pyplot as plt
from matplotlib.pyplot import hist
from prettytable import PrettyTable
from probabilistic_inference.inference_utils import get_inference_output_dir
from probabilistic_modeling.losses import (
compute_negative_log_likelihood,
negative_log_likelihood,
)
from probabilistic_modeling.modeling_utils import (
PoissonPointProcessGMM,
PoissonPointProcessIntensityFunction,
PoissonPointProcessUniform,
PoissonPointUnion,
)
from scipy.spatial.distance import mahalanobis
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
AREA_LIMITS = {"small": [0, 1024], "medium": [1024, 9216], "large": [9216, np.inf]}
def try_squeeze(to_squeeze, dim):
    """Squeeze dimension ``dim`` only when the tensor has more than ``dim`` dims.

    Tensors of rank <= ``dim`` are returned unchanged instead of raising.
    """
    if len(to_squeeze.shape) > dim:
        return to_squeeze.squeeze(dim)
    return to_squeeze
def print_nll_results_by_size(
    out, gt_boxes, inference_output_dir, area_limits=AREA_LIMITS, prefix=""
):
    """Print mean NLL decomposition terms per ground-truth size bucket and save histograms.

    ``out`` maps image id -> dict with "decomposition" and "associations" entries
    (as produced by the NLL evaluation); ``gt_boxes`` maps image id -> xyxy GT boxes.
    Histograms are written as SVG files into ``inference_output_dir``.
    NOTE(review): ``area_limits`` defaults to the module dict AREA_LIMITS and is
    only read, never mutated, so the shared default is safe here.
    """
    title_dict = {
        "matched_bernoulli_clss": "Matched Bernoulli Classification",
        "matched_bernoulli_cls": "Matched Bernoulli Classification",
        "matched_bernoulli_reg": "Matched Bernoulli Regression",
        "matched_bernoulli_regs": "Matched Bernoulli Regression",
        "matched_bernoulli": "Matched Bernoulli",
        "matched_bernoullis": "Matched Bernoulli",
        "matched_ppp": "Matched PPP",
        "matched_ppps": "Matched PPP",
    }
    def plot_histogram(
        size_decomp, decomp_key, area_limits, filepath, max_limit=40, nbins=100
    ):
        # one overlaid histogram per size bucket, clipped at max_limit
        plt.clf()
        for size in size_decomp.keys():
            hist(
                np.clip(size_decomp[size][decomp_key], 0, max_limit),
                nbins,
                alpha=0.33,
                label=size,
                ec=(0, 0, 0, 0),
                lw=0.0,
            )
        plt.title(title_dict[decomp_key])
        plt.legend()
        plt.xlim(0, max_limit)
        plt.savefig(
            os.path.join(filepath, f"{prefix}{decomp_key}.svg"),
            format="svg",
            transparent=True,
        )
    size_decomp = {size: defaultdict(list) for size in area_limits.keys()}
    for img_id, out_dict in out.items():
        boxes = gt_boxes[img_id].reshape(-1, 4)
        decomp = out_dict["decomposition"]
        # Remove unmatched detections and sort in gt-order instead
        association = np.array(out_dict["associations"][0])
        if not len(association):
            continue
        # keep only (prediction, gt) pairs that were actually matched
        association = association[association[:, 1] > -1]
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        num_gts = len(areas)
        num_preds = (
            decomp["num_unmatched_bernoulli"][0] + decomp["num_matched_bernoulli"][0]
        )
        # indices past the Bernoulli predictions belong to PPP components
        ppp_association = association[association[:, 0] >= num_preds]
        for size, limit in area_limits.items():
            # GT boxes whose area falls inside this bucket's (low, high) range
            mask = torch.logical_and(limit[0] < areas, limit[1] > areas)
            gt_idx = mask.nonzero()
            matched_bernoulli_regs = [
                comp
                for assoc, comp in zip(association, decomp["matched_bernoulli_regs"][0])
                if assoc[1] in gt_idx
            ]
            size_decomp[size]["matched_bernoulli_regs"] += matched_bernoulli_regs
            size_decomp[size]["matched_bernoulli_reg"] += [sum(matched_bernoulli_regs)]
            matched_bernoulli_clss = [
                comp
                for assoc, comp in zip(association, decomp["matched_bernoulli_clss"][0])
                if assoc[1] in gt_idx
            ]
            size_decomp[size]["matched_bernoulli_clss"] += matched_bernoulli_clss
            size_decomp[size]["matched_bernoulli_cls"] += [sum(matched_bernoulli_clss)]
            size_decomp[size]["matched_bernoullis"] += [
                cls_part + reg_part
                for cls_part, reg_part in zip(
                    matched_bernoulli_clss, matched_bernoulli_regs
                )
            ]
            size_decomp[size]["matched_bernoulli"] += [
                sum(matched_bernoulli_regs) + sum(matched_bernoulli_clss)
            ]
            matched_ppps = [
                comp
                for assoc, comp in zip(ppp_association, decomp["matched_ppps"][0])
                if assoc[1] in gt_idx
            ]
            size_decomp[size]["matched_ppps"] += matched_ppps
            size_decomp[size]["matched_ppp"] += [sum(matched_ppps)]
    # each metric is printed as "per-image mean / per-component mean"
    for size, limit in area_limits.items():
        print(f"******** Size: {size} ********")
        print(
            f"Mean matched Bernoulli: {np.mean(size_decomp[size]['matched_bernoulli']):.2f}/",
            end="",
        )
        print(f"{np.mean(size_decomp[size]['matched_bernoullis']):.2f}")
        print(
            f"Mean matched Bernoulli reg: {np.mean(size_decomp[size]['matched_bernoulli_reg']):.2f}/",
            end="",
        )
        print(f"{np.mean(size_decomp[size]['matched_bernoulli_regs']):.2f}")
        print(
            f"Mean matched Bernoulli cls: {np.mean(size_decomp[size]['matched_bernoulli_cls']):.2f}/",
            end="",
        )
        print(f"{np.mean(size_decomp[size]['matched_bernoulli_clss']):.2f}")
        print(
            f"Mean matched PPP: {np.mean(size_decomp[size]['matched_ppp']):.2f}/",
            end="",
        )
        print(f"{np.mean(size_decomp[size]['matched_ppps']):.2f}")
        print(f"**************************")
    for decomp_key in size_decomp[list(area_limits.keys())[0]]:
        plot_histogram(size_decomp, decomp_key, area_limits, inference_output_dir)
def print_nll_results(out):
    """Print NLL summary statistics and mean decomposition terms over all images.

    ``out`` maps image id -> dict with "nll" and "decomposition" entries;
    each metric line shows "per-image mean / sum-normalized mean".
    """
    # only strictly positive NLLs enter the summary statistics
    nlls = torch.tensor([el["nll"] for el in out.values() if el["nll"] > 0])
    print("*" * 40)
    print("*" * 12 + "PMB NLL results" + "*" * 13)
    print("*" * 40)
    print(f"Min NLL: {nlls.min().item()}")
    print(f"Mean NLL: {nlls.mean().item()}")
    print(f"Median NLL: {nlls.median().item()}")
    print(f"Max NLL: {nlls.max().item()}")
    print(f"Binned NLL: {torch.histc(nlls, bins=20).tolist()}")
    print("*" * 40)
    matched_bernoulli = []
    matched_bernoulli_reg = []
    matched_bernoulli_cls = []
    num_matched_bernoulli = []
    unmatched_bernoulli = []
    num_unmatched_bernoulli = []
    matched_ppp = []
    num_matched_ppp = []
    ppp_integral = []
    # collect one scalar per image for each decomposition term
    for img_id, out_dict in out.items():
        decomp = out_dict["decomposition"]
        matched_bernoulli.append(decomp["matched_bernoulli"][0])
        matched_bernoulli_reg.append(decomp["matched_bernoulli_reg"][0])
        matched_bernoulli_cls.append(decomp["matched_bernoulli_cls"][0])
        num_matched_bernoulli.append(decomp["num_matched_bernoulli"][0])
        unmatched_bernoulli.append(decomp["unmatched_bernoulli"][0])
        num_unmatched_bernoulli.append(decomp["num_unmatched_bernoulli"][0])
        matched_ppp.append(decomp["matched_ppp"][0])
        num_matched_ppp.append(decomp["num_matched_ppp"][0])
        ppp_integral.append(decomp["ppp_integral"])
    matched_bernoulli = np.array(matched_bernoulli)
    matched_bernoulli_reg = np.array(matched_bernoulli_reg)
    matched_bernoulli_cls = np.array(matched_bernoulli_cls)
    num_matched_bernoulli = np.array(num_matched_bernoulli)
    unmatched_bernoulli = np.array(unmatched_bernoulli)
    num_unmatched_bernoulli = np.array(num_unmatched_bernoulli)
    matched_ppp = np.array(matched_ppp)
    num_matched_ppp = np.array(num_matched_ppp)
    # drop images whose PPP term diverged (infinite NLL contribution)
    num_matched_ppp = num_matched_ppp[matched_ppp < np.inf]
    matched_ppp = matched_ppp[matched_ppp < np.inf]
    matched_bernoulli_norm = matched_bernoulli.sum() / (num_matched_bernoulli.sum())
    matched_bernoulli_reg_norm = matched_bernoulli_reg.sum() / (
        num_matched_bernoulli.sum()
    )
    matched_bernoulli_cls_norm = matched_bernoulli_cls.sum() / (
        num_matched_bernoulli.sum()
    )
    print(f"Mean matched Bernoulli: {np.mean(matched_bernoulli):.2f}/", end="")
    print(f"{matched_bernoulli_norm:.2f}")
    print(f"Mean matched Bernoulli reg: {np.mean(matched_bernoulli_reg):.2f}/", end="")
    print(f"{matched_bernoulli_reg_norm:.2f}")
    print(f"Mean matched Bernoulli cls: {np.mean(matched_bernoulli_cls):.2f}/", end="")
    print(f"{matched_bernoulli_cls_norm:.2f}")
    unmatched_bernoulli_norm = unmatched_bernoulli.sum() / (
        num_unmatched_bernoulli.sum()
    )
    print(f"Mean unmatched Bernoulli: {np.mean(unmatched_bernoulli):.2f}/", end="")
    print(f"{unmatched_bernoulli_norm:.2f}")
    matched_ppp_norm = matched_ppp.sum() / num_matched_ppp.sum()
    print(f"Mean matched PPP: {np.mean(matched_ppp):.2f}/", end="")
    print(f"{matched_ppp_norm:.2f}")
    print(f"Mean PPP integral: {np.mean(ppp_integral):.2f}")
    print("*" * 40)
def plot_nll_results(out, inference_output_dir, prefix=""):
    """Save SVG histograms of the per-component NLL decomposition terms.

    ``out`` maps image id -> dict with a "decomposition" entry; one histogram
    per term is written into ``inference_output_dir`` (values are clipped to
    a fixed range before binning).
    """
    matched_bernoulli = []
    matched_bernoulli_reg = []
    matched_bernoulli_cls = []
    num_matched_bernoulli = []
    unmatched_bernoulli = []
    num_unmatched_bernoulli = []
    matched_ppp = []
    num_matched_ppp = []
    ppp_integral = []
    # flatten the per-component lists of every image into single lists
    for img_id, out_dict in out.items():
        decomp = out_dict["decomposition"]
        matched_bernoulli += [
            reg + classification
            for reg, classification in zip(
                decomp["matched_bernoulli_regs"][0],
                decomp["matched_bernoulli_clss"][0],
            )
        ]
        matched_bernoulli_reg += decomp["matched_bernoulli_regs"][0]
        matched_bernoulli_cls += decomp["matched_bernoulli_clss"][0]
        num_matched_bernoulli.append(decomp["num_matched_bernoulli"][0])
        unmatched_bernoulli += decomp["unmatched_bernoullis"][0]
        num_unmatched_bernoulli.append(decomp["num_unmatched_bernoulli"][0])
        matched_ppp += decomp["matched_ppps"][0]
        num_matched_ppp.append(decomp["num_matched_ppp"][0])
        ppp_integral.append(decomp["ppp_integral"])
    plt.figure()
    plt.hist(np.clip(matched_bernoulli, 0, 40), 100, ec=(0, 0, 0, 0), lw=0.0)
    plt.xlim(0, 40)
    plt.title("Matched Bernoulli")
    plt.savefig(
        os.path.join(inference_output_dir, f"{prefix}matched_bernoulli_histogram.svg"),
        format="svg",
        transparent=True,
    )
    plt.clf()
    plt.hist(np.clip(matched_bernoulli_reg, 0, 40), 100, ec=(0, 0, 0, 0), lw=0.0)
    plt.xlim(0, 40)
    plt.title("Matched Bernoulli regression")
    plt.savefig(
        os.path.join(
            inference_output_dir, f"{prefix}matched_bernoulli_reg_histogram.svg"
        ),
        format="svg",
        transparent=True,
    )
    plt.clf()
    # classification terms are much smaller, hence the tighter clip range
    plt.hist(np.clip(matched_bernoulli_cls, 0, 5), 100, ec=(0, 0, 0, 0), lw=0.0)
    plt.xlim(0, 5)
    plt.title("Matched Bernoulli Classification")
    plt.savefig(
        os.path.join(
            inference_output_dir, f"{prefix}matched_bernoulli_cls_histogram.svg"
        ),
        format="svg",
        transparent=True,
    )
    plt.clf()
    plt.hist(np.clip(unmatched_bernoulli, 0, 10), 100, ec=(0, 0, 0, 0), lw=0.0)
    plt.xlim(0, 10)
    plt.title("Unmatched Bernoulli")
    plt.savefig(
        os.path.join(
            inference_output_dir, f"{prefix}unmatched_bernoulli_histogram.svg"
        ),
        format="svg",
        transparent=True,
    )
    plt.clf()
    plt.hist(np.clip(matched_ppp, 0, 40), 100, ec=(0, 0, 0, 0), lw=0.0)
    plt.xlim(0, 40)
    plt.title("Matched PPP")
    plt.savefig(
        os.path.join(inference_output_dir, f"{prefix}matched_ppp_histogram.svg"),
        format="svg",
        transparent=True,
    )
def compute_pmb_nll(
    cfg,
    inference_output_dir,
    cat_mapping_dict,
    min_allowed_score=0.0,
    print_results=True,
    plot_results=True,
    print_by_size=True,
    load_nll_results=True,
):
    """Evaluate the PMB negative log-likelihood for every image of a dataset.

    Loads preprocessed predictions and ground truth from
    ``inference_output_dir``, builds a Poisson point process (PPP) intensity
    per image (from the model, from stored weights, or from the config),
    and calls ``negative_log_likelihood`` per image.

    Args:
        cfg: detectron2-style config node.
        inference_output_dir: directory with preprocessed instances; results
            are also pickled here.
        cat_mapping_dict: maps test-dataset category ids to train-contiguous ids.
        min_allowed_score: score threshold used when preprocessing detections.
        print_results / plot_results / print_by_size: reporting toggles.
        load_nll_results: if True and a results pickle already exists, load and
            report it instead of recomputing.

    Returns:
        dict keyed by image id with "nll", "associations" and "decomposition"
        entries (images whose evaluation raised are omitted).
    """
    results_file = os.path.join(
        inference_output_dir, f"nll_results_minallowedscore_{min_allowed_score}.pkl"
    )
    # Fast path: reuse previously computed results if allowed and present.
    if load_nll_results and os.path.isfile(results_file):
        with open(results_file, "rb") as f:
            out = pickle.load(f)
        if print_results:
            print_nll_results(out)
        if plot_results:
            plot_nll_results(out, inference_output_dir)
        if print_by_size:
            (
                preprocessed_predicted_instances,
                preprocessed_gt_instances,
            ) = evaluation_utils.get_per_frame_preprocessed_instances(
                cfg, inference_output_dir, min_allowed_score
            )
            gt_boxes = preprocessed_gt_instances["gt_boxes"]
            print_nll_results_by_size(out, gt_boxes, inference_output_dir)
        return out
    with torch.no_grad():
        # Load predictions and GT
        (
            preprocessed_predicted_instances,
            preprocessed_gt_instances,
        ) = evaluation_utils.get_per_frame_preprocessed_instances(
            cfg, inference_output_dir, min_allowed_score
        )
        predicted_box_means = preprocessed_predicted_instances["predicted_boxes"]
        predicted_cls_probs = preprocessed_predicted_instances["predicted_cls_probs"]
        predicted_box_covariances = preprocessed_predicted_instances[
            "predicted_covar_mats"
        ]
        # PPP weights may be stored under either key; fall back to an empty
        # defaultdict so per-image lookups below yield an empty list.
        if "ppp_weights" in preprocessed_predicted_instances:
            predicted_ppp = preprocessed_predicted_instances["ppp_weights"]
        elif "log_ppp_intensity" in preprocessed_predicted_instances:
            predicted_ppp = preprocessed_predicted_instances["log_ppp_intensity"]
        else:
            predicted_ppp = defaultdict(list)
        # Optionally rebuild the model just to extract its PPP intensity
        # function; per-image stored weights are then ignored (defaultdict(int)).
        if cfg.PROBABILISTIC_INFERENCE.LOAD_PPP_FROM_MODEL:
            model = build_model(cfg)
            DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
                cfg.MODEL.WEIGHTS, resume=True
            )
            ppp = model.get_ppp_intensity_function()
            ppp.set_normalization_of_bboxes(True)
            ppp.update_distribution()
            predicted_ppp = defaultdict(int)
        image_sizes = preprocessed_predicted_instances["image_size"]
        gt_box_means = preprocessed_gt_instances["gt_boxes"]
        gt_cat_idxs = preprocessed_gt_instances["gt_cat_idxs"]
        # Initialize results
        out = defaultdict(dict)
        print("[NLLOD] Started evaluating NLL for dataset.")
        with tqdm.tqdm(total=len(predicted_box_means)) as pbar:
            for image_id in predicted_box_means:
                ppp_mix = PoissonPointUnion()
                pbar.update(1)
                image_size = image_sizes[image_id]
                ################ GT STUFF ###########################
                gt_boxes = gt_box_means[image_id]
                if len(gt_boxes.shape) < 2:
                    gt_boxes = gt_boxes.view(-1, 4)
                # Map raw dataset category ids to train-contiguous class ids.
                gt_classes = (
                    torch.as_tensor(
                        [
                            cat_mapping_dict[cat_id.item()]
                            for cat_id in gt_cat_idxs[image_id].long().view(-1, 1)
                        ]
                    )
                    .long()
                    .to(device)
                )
                ################# PREDICTION STUFF ####################
                # Clamp probabilities away from {0, 1} so log terms stay finite.
                pred_cls_probs = predicted_cls_probs[image_id].clamp(1e-6, 1 - 1e-6)
                # RetinaNet scores carry no explicit background column;
                # RCNN/DETR-style scores have background as the last column.
                if cfg.MODEL.META_ARCHITECTURE == "ProbabilisticRetinaNet":
                    num_classes = pred_cls_probs.shape[-1]
                    scores_have_bg_cls = False
                else:
                    num_classes = pred_cls_probs.shape[-1] - 1
                    scores_have_bg_cls = True
                # Replicate box mean/covariance per class.
                pred_box_means = (
                    predicted_box_means[image_id].unsqueeze(1).repeat(1, num_classes, 1)
                )
                pred_box_covs = predicted_box_covariances[image_id]
                pred_box_covs = pred_box_covs.unsqueeze(1).repeat(1, num_classes, 1, 1)
                pred_ppp_weights = predicted_ppp[image_id]
                if not cfg.PROBABILISTIC_INFERENCE.TREAT_AS_MB:
                    # Low-confidence detections can be diverted from the MB
                    # component into a Gaussian-mixture PPP.
                    if cfg.PROBABILISTIC_INFERENCE.PPP_CONFIDENCE_THRES > 0:
                        if scores_have_bg_cls:
                            max_conf = 1 - pred_cls_probs[..., -1]
                        else:
                            max_conf = pred_cls_probs[..., :num_classes].max(dim=1)[0]
                        ppp_preds_idx = (
                            max_conf <= cfg.PROBABILISTIC_INFERENCE.PPP_CONFIDENCE_THRES
                        )
                        if not ppp_preds_idx.any():
                            # No low-confidence detections: empty PPP component.
                            ppp_preds = PoissonPointProcessIntensityFunction(
                                cfg, log_intensity=-np.inf, device=gt_boxes.device
                            )
                        else:
                            mixture_dict = {}
                            mixture_dict["weights"] = max_conf[ppp_preds_idx]
                            mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
                            mixture_dict["covs"] = pred_box_covs[ppp_preds_idx, 0]
                            mixture_dict["cls_probs"] = pred_cls_probs[
                                ppp_preds_idx, :num_classes
                            ]
                            mixture_dict[
                                "reg_dist_type"
                            ] = (
                                cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
                            )
                            if (
                                cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
                                == "gaussian"
                            ):
                                mixture_dict[
                                    "reg_dist"
                                ] = distributions.multivariate_normal.MultivariateNormal
                                mixture_dict["reg_kwargs"] = {
                                    "covariance_matrix": mixture_dict["covs"]
                                }
                            elif (
                                cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
                                == "laplacian"
                            ):
                                mixture_dict["reg_dist"] = distributions.laplace.Laplace
                                # Laplace scale from covariance diagonal:
                                # var = 2 * b^2  =>  b = sqrt(var / 2).
                                mixture_dict["reg_kwargs"] = {
                                    "scale": torch.sqrt(
                                        mixture_dict["covs"].diagonal(dim1=-2, dim2=-1)
                                        / 2
                                    )
                                }
                            ppp_preds = PoissonPointProcessIntensityFunction(
                                cfg, predictions=mixture_dict
                            )
                            # Remove the diverted detections from the MB part.
                            pred_box_means = pred_box_means[ppp_preds_idx.logical_not()]
                            pred_box_covs = pred_box_covs[ppp_preds_idx.logical_not()]
                            pred_cls_probs = pred_cls_probs[ppp_preds_idx.logical_not()]
                        ppp_mix.add_ppp(ppp_preds)
                    if cfg.PROBABILISTIC_INFERENCE.LOAD_PPP_FROM_MODEL:
                        # no-op: keep the model-derived PPP created before the loop
                        ppp = ppp
                    elif isinstance(pred_ppp_weights, dict):
                        ppp = PoissonPointProcessIntensityFunction(
                            cfg, device=gt_boxes.device
                        )
                        ppp.load_weights(pred_ppp_weights)
                    elif isinstance(pred_ppp_weights, torch.Tensor):
                        ppp = PoissonPointProcessIntensityFunction(
                            cfg, log_intensity=pred_ppp_weights, device=gt_boxes.device
                        )
                    else:
                        print(
                            "[NLLOD] PPP intensity function not found in annotations, using config"
                        )
                        pred_ppp_weights = -np.inf
                        ppp = PoissonPointProcessIntensityFunction(
                            cfg, log_intensity=pred_ppp_weights, device=gt_boxes.device
                        )
                else:
                    # Pure multi-Bernoulli treatment: PPP intensity forced to zero.
                    pred_ppp_weights = -np.inf
                    ppp = PoissonPointProcessIntensityFunction(
                        cfg, log_intensity=pred_ppp_weights
                    )
                ppp_mix.add_ppp(ppp)
                # Factory for the per-box regression distribution used in the NLL.
                if (
                    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
                    == "gaussian"
                ):
                    reg_distribution = lambda x, y: distributions.multivariate_normal.MultivariateNormal(
                        x, y
                    )
                elif (
                    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
                    == "laplacian"
                ):
                    reg_distribution = lambda x, y: distributions.laplace.Laplace(
                        loc=x, scale=torch.sqrt(y.diagonal(dim1=-2, dim2=-1) / 2)
                    )
                else:
                    raise Exception(
                        f"Bounding box uncertainty distribution {cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE} is not available."
                    )
                # NOTE(review): failures are logged and the image is skipped, so
                # `out` may contain fewer entries than the dataset has images.
                try:
                    nll, associations, decompositions = negative_log_likelihood(
                        pred_box_scores=[pred_cls_probs],
                        pred_box_regs=[pred_box_means],
                        pred_box_covars=[pred_box_covs],
                        gt_boxes=[gt_boxes],
                        gt_classes=[gt_classes],
                        image_sizes=[image_size],
                        reg_distribution=reg_distribution,
                        intensity_func=ppp_mix,
                        max_n_solutions=cfg.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS,
                        training=False,
                        scores_have_bg_cls=scores_have_bg_cls,
                    )
                    out[image_id] = {
                        "nll": nll.item(),
                        "associations": associations[0].tolist(),
                        "decomposition": decompositions[0],
                    }
                except Exception as e:
                    print(
                        f"Image {image_id} raised error. Will not be used to calculate NLL."
                    )
                    print(e)
        # Persist results so future calls can take the fast path above.
        with open(
            os.path.join(
                inference_output_dir,
                f"nll_results_minallowedscore_{min_allowed_score}.pkl",
            ),
            "wb",
        ) as f:
            pickle.dump(out, f)
    if print_results:
        print_nll_results(out)
    if plot_results:
        plot_nll_results(out, inference_output_dir)
    if print_by_size:
        gt_boxes = preprocessed_gt_instances["gt_boxes"]
        print_nll_results_by_size(out, gt_boxes, inference_output_dir)
    return out
def main(
    args,
    cfg=None,
    iou_min=None,
    iou_correct=None,
    min_allowed_score=None,
    print_results=True,
    inference_output_dir="",
    image_ids=[],
):
    """Entry point: compute PMB NLL plus per-partition probabilistic scores.

    Computes the NLL results via ``compute_pmb_nll``, then partitions matched
    detections (true positives, duplicates, localization errors, false
    positives/negatives) and aggregates classification/regression scoring
    rules per class, printing a summary table and pickling the raw values.

    Args:
        args: parsed command-line namespace (test_dataset, inference_config, ...).
        cfg: optional pre-built config; built from ``args`` when None.
        iou_min / iou_correct: IoU thresholds; default to the CLI values.
        min_allowed_score: detection score threshold; when None or negative it
            is read from a previously computed mAP_res.txt, else 0.0.
        print_results: whether to print/serialize the summary table.
        inference_output_dir: output directory; derived from cfg/args if empty.
        image_ids: optional subset of images; non-empty disables loading cached
            NLL results.  NOTE(review): mutable default `[]` — read-only here,
            so harmless, but a tuple/None default would be safer.
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset
    # Setup torch device and num_threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
    # Build path to gt instances and inference output
    if inference_output_dir == "":
        inference_output_dir = get_inference_output_dir(
            cfg["OUTPUT_DIR"],
            args.test_dataset,
            args.inference_config,
            args.image_corruption_level,
        )
    # Get thresholds to perform evaluation on
    if iou_min is None:
        iou_min = args.iou_min
    if iou_correct is None:
        iou_correct = args.iou_correct
    if min_allowed_score is None or min_allowed_score < 0:
        # Check if F-1 Score has been previously computed ON THE ORIGINAL
        # DATASET such as COCO even when evaluating on OpenImages.
        try:
            with open(os.path.join(inference_output_dir, "mAP_res.txt"), "r") as f:
                # Last value in the file is the optimal score threshold.
                min_allowed_score = f.read().strip("][\n").split(", ")[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0
    # Get category mapping dictionary:
    train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]
    ).thing_dataset_id_to_contiguous_id
    test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        args.test_dataset
    ).thing_dataset_id_to_contiguous_id
    cat_mapping_dict = get_test_thing_dataset_id_to_train_contiguous_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id,
    )
    # Compute NLL results
    load_nll_results = len(image_ids) == 0
    nll_results = compute_pmb_nll(
        cfg, inference_output_dir, cat_mapping_dict, min_allowed_score, print_results, load_nll_results=load_nll_results
    )
    # Get matched results by either generating them or loading from file.
    with torch.no_grad():
        matched_results = evaluation_utils.get_matched_results(
            cfg,
            inference_output_dir,
            iou_min=iou_min,
            iou_correct=iou_correct,
            min_allowed_score=min_allowed_score,
        )
    # Build preliminary dicts required for computing classification scores.
    for matched_results_key in matched_results.keys():
        if "gt_cat_idxs" in matched_results[matched_results_key].keys():
            # First we convert the written things indices to contiguous
            # indices.
            gt_converted_cat_idxs = matched_results[matched_results_key][
                "gt_cat_idxs"
            ]
            gt_converted_cat_idxs = try_squeeze(gt_converted_cat_idxs, 1)
            gt_converted_cat_idxs = torch.as_tensor(
                [
                    cat_mapping_dict[class_idx.cpu().tolist()]
                    for class_idx in gt_converted_cat_idxs
                ]
            ).to(device)
            matched_results[matched_results_key][
                "gt_converted_cat_idxs"
            ] = gt_converted_cat_idxs.to(device)
            if "predicted_cls_probs" in matched_results[matched_results_key].keys():
                predicted_cls_probs = matched_results[matched_results_key][
                    "predicted_cls_probs"
                ]
                # This is required for evaluation of retinanet based
                # detections.
                matched_results[matched_results_key][
                    "predicted_score_of_gt_category"
                ] = torch.gather(
                    predicted_cls_probs, 1, gt_converted_cat_idxs.unsqueeze(1)
                ).squeeze(
                    1
                )
            matched_results[matched_results_key][
                "gt_cat_idxs"
            ] = gt_converted_cat_idxs
        else:
            if cfg.MODEL.META_ARCHITECTURE == "ProbabilisticRetinaNet":
                # For false positives, the correct category is background. For retinanet, since no explicit
                # background category is available, this value is computed as 1.0 - score of the predicted
                # category.
                predicted_class_probs, predicted_class_idx = matched_results[
                    matched_results_key
                ]["predicted_cls_probs"].max(1)
                matched_results[matched_results_key][
                    "predicted_score_of_gt_category"
                ] = (1.0 - predicted_class_probs)
                matched_results[matched_results_key][
                    "predicted_cat_idxs"
                ] = predicted_class_idx
            else:
                # For RCNN/DETR based networks, a background category is
                # explicitly available.
                matched_results[matched_results_key][
                    "predicted_score_of_gt_category"
                ] = matched_results[matched_results_key]["predicted_cls_probs"][
                    :, -1
                ]
                _, predicted_class_idx = matched_results[matched_results_key][
                    "predicted_cls_probs"
                ][:, :-1].max(1)
                matched_results[matched_results_key][
                    "predicted_cat_idxs"
                ] = predicted_class_idx
    # Load the different detection partitions
    true_positives = matched_results["true_positives"]
    duplicates = matched_results["duplicates"]
    localization_errors = matched_results["localization_errors"]
    false_negatives = matched_results["false_negatives"]
    false_positives = matched_results["false_positives"]
    # Get the number of elements in each partition
    num_true_positives = true_positives["predicted_box_means"].shape[0]
    num_duplicates = duplicates["predicted_box_means"].shape[0]
    num_localization_errors = localization_errors["predicted_box_means"].shape[0]
    num_false_negatives = false_negatives["gt_box_means"].shape[0]
    num_false_positives = false_positives["predicted_box_means"].shape[0]
    # Per-class scoring: one dict of cls/reg analyses per training class.
    per_class_output_list = []
    for class_idx in cat_mapping_dict.values():
        true_positives_valid_idxs = (
            true_positives["gt_converted_cat_idxs"] == class_idx
        )
        localization_errors_valid_idxs = (
            localization_errors["gt_converted_cat_idxs"] == class_idx
        )
        duplicates_valid_idxs = duplicates["gt_converted_cat_idxs"] == class_idx
        false_positives_valid_idxs = (
            false_positives["predicted_cat_idxs"] == class_idx
        )
        if cfg.MODEL.META_ARCHITECTURE == "ProbabilisticRetinaNet":
            # Compute classification metrics for every partition
            true_positives_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
                true_positives, true_positives_valid_idxs
            )
            localization_errors_cls_analysis = (
                scoring_rules.sigmoid_compute_cls_scores(
                    localization_errors, localization_errors_valid_idxs
                )
            )
            duplicates_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
                duplicates, duplicates_valid_idxs
            )
            false_positives_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
                false_positives, false_positives_valid_idxs
            )
        else:
            # Compute classification metrics for every partition
            true_positives_cls_analysis = scoring_rules.softmax_compute_cls_scores(
                true_positives, true_positives_valid_idxs
            )
            localization_errors_cls_analysis = (
                scoring_rules.softmax_compute_cls_scores(
                    localization_errors, localization_errors_valid_idxs
                )
            )
            duplicates_cls_analysis = scoring_rules.softmax_compute_cls_scores(
                duplicates, duplicates_valid_idxs
            )
            false_positives_cls_analysis = scoring_rules.softmax_compute_cls_scores(
                false_positives, false_positives_valid_idxs
            )
        # Compute regression metrics for every partition
        true_positives_reg_analysis = scoring_rules.compute_reg_scores(
            true_positives, true_positives_valid_idxs
        )
        localization_errors_reg_analysis = scoring_rules.compute_reg_scores(
            localization_errors, localization_errors_valid_idxs
        )
        duplicates_reg_analysis = scoring_rules.compute_reg_scores(
            duplicates, duplicates_valid_idxs
        )
        false_positives_reg_analysis = scoring_rules.compute_reg_scores_fn(
            false_positives, false_positives_valid_idxs
        )
        per_class_output_list.append(
            {
                "true_positives_cls_analysis": true_positives_cls_analysis,
                "true_positives_reg_analysis": true_positives_reg_analysis,
                "localization_errors_cls_analysis": localization_errors_cls_analysis,
                "localization_errors_reg_analysis": localization_errors_reg_analysis,
                "duplicates_cls_analysis": duplicates_cls_analysis,
                "duplicates_reg_analysis": duplicates_reg_analysis,
                "false_positives_cls_analysis": false_positives_cls_analysis,
                "false_positives_reg_analysis": false_positives_reg_analysis,
            }
        )
    # Aggregate per-class values: nan-mean/std across classes plus the raw
    # collected arrays for later inspection.
    final_accumulated_output_dict = dict()
    final_average_output_dict = dict()
    for key in per_class_output_list[0].keys():
        average_output_dict = dict()
        for inner_key in per_class_output_list[0][key].keys():
            collected_values = [
                per_class_output[key][inner_key]
                if per_class_output[key][inner_key] is not None
                else np.NaN
                for per_class_output in per_class_output_list
            ]
            collected_values = np.array(collected_values)
            if key in average_output_dict.keys():
                # Use nan mean since some classes do not have duplicates for
                # instance or has one duplicate for instance. torch.std returns nan in that case
                # so we handle those here. This should not have any effect on the final results, as
                # it only affects inter-class variance which we do not
                # report anyways.
                average_output_dict[key].update(
                    {
                        inner_key: np.nanmean(collected_values),
                        inner_key + "_std": np.nanstd(collected_values, ddof=1),
                    }
                )
                final_accumulated_output_dict[key].update(
                    {inner_key: collected_values}
                )
            else:
                average_output_dict.update(
                    {
                        key: {
                            inner_key: np.nanmean(collected_values),
                            inner_key + "_std": np.nanstd(collected_values, ddof=1),
                        }
                    }
                )
                final_accumulated_output_dict.update(
                    {key: {inner_key: collected_values}}
                )
        final_average_output_dict.update(average_output_dict)
    final_accumulated_output_dict.update(
        {
            "num_instances": {
                "num_true_positives": num_true_positives,
                "num_duplicates": num_duplicates,
                "num_localization_errors": num_localization_errors,
                "num_false_positives": num_false_positives,
                "num_false_negatives": num_false_negatives,
            }
        }
    )
    # NOTE(review): the summary table AND the results pickle below are both
    # gated behind print_results — confirm the pickle gating is intentional.
    if print_results:
        # Summarize and print all
        table = PrettyTable()
        table.field_names = [
            "Output Type",
            "Number of Instances",
            "Cls Negative Log Likelihood",
            "Cls Brier Score",
            "Reg TP Negative Log Likelihood / FP Entropy",
            "Reg Energy Score",
        ]
        table.add_row(
            [
                "True Positives:",
                num_true_positives,
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["true_positives_cls_analysis"][
                        "ignorance_score_mean"
                    ],
                    final_average_output_dict["true_positives_cls_analysis"][
                        "ignorance_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["true_positives_cls_analysis"][
                        "brier_score_mean"
                    ],
                    final_average_output_dict["true_positives_cls_analysis"][
                        "brier_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["true_positives_reg_analysis"][
                        "ignorance_score_mean"
                    ],
                    final_average_output_dict["true_positives_reg_analysis"][
                        "ignorance_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["true_positives_reg_analysis"][
                        "energy_score_mean"
                    ],
                    final_average_output_dict["true_positives_reg_analysis"][
                        "energy_score_mean_std"
                    ],
                ),
            ]
        )
        table.add_row(
            [
                "Duplicates:",
                num_duplicates,
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["duplicates_cls_analysis"][
                        "ignorance_score_mean"
                    ],
                    final_average_output_dict["duplicates_cls_analysis"][
                        "ignorance_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["duplicates_cls_analysis"][
                        "brier_score_mean"
                    ],
                    final_average_output_dict["duplicates_cls_analysis"][
                        "brier_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["duplicates_reg_analysis"][
                        "ignorance_score_mean"
                    ],
                    final_average_output_dict["duplicates_reg_analysis"][
                        "ignorance_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["duplicates_reg_analysis"][
                        "energy_score_mean"
                    ],
                    final_average_output_dict["duplicates_reg_analysis"][
                        "energy_score_mean_std"
                    ],
                ),
            ]
        )
        table.add_row(
            [
                "Localization Errors:",
                num_localization_errors,
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["localization_errors_cls_analysis"][
                        "ignorance_score_mean"
                    ],
                    final_average_output_dict["localization_errors_cls_analysis"][
                        "ignorance_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["localization_errors_cls_analysis"][
                        "brier_score_mean"
                    ],
                    final_average_output_dict["localization_errors_cls_analysis"][
                        "brier_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["localization_errors_reg_analysis"][
                        "ignorance_score_mean"
                    ],
                    final_average_output_dict["localization_errors_reg_analysis"][
                        "ignorance_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["localization_errors_reg_analysis"][
                        "energy_score_mean"
                    ],
                    final_average_output_dict["localization_errors_reg_analysis"][
                        "energy_score_mean_std"
                    ],
                ),
            ]
        )
        table.add_row(
            [
                "False Positives:",
                num_false_positives,
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["false_positives_cls_analysis"][
                        "ignorance_score_mean"
                    ],
                    final_average_output_dict["false_positives_cls_analysis"][
                        "ignorance_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["false_positives_cls_analysis"][
                        "brier_score_mean"
                    ],
                    final_average_output_dict["false_positives_cls_analysis"][
                        "brier_score_mean_std"
                    ],
                ),
                "{:.4f} ± {:.4f}".format(
                    final_average_output_dict["false_positives_reg_analysis"][
                        "total_entropy_mean"
                    ],
                    final_average_output_dict["false_positives_reg_analysis"][
                        "total_entropy_mean_std"
                    ],
                ),
                "-",
            ]
        )
        table.add_row(["False Negatives:", num_false_negatives, "-", "-", "-", "-"])
        print(table)
        text_file_name = os.path.join(
            inference_output_dir,
            "probabilistic_scoring_res_{}_{}_{}.txt".format(
                iou_min, iou_correct, min_allowed_score
            ),
        )
        with open(text_file_name, "w") as text_file:
            print(table, file=text_file)
        dictionary_file_name = os.path.join(
            inference_output_dir,
            "probabilistic_scoring_res_{}_{}_{}.pkl".format(
                iou_min, iou_correct, min_allowed_score
            ),
        )
        with open(dictionary_file_name, "wb") as pickle_file:
            pickle.dump(final_accumulated_output_dict, pickle_file)
if __name__ == "__main__":
    # Parse command-line options and hand off to the distributed launcher.
    parsed_args = setup_arg_parser().parse_args()
    print("Command Line Args:", parsed_args)
    launch(
        main,
        parsed_args.num_gpus,
        num_machines=parsed_args.num_machines,
        machine_rank=parsed_args.machine_rank,
        dist_url=parsed_args.dist_url,
        args=(parsed_args,),
    )
| 44,542
| 40.785178
| 149
|
py
|
pmb-nll
|
pmb-nll-main/src/offline_evaluation/compute_average_precision.py
|
import os
import numpy as np
# Project imports
from core.setup import setup_arg_parser, setup_config
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.engine import launch
from probabilistic_inference.inference_utils import get_inference_output_dir
# Coco evaluator tools
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def main(args, cfg=None, inference_output_dir="", image_ids=()):
    """Run COCO mAP evaluation and derive an optimal F-1 score threshold.

    Evaluates ``coco_instances_results.json`` against the ground-truth COCO
    annotations, prints the standard COCOeval summary, then computes the
    classification-score threshold that maximizes per-class F-1 (averaged
    over classes) and writes ``stats + [threshold]`` to ``mAP_res.txt``.

    Args:
        args: parsed command-line namespace (test_dataset, inference_config, ...).
        cfg: optional pre-built config; built from ``args`` when None.
        inference_output_dir: output directory; derived from cfg/args if empty.
        image_ids: optional subset of image ids to restrict evaluation to.
            Default is an immutable empty tuple (was a mutable ``[]``); it is
            only read, so any empty sequence behaves identically.
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    # Build path to inference output
    if inference_output_dir == "":
        inference_output_dir = get_inference_output_dir(
            cfg["OUTPUT_DIR"],
            args.test_dataset,
            args.inference_config,
            args.image_corruption_level,
        )
    prediction_file_name = os.path.join(
        inference_output_dir, "coco_instances_results.json"
    )
    meta_catalog = MetadataCatalog.get(args.test_dataset)
    # Evaluate detection results
    gt_coco_api = COCO(meta_catalog.json_file)
    if len(image_ids):
        # Restrict the in-memory COCO index to the requested image subset.
        gt_coco_api.anns = {
            ann_key: ann_val
            for ann_key, ann_val in gt_coco_api.anns.items()
            if ann_val["image_id"] in image_ids
        }
        gt_coco_api.catToImgs = {
            cat: [id for id in img_ids if id in image_ids]
            for cat, img_ids in gt_coco_api.catToImgs.items()
            if len([id for id in img_ids if id in image_ids])
        }
        gt_coco_api.imgToAnns = {
            id: ann for id, ann in gt_coco_api.imgToAnns.items() if id in image_ids
        }
        gt_coco_api.imgs = {
            id: info for id, info in gt_coco_api.imgs.items() if id in image_ids
        }
    res_coco_api = gt_coco_api.loadRes(prediction_file_name)
    results_api = COCOeval(gt_coco_api, res_coco_api, iouType="bbox")
    results_api.params.catIds = list(
        meta_catalog.thing_dataset_id_to_contiguous_id.keys()
    )
    # Calculate and print aggregate results
    results_api.evaluate()
    results_api.accumulate()
    results_api.summarize()
    # Compute optimal micro F1 score threshold. We compute the f1 score for
    # every class and score threshold. We then compute the score threshold that
    # maximizes the F-1 score of every class. The final score threshold is the average
    # over all classes.
    precisions = results_api.eval["precision"].mean(0)[:, :, 0, 2]
    recalls = np.expand_dims(results_api.params.recThrs, 1)
    # Guard the harmonic mean against precision + recall == 0: the naive
    # division produces NaN, and np.argmax treats NaN as the maximum, which
    # would silently pick a meaningless threshold index. Treat it as F1 = 0.
    with np.errstate(divide="ignore", invalid="ignore"):
        f1_scores = 2 * (precisions * recalls) / (precisions + recalls)
    f1_scores = np.nan_to_num(f1_scores, nan=0.0)
    # Per class, the recall-index at which F-1 peaks.
    best_f1_idxs = f1_scores.argmax(0)
    scores = results_api.eval["scores"].mean(0)[:, :, 0, 2]
    optimal_score_threshold = [
        scores[best_f1_idx_i, i] for i, best_f1_idx_i in enumerate(best_f1_idxs)
    ]
    optimal_score_threshold = np.array(optimal_score_threshold)
    # Drop classes with a zero threshold (no valid operating point).
    optimal_score_threshold = optimal_score_threshold[optimal_score_threshold != 0]
    # Empty after filtering -> mean() would be NaN (and warn); report 0.0.
    optimal_score_threshold = (
        optimal_score_threshold.mean() if optimal_score_threshold.size else 0.0
    )
    print(
        "Classification Score at Optimal F-1 Score: {}".format(optimal_score_threshold)
    )
    # Persist COCO stats plus the threshold; other scripts read the last value.
    text_file_name = os.path.join(inference_output_dir, "mAP_res.txt")
    with open(text_file_name, "w") as text_file:
        print(
            results_api.stats.tolist()
            + [
                optimal_score_threshold,
            ],
            file=text_file,
        )
if __name__ == "__main__":
    # Parse command-line options and hand off to the distributed launcher.
    parsed_args = setup_arg_parser().parse_args()
    print("Command Line Args:", parsed_args)
    launch(
        main,
        parsed_args.num_gpus,
        num_machines=parsed_args.num_machines,
        machine_rank=parsed_args.machine_rank,
        dist_url=parsed_args.dist_url,
        args=(parsed_args,),
    )
| 3,798
| 31.194915
| 87
|
py
|
pmb-nll
|
pmb-nll-main/src/offline_evaluation/compute_ood_probabilistic_metrics.py
|
import itertools
import os
import torch
import ujson as json
import pickle
from prettytable import PrettyTable
# Detectron imports
from detectron2.engine import launch
# Project imports
from core.evaluation_tools import scoring_rules
from core.evaluation_tools.evaluation_utils import eval_predictions_preprocess
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(
        args,
        cfg=None,
        min_allowed_score=None):
    """Score out-of-distribution detections, treating all of them as false positives.

    Loads (or preprocesses and caches) the OOD predictions, then computes
    classification and regression scoring rules over every detection, prints a
    one-row summary table and pickles the combined analysis dict.

    Args:
        args: parsed command-line namespace (test_dataset, inference_config, ...).
        cfg: optional pre-built config; built from ``args`` when None.
        min_allowed_score: detection score threshold; when None it is read
            from the original training dataset's mAP_res.txt, else 0.0.
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset
    # Setup torch device and num_threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
    # Build path to gt instances and inference output
    inference_output_dir = get_inference_output_dir(
        cfg['OUTPUT_DIR'],
        args.test_dataset,
        args.inference_config,
        args.image_corruption_level)
    if min_allowed_score is None:
        # Check if F-1 Score has been previously computed ON THE ORIGINAL
        # DATASET, and not on VOC.
        try:
            train_set_inference_output_dir = get_inference_output_dir(
                cfg['OUTPUT_DIR'],
                cfg.DATASETS.TEST[0],
                args.inference_config,
                0)
            with open(os.path.join(train_set_inference_output_dir, "mAP_res.txt"), "r") as f:
                # Last value in the file is the optimal score threshold.
                min_allowed_score = f.read().strip('][\n').split(', ')[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0
    # Load cached preprocessed predictions, or build and cache them from the
    # raw coco_instances_results.json.
    with torch.no_grad():
        try:
            preprocessed_predicted_instances = torch.load(
                os.path.join(
                    inference_output_dir,
                    "preprocessed_predicted_instances_odd_{}.pth".format(min_allowed_score)),
                map_location=device)
        # Process predictions
        except FileNotFoundError:
            prediction_file_name = os.path.join(
                inference_output_dir,
                'coco_instances_results.json')
            predicted_instances = json.load(open(prediction_file_name, 'r'))
            preprocessed_predicted_instances = eval_predictions_preprocess(
                predicted_instances, min_allowed_score=min_allowed_score, is_odd=True)
            torch.save(
                preprocessed_predicted_instances,
                os.path.join(
                    inference_output_dir,
                    "preprocessed_predicted_instances_odd_{}.pth".format(min_allowed_score)))
        predicted_boxes = preprocessed_predicted_instances['predicted_boxes']
        predicted_cov_mats = preprocessed_predicted_instances['predicted_covar_mats']
        predicted_cls_probs = preprocessed_predicted_instances['predicted_cls_probs']
        # Flatten the per-image dicts into single lists of detections.
        predicted_boxes = list(itertools.chain.from_iterable(
            [predicted_boxes[key] for key in predicted_boxes.keys()]))
        predicted_cov_mats = list(itertools.chain.from_iterable(
            [predicted_cov_mats[key] for key in predicted_cov_mats.keys()]))
        predicted_cls_probs = list(itertools.chain.from_iterable(
            [predicted_cls_probs[key] for key in predicted_cls_probs.keys()]))
        # On an OOD dataset every detection counts as a false positive.
        num_false_positives = len(predicted_boxes)
        valid_idxs = torch.as_tensor(
            [i for i in range(num_false_positives)]).to(device)
        predicted_boxes = torch.stack(predicted_boxes, 1).transpose(0, 1)
        predicted_cov_mats = torch.stack(predicted_cov_mats, 1).transpose(0, 1)
        predicted_cls_probs = torch.stack(
            predicted_cls_probs,
            1).transpose(
            0,
            1)
        false_positives_dict = {
            'predicted_box_means': predicted_boxes,
            'predicted_box_covariances': predicted_cov_mats,
            'predicted_cls_probs': predicted_cls_probs}
        false_positives_reg_analysis = scoring_rules.compute_reg_scores_fn(
            false_positives_dict, valid_idxs)
        # The "ground-truth" category for an OOD false positive is background;
        # RetinaNet has no background column so it is derived as 1 - max score.
        if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
            predicted_class_probs, predicted_class_idx = predicted_cls_probs.max(
                1)
            false_positives_dict['predicted_score_of_gt_category'] = 1.0 - \
                predicted_class_probs
            false_positives_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
                false_positives_dict, valid_idxs)
        else:
            false_positives_dict['predicted_score_of_gt_category'] = predicted_cls_probs[:, -1]
            _, predicted_class_idx = predicted_cls_probs[:, :-1].max(
                1)
            false_positives_cls_analysis = scoring_rules.softmax_compute_cls_scores(
                false_positives_dict, valid_idxs)
        # Summarize and print all
        table = PrettyTable()
        table.field_names = (['Output Type',
                              'Number of Instances',
                              'Cls Ignorance Score',
                              'Cls Brier/Probability Score',
                              'Reg Ignorance Score',
                              'Reg Energy Score'])
        table.add_row(
            [
                "False Positives:",
                num_false_positives,
                '{:.4f}'.format(
                    false_positives_cls_analysis['ignorance_score_mean'],),
                '{:.4f}'.format(
                    false_positives_cls_analysis['brier_score_mean']),
                '{:.4f}'.format(
                    false_positives_reg_analysis['total_entropy_mean']),
                '{:.4f}'.format(
                    false_positives_reg_analysis['fp_energy_score_mean'])])
        print(table)
        text_file_name = os.path.join(
            inference_output_dir,
            'probabilistic_scoring_res_odd_{}.txt'.format(min_allowed_score))
        with open(text_file_name, "w") as text_file:
            print(table, file=text_file)
        dictionary_file_name = os.path.join(
            inference_output_dir,
            'probabilistic_scoring_res_odd_{}.pkl'.format(min_allowed_score))
        # Merge cls metrics into the reg dict and persist everything together.
        false_positives_reg_analysis.update(false_positives_cls_analysis)
        with open(dictionary_file_name, "wb") as pickle_file:
            pickle.dump(false_positives_reg_analysis, pickle_file)
if __name__ == "__main__":
    # Parse command-line options and hand off to the distributed launcher.
    parsed_args = setup_arg_parser().parse_args()
    print("Command Line Args:", parsed_args)
    launch(
        main,
        parsed_args.num_gpus,
        num_machines=parsed_args.num_machines,
        machine_rank=parsed_args.machine_rank,
        dist_url=parsed_args.dist_url,
        args=(parsed_args,),
    )
| 7,146
| 38.486188
| 116
|
py
|
pmb-nll
|
pmb-nll-main/src/offline_evaluation/compute_calibration_errors.py
|
import calibration as cal
import os
import pickle
import torch
from prettytable import PrettyTable
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.engine import launch
# Project imports
from core.evaluation_tools import evaluation_utils
from core.evaluation_tools.evaluation_utils import get_test_thing_dataset_id_to_train_contiguous_id_dict
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(
        args,
        cfg=None,
        iou_min=None,
        iou_correct=None,
        min_allowed_score=None,
        print_results=True,
        inference_output_dir=""):
    """Compute classification and regression calibration errors for saved detections.

    Matches saved inference results against ground truth, then computes:
    (a) the marginal classification calibration error via the `calibration`
    library and (b) per-box-dimension regression calibration errors
    (expected and maximum), averaged over classes and dimensions.
    Results are printed (optionally) and written to .txt/.pkl files in
    ``inference_output_dir``.

    Args:
        args: parsed command-line namespace (test_dataset, inference_config,
            iou thresholds, image_corruption_level, ...).
        cfg: optional pre-built detectron2 config; built from ``args`` if None.
        iou_min: IOU below which a detection is a false positive; defaults to
            ``args.iou_min``.
        iou_correct: IOU above which a detection is a true positive; defaults
            to ``args.iou_correct``.
        min_allowed_score: score threshold for detections; if None, read from
            the training dataset's ``mAP_res.txt`` (falls back to 0.0).
        print_results: if True, print a summary table and write a .txt file.
        inference_output_dir: where inference results live; derived from cfg
            and args when empty.
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset

    # Setup torch device and num_threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)

    # Build path to gt instances and inference output
    if inference_output_dir == "":
        inference_output_dir = get_inference_output_dir(
            cfg['OUTPUT_DIR'],
            args.test_dataset,
            args.inference_config,
            args.image_corruption_level)

    # Get thresholds to perform evaluation on
    if iou_min is None:
        iou_min = args.iou_min
    if iou_correct is None:
        iou_correct = args.iou_correct
    if min_allowed_score is None:
        # Check if F-1 Score has been previously computed ON THE ORIGINAL
        # DATASET such as COCO even when evaluating on OpenImages.
        try:
            train_set_inference_output_dir = get_inference_output_dir(
                cfg['OUTPUT_DIR'],
                cfg.DATASETS.TEST[0],
                args.inference_config,
                0)
            # mAP_res.txt stores a bracketed list; the last element is the
            # optimal score threshold.
            with open(os.path.join(train_set_inference_output_dir, "mAP_res.txt"), "r") as f:
                min_allowed_score = f.read().strip('][\n').split(', ')[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0

    # Get category mapping dictionary:
    train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]).thing_dataset_id_to_contiguous_id
    test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        args.test_dataset).thing_dataset_id_to_contiguous_id
    cat_mapping_dict = get_test_thing_dataset_id_to_train_contiguous_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id)

    # Get matched results by either generating them or loading from file.
    with torch.no_grad():
        matched_results = evaluation_utils.get_matched_results(
            cfg, inference_output_dir,
            iou_min=iou_min,
            iou_correct=iou_correct,
            min_allowed_score=min_allowed_score)

        # Build preliminary dicts required for computing classification scores.
        for matched_results_key in matched_results.keys():
            if 'gt_cat_idxs' in matched_results[matched_results_key].keys():
                # First we convert the written things indices to contiguous
                # indices.
                gt_converted_cat_idxs = matched_results[matched_results_key]['gt_cat_idxs'].squeeze(
                    1)
                gt_converted_cat_idxs = torch.as_tensor([cat_mapping_dict[class_idx.cpu(
                ).tolist()] for class_idx in gt_converted_cat_idxs]).to(device)
                matched_results[matched_results_key]['gt_converted_cat_idxs'] = gt_converted_cat_idxs.to(
                    device)
                matched_results[matched_results_key]['gt_cat_idxs'] = gt_converted_cat_idxs
            if 'predicted_cls_probs' in matched_results[matched_results_key].keys(
            ):
                if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
                    # For false positives, the correct category is background. For retinanet, since no explicit
                    # background category is available, this value is computed as 1.0 - score of the predicted
                    # category.
                    predicted_class_probs, predicted_cat_idxs = matched_results[matched_results_key][
                        'predicted_cls_probs'].max(
                        1)
                    matched_results[matched_results_key]['output_logits'] = predicted_class_probs
                else:
                    # RCNN-style heads: drop the trailing background column
                    # before taking the argmax.
                    predicted_class_probs, predicted_cat_idxs = matched_results[
                        matched_results_key]['predicted_cls_probs'][:, :-1].max(1)
                matched_results[matched_results_key]['predicted_cat_idxs'] = predicted_cat_idxs

        # Load the different detection partitions
        true_positives = matched_results['true_positives']
        duplicates = matched_results['duplicates']
        localization_errors = matched_results['localization_errors']
        false_positives = matched_results['false_positives']

        reg_maximum_calibration_error_list = []
        reg_expected_calibration_error_list = []

        if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
            # RetinaNet: treat every class score as an independent binary
            # prediction (flattened), with one-hot ground truth.
            all_predicted_scores = torch.cat(
                (true_positives['predicted_cls_probs'].flatten(),
                 duplicates['predicted_cls_probs'].flatten(),
                 localization_errors['predicted_cls_probs'].flatten(),
                 false_positives['predicted_cls_probs'].flatten()),
                0)
            all_gt_scores = torch.cat(
                (torch.nn.functional.one_hot(
                    true_positives['gt_cat_idxs'],
                    true_positives['predicted_cls_probs'].shape[1]).flatten().to(device),
                 torch.nn.functional.one_hot(
                     duplicates['gt_cat_idxs'],
                     duplicates['predicted_cls_probs'].shape[1]).flatten().to(device),
                 torch.zeros_like(
                     localization_errors['predicted_cls_probs'].type(
                         torch.LongTensor).flatten()).to(device),
                 torch.zeros_like(
                     false_positives['predicted_cls_probs'].type(
                         torch.LongTensor).flatten()).to(device)),
                0)
        else:
            # For RCNN based networks, a background category is
            # explicitly available.
            # NOTE(review): 80.0 appears to be the COCO background index —
            # confirm if evaluating with a different class count.
            all_predicted_scores = torch.cat(
                (true_positives['predicted_cls_probs'],
                 duplicates['predicted_cls_probs'],
                 localization_errors['predicted_cls_probs'],
                 false_positives['predicted_cls_probs']),
                0)
            all_gt_scores = torch.cat(
                (true_positives['gt_cat_idxs'],
                 duplicates['gt_cat_idxs'],
                 torch.ones_like(
                     localization_errors['predicted_cls_probs'][:, 0]).fill_(80.0).type(
                     torch.LongTensor).to(device),
                 torch.ones_like(
                     false_positives['predicted_cls_probs'][:, 0]).fill_(80.0).type(
                     torch.LongTensor).to(device)), 0)

        # Compute classification calibration error using calibration
        # library
        cls_marginal_calibration_error = cal.get_calibration_error(
            all_predicted_scores.cpu().numpy(), all_gt_scores.cpu().numpy())

        for class_idx in cat_mapping_dict.values():
            true_positives_valid_idxs = true_positives['gt_converted_cat_idxs'] == class_idx
            localization_errors_valid_idxs = localization_errors['gt_converted_cat_idxs'] == class_idx
            duplicates_valid_idxs = duplicates['gt_converted_cat_idxs'] == class_idx

            # Compute regression calibration errors. False negatives cant be evaluated since
            # those do not have ground truth.
            all_predicted_means = torch.cat(
                (true_positives['predicted_box_means'][true_positives_valid_idxs],
                 duplicates['predicted_box_means'][duplicates_valid_idxs],
                 localization_errors['predicted_box_means'][localization_errors_valid_idxs]),
                0)
            all_predicted_covariances = torch.cat(
                (true_positives['predicted_box_covariances'][true_positives_valid_idxs],
                 duplicates['predicted_box_covariances'][duplicates_valid_idxs],
                 localization_errors['predicted_box_covariances'][localization_errors_valid_idxs]),
                0)
            all_predicted_gt = torch.cat(
                (true_positives['gt_box_means'][true_positives_valid_idxs],
                 duplicates['gt_box_means'][duplicates_valid_idxs],
                 localization_errors['gt_box_means'][localization_errors_valid_idxs]),
                0)

            # Keep only the diagonal (per-dimension variances).
            all_predicted_covariances = torch.diagonal(
                all_predicted_covariances, dim1=1, dim2=2)

            # The assumption of uncorrelated components is not accurate, especially when estimating full
            # covariance matrices. However, using scipy to compute multivariate cdfs is very very
            # time consuming for such large amounts of data.
            reg_maximum_calibration_error = []
            reg_expected_calibration_error = []

            # Regression calibration is computed for every box dimension
            # separately, and averaged after.
            for box_dim in range(all_predicted_gt.shape[1]):
                all_predicted_means_current_dim = all_predicted_means[:, box_dim]
                all_predicted_gt_current_dim = all_predicted_gt[:, box_dim]
                all_predicted_covariances_current_dim = all_predicted_covariances[:, box_dim]

                # CDF of the ground truth under the predicted 1-D Gaussian;
                # for a calibrated model these values are uniform on [0, 1].
                normal_dists = torch.distributions.Normal(
                    all_predicted_means_current_dim,
                    scale=torch.sqrt(all_predicted_covariances_current_dim))
                all_predicted_scores = normal_dists.cdf(
                    all_predicted_gt_current_dim)

                reg_calibration_error = []
                histogram_bin_step_size = 1 / 15.0  # 15 histogram bins
                for i in torch.arange(
                        0.0,
                        1.0 - histogram_bin_step_size,
                        histogram_bin_step_size):
                    # Get number of elements in bin
                    elements_in_bin = (
                        all_predicted_scores < (i + histogram_bin_step_size))
                    num_elems_in_bin_i = elements_in_bin.type(
                        torch.FloatTensor).to(device).sum()

                    # Compute calibration error from "Accurate uncertainties for deep
                    # learning using calibrated regression" paper.
                    reg_calibration_error.append(
                        (num_elems_in_bin_i / all_predicted_scores.shape[0] - (i + histogram_bin_step_size)) ** 2)

                calibration_error = torch.stack(
                    reg_calibration_error).to(device)
                reg_maximum_calibration_error.append(calibration_error.max())
                reg_expected_calibration_error.append(calibration_error.mean())
            reg_maximum_calibration_error_list.append(
                reg_maximum_calibration_error)
            reg_expected_calibration_error_list.append(
                reg_expected_calibration_error)

        # Summarize and print all
        # NaNs arise for classes with no matched detections; they are masked
        # out before averaging.
        reg_expected_calibration_error = torch.stack([torch.stack(
            reg, 0) for reg in reg_expected_calibration_error_list], 0)
        reg_expected_calibration_error = reg_expected_calibration_error[
            ~torch.isnan(reg_expected_calibration_error)].mean()

        reg_maximum_calibration_error = torch.stack([torch.stack(
            reg, 0) for reg in reg_maximum_calibration_error_list], 0)
        reg_maximum_calibration_error = reg_maximum_calibration_error[
            ~torch.isnan(reg_maximum_calibration_error)].mean()

        if print_results:
            table = PrettyTable()
            table.field_names = (['Cls Marginal Calibration Error',
                                  'Reg Expected Calibration Error',
                                  'Reg Maximum Calibration Error'])
            table.add_row([cls_marginal_calibration_error,
                           reg_expected_calibration_error.cpu().numpy().tolist(),
                           reg_maximum_calibration_error.cpu().numpy().tolist()])
            print(table)
            text_file_name = os.path.join(
                inference_output_dir,
                'calibration_errors_{}_{}_{}.txt'.format(
                    iou_min, iou_correct, min_allowed_score))
            with open(text_file_name, "w") as text_file:
                print([
                    cls_marginal_calibration_error,
                    reg_expected_calibration_error.cpu().numpy().tolist(),
                    reg_maximum_calibration_error.cpu().numpy().tolist()], file=text_file)

        # Persist results for later aggregation (see
        # average_metrics_over_iou_thresholds.py).
        dictionary_file_name = os.path.join(
            inference_output_dir, 'calibration_errors_res_{}_{}_{}.pkl'.format(
                iou_min, iou_correct, min_allowed_score))
        final_accumulated_output_dict = {
            'cls_marginal_calibration_error': cls_marginal_calibration_error,
            'reg_expected_calibration_error': reg_expected_calibration_error.cpu().numpy(),
            'reg_maximum_calibration_error': reg_maximum_calibration_error.cpu().numpy()}

        with open(dictionary_file_name, "wb") as pickle_file:
            pickle.dump(final_accumulated_output_dict, pickle_file)
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 14,295
| 45.718954
| 116
|
py
|
pmb-nll
|
pmb-nll-main/src/offline_evaluation/__init__.py
| 0
| 0
| 0
|
py
|
|
pmb-nll
|
pmb-nll-main/src/offline_evaluation/average_metrics_over_iou_thresholds.py
|
import numpy as np
import os
import pickle
from prettytable import PrettyTable
# Detectron imports
from detectron2.engine import launch
# Project imports
from core.setup import setup_config, setup_arg_parser
from offline_evaluation import compute_probabilistic_metrics, compute_calibration_errors
from probabilistic_inference.inference_utils import get_inference_output_dir
def main(args):
    """Average probabilistic-detection and calibration metrics over IOU thresholds.

    For each IOU threshold in [0.5, 0.95] (step 0.05), loads (or computes on
    a cache miss) the probabilistic scoring and calibration-error pickles,
    averages every metric across thresholds with ``np.nanmean``, writes the
    averaged dictionaries back as pickles, and prints summary tables.

    Args:
        args: parsed command-line namespace (test_dataset, inference_config,
            iou_min, image_corruption_level, ...).
    """
    cfg = setup_config(args,
                       random_seed=args.random_seed,
                       is_testing=True)

    inference_output_dir = get_inference_output_dir(
        cfg['OUTPUT_DIR'],
        args.test_dataset,
        args.inference_config,
        args.image_corruption_level)

    # Check if F-1 Score has been previously computed ON THE ORIGINAL
    # DATASET such as COCO even when evaluating on OpenImages.
    try:
        train_set_inference_output_dir = get_inference_output_dir(
            cfg['OUTPUT_DIR'],
            cfg.DATASETS.TEST[0],
            args.inference_config,
            0)
        # mAP_res.txt stores a bracketed list; the last element is the
        # optimal score threshold.
        with open(os.path.join(train_set_inference_output_dir, "mAP_res.txt"), "r") as f:
            min_allowed_score = f.read().strip('][\n').split(', ')[-1]
            min_allowed_score = round(float(min_allowed_score), 4)
    except FileNotFoundError:
        # If not, process all detections. Not recommended as the results might be influenced by very low scoring
        # detections that would normally be removed in robotics/vision
        # applications.
        min_allowed_score = 0.0

    iou_thresholds = np.arange(0.5, 1.0, 0.05).round(2)

    probabilistic_detection_dicts = []
    calibration_dicts = []
    for iou_correct in iou_thresholds:
        print("Processing detections at {} iou threshold...".format(iou_correct))
        probabilistic_scores_file_name = os.path.join(
            inference_output_dir, 'probabilistic_scoring_res_{}_{}_{}.pkl'.format(
                args.iou_min, iou_correct, min_allowed_score))
        calibration_file_name = os.path.join(
            inference_output_dir, 'calibration_errors_res_{}_{}_{}.pkl'.format(
                args.iou_min, iou_correct, min_allowed_score))

        # Load cached results; on a cache miss, compute them (quietly) and
        # then re-load the pickle the computation wrote.
        try:
            with open(probabilistic_scores_file_name, "rb") as f:
                probabilistic_scores = pickle.load(f)
        except FileNotFoundError:
            compute_probabilistic_metrics.main(
                args, cfg, iou_correct=iou_correct, print_results=False)
            with open(probabilistic_scores_file_name, "rb") as f:
                probabilistic_scores = pickle.load(f)

        try:
            with open(calibration_file_name, "rb") as f:
                calibration_errors = pickle.load(f)
        except FileNotFoundError:
            compute_calibration_errors.main(
                args, cfg, iou_correct=iou_correct, print_results=False)
            with open(calibration_file_name, "rb") as f:
                calibration_errors = pickle.load(f)

        probabilistic_detection_dicts.append(probabilistic_scores)
        calibration_dicts.append(calibration_errors)

    # Average the (two-level) probabilistic scoring dicts across thresholds.
    # nanmean skips thresholds where a metric was undefined.
    probabilistic_detection_final_dict = {
        key: {} for key in probabilistic_detection_dicts[0].keys()}
    for key in probabilistic_detection_dicts[0].keys():
        for key_l2 in probabilistic_detection_dicts[0][key].keys():
            accumulated_values = [
                probabilistic_detection_dicts[i][key][key_l2] for i in range(
                    len(probabilistic_detection_dicts))]
            probabilistic_detection_final_dict[key].update(
                {key_l2: np.nanmean(np.array(accumulated_values), 0)})

    # Average the (flat) calibration dicts across thresholds.
    calibration_final_dict = {key: None for key in calibration_dicts[0].keys()}
    for key in calibration_dicts[0].keys():
        accumulated_values = [
            calibration_dicts[i][key] for i in range(
                len(calibration_dicts))]
        calibration_final_dict[key] = np.nanmean(
            np.array(accumulated_values), 0)

    dictionary_file_name = os.path.join(
        inference_output_dir,
        'probabilistic_scoring_res_averaged_{}.pkl'.format(min_allowed_score))
    with open(dictionary_file_name, "wb") as pickle_file:
        pickle.dump(probabilistic_detection_final_dict, pickle_file)

    dictionary_file_name = os.path.join(
        inference_output_dir, 'calibration_res_averaged_{}.pkl'.format(
            min_allowed_score))
    with open(dictionary_file_name, "wb") as pickle_file:
        pickle.dump(calibration_final_dict, pickle_file)

    # Summarize and print all
    table = PrettyTable()
    table.field_names = (['Output Type',
                          'Cls Ignorance Score',
                          'Cls Brier/Probability Score',
                          'Reg Ignorance Score',
                          'Reg Energy Score'])
    table.add_row(
        [
            "True Positives:",
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['true_positives_cls_analysis']['ignorance_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['true_positives_cls_analysis']['brier_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['true_positives_reg_analysis']['ignorance_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['true_positives_reg_analysis']['energy_score_mean']))])
    table.add_row(
        [
            "Duplicates:",
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['duplicates_cls_analysis']['ignorance_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['duplicates_cls_analysis']['brier_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['duplicates_reg_analysis']['ignorance_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['duplicates_reg_analysis']['energy_score_mean']))])
    table.add_row(
        [
            "Localization Errors:",
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['localization_errors_cls_analysis']['ignorance_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['localization_errors_cls_analysis']['brier_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['localization_errors_reg_analysis']['ignorance_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['localization_errors_reg_analysis']['energy_score_mean']))])
    # False positives have no ground truth, so different regression keys
    # (total entropy / FP energy score) are reported for them.
    table.add_row(
        [
            "False Positives:",
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['false_positives_cls_analysis']['ignorance_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['false_positives_cls_analysis']['brier_score_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['false_positives_reg_analysis']['total_entropy_mean'])),
            '{:.4f}'.format(
                np.nanmean(probabilistic_detection_final_dict['false_positives_reg_analysis']['fp_energy_score_mean']))])
    print(table)

    text_file_name = os.path.join(
        inference_output_dir,
        'probabilistic_scoring_res_averaged_{}.txt'.format(
            min_allowed_score))
    with open(text_file_name, "w") as text_file:
        print(table, file=text_file)

    table = PrettyTable()
    table.field_names = (['Cls Marginal Calibration Error',
                          'Reg Expected Calibration Error',
                          'Reg Maximum Calibration Error'])
    table.add_row(
        [
            '{:.4f}'.format(
                calibration_final_dict['cls_marginal_calibration_error']), '{:.4f}'.format(
                calibration_final_dict['reg_expected_calibration_error']), '{:.4f}'.format(
                calibration_final_dict['reg_maximum_calibration_error'])])
    text_file_name = os.path.join(
        inference_output_dir,
        'calibration_res_averaged_{}.txt'.format(
            min_allowed_score))
    with open(text_file_name, "w") as text_file:
        print(table, file=text_file)
    print(table)
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 8,797
| 41.095694
| 124
|
py
|
pmb-nll
|
pmb-nll-main/visualization/visualize_errors.py
|
import cv2
import numpy as np
import os
import ujson as json
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.engine import launch
# Project imports
from core.setup import setup_config, setup_arg_parser
from core.evaluation_tools import evaluation_utils
from core.visualization_tools.probabilistic_visualizer import ProbabilisticVisualizer as Visualizer
from probabilistic_inference.inference_utils import get_inference_output_dir
def main(
        args,
        cfg=None,
        iou_min=None,
        iou_correct=None,
        min_allowed_score=None):
    """Interactively visualize detection errors per image with OpenCV.

    For every test image: matches predictions to ground truth, then shows
    five windows — true positives (blue), false positives (red), duplicates
    (magenta), localization errors (aqua), and false negatives (coral) —
    each overlaid on the ground-truth boxes (green). Blocks on a key press
    per image.

    Args:
        args: parsed command-line namespace (test_dataset, inference_config,
            iou thresholds, image_corruption_level, ...).
        cfg: optional pre-built detectron2 config; built from ``args`` if None.
        iou_min: IOU below which a detection is a false positive; defaults to
            ``args.iou_min``.
        iou_correct: IOU above which a detection is a true positive; defaults
            to ``args.iou_correct``.
        min_allowed_score: score threshold; if None, read from the training
            dataset's ``mAP_res.txt`` (falls back to 0.0).
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset

    # Build path to gt instances and inference output
    inference_output_dir = get_inference_output_dir(
        cfg['OUTPUT_DIR'],
        args.test_dataset,
        args.inference_config,
        args.image_corruption_level)

    # Get thresholds to perform evaluation on
    if iou_min is None:
        iou_min = args.iou_min
    if iou_correct is None:
        iou_correct = args.iou_correct
    if min_allowed_score is None:
        # Check if F-1 Score has been previously computed ON THE ORIGINAL
        # DATASET such as COCO even when evaluating on VOC.
        try:
            train_set_inference_output_dir = get_inference_output_dir(
                cfg['OUTPUT_DIR'],
                cfg.DATASETS.TEST[0],
                args.inference_config,
                0)
            with open(os.path.join(train_set_inference_output_dir, "mAP_res.txt"), "r") as f:
                min_allowed_score = f.read().strip('][\n').split(', ')[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0

    # get preprocessed instances
    preprocessed_predicted_instances, preprocessed_gt_instances = evaluation_utils.get_per_frame_preprocessed_instances(
        cfg, inference_output_dir, min_allowed_score)

    # get metacatalog and image infos
    meta_catalog = MetadataCatalog.get(args.test_dataset)
    images_info = json.load(open(meta_catalog.json_file, 'r'))['images']

    # Loop over all images and visualize errors
    for image_info in images_info:
        image_id = image_info['id']

        # OpenCV loads BGR; convert to RGB for the visualizer.
        image = cv2.imread(
            os.path.join(
                meta_catalog.image_root,
                image_info['file_name']))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Single-image dicts, as expected by the matching helper.
        predicted_box_means = {
            image_id: preprocessed_predicted_instances['predicted_boxes'][image_id]}
        predicted_box_covariances = {
            image_id: preprocessed_predicted_instances['predicted_covar_mats'][image_id]}
        predicted_cls_probs = {
            image_id: preprocessed_predicted_instances['predicted_cls_probs'][image_id]}
        gt_box_means = {
            image_id: preprocessed_gt_instances['gt_boxes'][image_id]}
        gt_cat_idxs = {
            image_id: preprocessed_gt_instances['gt_cat_idxs'][image_id]}

        # Perform matching
        matched_results = evaluation_utils.match_predictions_to_groundtruth(
            predicted_box_means,
            predicted_cls_probs,
            predicted_box_covariances,
            gt_box_means,
            gt_cat_idxs,
            iou_min=iou_min,
            iou_correct=iou_correct)

        true_positives = matched_results['true_positives']
        duplicates = matched_results['duplicates']
        localization_errors = matched_results['localization_errors']
        false_positives = matched_results['false_positives']
        false_negatives = matched_results['false_negatives']

        # Plot True Positive Detections In Blue
        v = Visualizer(
            image,
            meta_catalog,
            scale=2.0)

        gt_boxes = true_positives['gt_box_means'].cpu().numpy()
        true_positive_boxes = true_positives['predicted_box_means'].cpu(
        ).numpy()
        false_positives_boxes = false_positives['predicted_box_means'].cpu(
        ).numpy()
        duplicates_boxes = duplicates['predicted_box_means'].cpu().numpy()
        localization_errors_boxes = localization_errors['predicted_box_means'].cpu(
        ).numpy()

        # Get category labels
        gt_cat_idxs = true_positives['gt_cat_idxs'].cpu().numpy()

        # Get category mapping dictionary:
        train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
            cfg.DATASETS.TRAIN[0]).thing_dataset_id_to_contiguous_id
        test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
            args.test_dataset).thing_dataset_id_to_contiguous_id

        thing_dataset_id_to_contiguous_id = evaluation_utils.get_test_thing_dataset_id_to_train_contiguous_id_dict(
            cfg, args, train_thing_dataset_id_to_contiguous_id, test_thing_dataset_id_to_contiguous_id)

        class_list = MetadataCatalog.get(
            cfg.DATASETS.TRAIN[0]).as_dict()['thing_classes']

        if gt_cat_idxs.shape[0] > 0:
            gt_labels = [class_list[thing_dataset_id_to_contiguous_id[gt_class]]
                         for gt_class in gt_cat_idxs[:, 0]]
        else:
            gt_labels = []

        # NOTE(review): `len(tensor > 0)` below equals `len(tensor)` (the
        # comparison preserves the first dimension), so these checks behave
        # like the intended `len(tensor) > 0` — confusing but not a bug.
        if cfg.MODEL.META_ARCHITECTURE != "ProbabilisticRetinaNet":
            # RCNN-style heads: drop the trailing background column before
            # taking the argmax class.
            if len(true_positives['predicted_cls_probs'] > 0):
                _, true_positive_classes = true_positives['predicted_cls_probs'][:, :-1].max(
                    1)
            else:
                true_positive_classes = np.array([])
            if len(duplicates['predicted_cls_probs']) > 0:
                _, duplicates_classes = duplicates['predicted_cls_probs'][:, :-1].max(
                    1)
            else:
                duplicates_classes = np.array([])
            if len(localization_errors['predicted_cls_probs']) > 0:
                _, localization_errors_classes = localization_errors['predicted_cls_probs'][:, :-1].max(
                    1)
            else:
                localization_errors_classes = np.array([])
            if len(false_positives['predicted_cls_probs']) > 0:
                _, false_positives_classes = false_positives['predicted_cls_probs'][:, :-1].max(
                    1)
            else:
                false_positives_classes = np.array([])
        else:
            # RetinaNet has no explicit background column.
            if len(true_positives['predicted_cls_probs'] > 0):
                _, true_positive_classes = true_positives['predicted_cls_probs'].max(
                    1)
            else:
                true_positive_classes = np.array([])
            if len(duplicates['predicted_cls_probs']) > 0:
                _, duplicates_classes = duplicates['predicted_cls_probs'].max(
                    1)
            else:
                duplicates_classes = np.array([])
            if len(localization_errors['predicted_cls_probs']) > 0:
                _, localization_errors_classes = localization_errors['predicted_cls_probs'].max(
                    1)
            else:
                localization_errors_classes = np.array([])
            if len(false_positives['predicted_cls_probs']) > 0:
                _, false_positives_classes = false_positives['predicted_cls_probs'].max(
                    1)
            else:
                false_positives_classes = np.array([])

        # Map class indices to human-readable labels for each partition.
        if len(true_positives['predicted_cls_probs'] > 0):
            true_positive_classes = true_positive_classes.cpu(
            ).numpy()
            true_positive_labels = [class_list[tp_class]
                                    for tp_class in true_positive_classes]
        else:
            true_positive_labels = []

        if len(duplicates['predicted_cls_probs']) > 0:
            duplicates_classes = duplicates_classes.cpu(
            ).numpy()
            duplicates_labels = [class_list[d_class]
                                 for d_class in duplicates_classes]
        else:
            duplicates_labels = []

        if len(localization_errors['predicted_cls_probs']) > 0:
            localization_errors_classes = localization_errors_classes.cpu(
            ).numpy()
            localization_errors_labels = [class_list[le_class]
                                          for le_class in localization_errors_classes]
        else:
            localization_errors_labels = []

        if len(false_positives['predicted_cls_probs']) > 0:
            false_positives_classes = false_positives_classes.cpu(
            ).numpy()
            false_positives_labels = [class_list[fp_class]
                                      for fp_class in false_positives_classes]
        else:
            false_positives_labels = []

        # Overlay true positives in blue
        _ = v.overlay_instances(
            boxes=gt_boxes,
            assigned_colors=[
                'lime' for _ in gt_boxes],
            labels=gt_labels,
            alpha=1.0)
        plotted_true_positive_boxes = v.overlay_instances(
            boxes=true_positive_boxes,
            assigned_colors=[
                'dodgerblue' for _ in true_positive_boxes],
            alpha=1.0,
            labels=true_positive_labels)
        cv2.imshow(
            'True positive detections with IOU greater than {}'.format(iou_correct),
            cv2.cvtColor(
                plotted_true_positive_boxes.get_image(),
                cv2.COLOR_RGB2BGR))

        # Plot False Positive Detections In Red
        v = Visualizer(
            image,
            meta_catalog,
            scale=2.0)
        _ = v.overlay_instances(
            boxes=gt_boxes,
            assigned_colors=[
                'lime' for _ in gt_boxes],
            labels=gt_labels,
            alpha=0.7)
        plotted_false_positive_boxes = v.overlay_instances(
            boxes=false_positives_boxes,
            assigned_colors=[
                'red' for _ in false_positives_boxes],
            alpha=1.0,
            labels=false_positives_labels)
        cv2.imshow(
            'False positive detections with IOU less than {}'.format(iou_min),
            cv2.cvtColor(
                plotted_false_positive_boxes.get_image(),
                cv2.COLOR_RGB2BGR))

        # Plot Duplicates
        v = Visualizer(
            image,
            meta_catalog,
            scale=2.0)
        _ = v.overlay_instances(
            boxes=gt_boxes,
            assigned_colors=[
                'lime' for _ in gt_boxes],
            labels=gt_labels,
            alpha=0.7)
        plotted_duplicates_boxes = v.overlay_instances(
            boxes=duplicates_boxes,
            assigned_colors=[
                'magenta' for _ in duplicates_boxes],
            alpha=1.0,
            labels=duplicates_labels)
        cv2.imshow(
            'Duplicate Detections',
            cv2.cvtColor(
                plotted_duplicates_boxes.get_image(),
                cv2.COLOR_RGB2BGR))

        # Plot localization errors
        v = Visualizer(
            image,
            meta_catalog,
            scale=2.0)
        _ = v.overlay_instances(
            boxes=gt_boxes,
            assigned_colors=[
                'lime' for _ in gt_boxes],
            labels=gt_labels,
            alpha=0.7)
        plotted_localization_errors_boxes = v.overlay_instances(
            boxes=localization_errors_boxes,
            assigned_colors=['aqua' for _ in localization_errors_boxes],
            alpha=1.0,
            labels=localization_errors_labels)
        cv2.imshow(
            'Detections with localization errors between minimum IOU = {} and maximum IOU = {}'.format(
                iou_min, iou_correct), cv2.cvtColor(
                plotted_localization_errors_boxes.get_image(), cv2.COLOR_RGB2BGR))

        # Plot False Negatives Detections In Brown
        if len(false_negatives['gt_box_means']) > 0:
            false_negatives_boxes = false_negatives['gt_box_means'].cpu(
            ).numpy()
            false_negatives_classes = false_negatives['gt_cat_idxs'].cpu(
            ).numpy()
            false_negatives_labels = [class_list[thing_dataset_id_to_contiguous_id[gt_class[0]]]
                                      for gt_class in false_negatives_classes.tolist()]
        else:
            false_negatives_boxes = np.array([])
            false_negatives_labels = []
        v = Visualizer(
            image,
            meta_catalog,
            scale=2.0)
        plotted_false_negative_boxes = v.overlay_instances(
            boxes=false_negatives_boxes,
            assigned_colors=[
                'coral' for _ in false_negatives_boxes],
            alpha=1.0,
            labels=false_negatives_labels)
        cv2.imshow(
            'False negative ground truth.',
            cv2.cvtColor(
                plotted_false_negative_boxes.get_image(),
                cv2.COLOR_RGB2BGR))

        # Block until a key is pressed, then close all windows for the
        # next image.
        cv2.waitKey(0)
        cv2.destroyAllWindows()
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 13,513
| 36.643454
| 120
|
py
|
pmb-nll
|
pmb-nll-main/visualization/visualize_predictions.py
|
import cv2
import numpy as np
import os
import ujson as json
from scipy.stats import entropy
from matplotlib import cm
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.engine import launch
# Project imports
from core.setup import setup_config, setup_arg_parser
from core.evaluation_tools import evaluation_utils
from core.visualization_tools.probabilistic_visualizer import ProbabilisticVisualizer
from probabilistic_inference.inference_utils import get_inference_output_dir
# noinspection PyTypeChecker
# noinspection PyTypeChecker
def main(
        args,
        cfg=None,
        min_allowed_score=None):
    """Interactively visualize probabilistic detections per test image.

    Draws ground-truth boxes (light green) and predicted boxes with their
    covariance ellipses; predicted boxes are colored by classification
    entropy (matplotlib autumn colormap: low entropy = red, high = yellow).
    Blocks on a key press per image.

    Args:
        args: parsed command-line namespace (test_dataset, inference_config,
            image_corruption_level, ...).
        cfg: optional pre-built detectron2 config; built from ``args`` if None.
        min_allowed_score: score threshold; if None, read from this run's
            ``mAP_res.txt`` (falls back to 0.0).
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset

    # Build path to gt instances and inference output
    inference_output_dir = get_inference_output_dir(
        cfg['OUTPUT_DIR'],
        args.test_dataset,
        args.inference_config,
        args.image_corruption_level)

    # Get thresholds to perform evaluation on
    if min_allowed_score is None:
        # Check if F-1 Score has been previously computed.
        try:
            # mAP_res.txt stores a bracketed list; the last element is the
            # optimal score threshold.
            with open(os.path.join(inference_output_dir, "mAP_res.txt"), "r") as f:
                min_allowed_score = f.read().strip('][\n').split(', ')[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0

    # get preprocessed instances
    preprocessed_predicted_instances, preprocessed_gt_instances = evaluation_utils.get_per_frame_preprocessed_instances(
        cfg, inference_output_dir, min_allowed_score)

    # get metacatalog and image infos
    meta_catalog = MetadataCatalog.get(args.test_dataset)
    images_info = json.load(open(meta_catalog.json_file, 'r'))['images']

    # Loop over all images and visualize errors
    for image_info in images_info:
        image_id = image_info['id']

        # OpenCV loads BGR; convert to RGB for the visualizer.
        image = cv2.imread(
            os.path.join(
                meta_catalog.image_root,
                image_info['file_name']))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        v = ProbabilisticVisualizer(
            image,
            meta_catalog,
            scale=1.5)
        class_list = v.metadata.as_dict()['thing_classes']

        predicted_box_means = preprocessed_predicted_instances['predicted_boxes'][image_id].cpu(
        ).numpy()
        gt_box_means = preprocessed_gt_instances['gt_boxes'][image_id].cpu(
        ).numpy()
        predicted_box_covariances = preprocessed_predicted_instances[
            'predicted_covar_mats'][image_id].cpu(
        ).numpy()
        predicted_cls_probs = preprocessed_predicted_instances['predicted_cls_probs'][image_id]

        if predicted_cls_probs.shape[0] > 0:
            if cfg.MODEL.META_ARCHITECTURE == "ProbabilisticGeneralizedRCNN" or cfg.MODEL.META_ARCHITECTURE == "ProbabilisticDetr":
                # Explicit background column: drop it for the argmax but keep
                # the full distribution for the entropy.
                predicted_scores, predicted_classes = predicted_cls_probs[:, :-1].max(
                    1)
                predicted_entropies = entropy(
                    predicted_cls_probs.cpu().numpy(), base=2)
            else:
                # No background column (e.g. RetinaNet): binary entropy of
                # the top score vs. its complement.
                predicted_scores, predicted_classes = predicted_cls_probs.max(
                    1)
                predicted_entropies = entropy(
                    np.stack(
                        (predicted_scores.cpu().numpy(),
                         1 - predicted_scores.cpu().numpy())),
                    base=2)
            predicted_classes = predicted_classes.cpu(
            ).numpy()
            predicted_classes = [class_list[p_class]
                                 for p_class in predicted_classes]
            # Color each box by its classification entropy.
            assigned_colors = cm.autumn(predicted_entropies)
            predicted_scores = predicted_scores.cpu().numpy()
        else:
            # No detections in this image.
            predicted_scores = np.array([])
            predicted_classes = np.array([])
            assigned_colors = []

        gt_cat_idxs = preprocessed_gt_instances['gt_cat_idxs'][image_id].cpu(
        ).numpy()
        thing_dataset_id_to_contiguous_id = meta_catalog.thing_dataset_id_to_contiguous_id
        if gt_cat_idxs.shape[0] > 0:
            gt_labels = [class_list[thing_dataset_id_to_contiguous_id[gt_class]]
                         for gt_class in gt_cat_idxs[:, 0]]
        else:
            gt_labels = []

        # noinspection PyTypeChecker
        _ = v.overlay_covariance_instances(
            boxes=gt_box_means,
            assigned_colors=[
                'lightgreen' for _ in gt_box_means],
            labels=gt_labels,
            alpha=1.0)
        plotted_detections = v.overlay_covariance_instances(
            boxes=predicted_box_means,
            covariance_matrices=predicted_box_covariances,
            assigned_colors=assigned_colors,
            alpha=1.0,
            labels=predicted_classes)

        cv2.imshow(
            'Detected Instances.',
            cv2.cvtColor(
                plotted_detections.get_image(),
                cv2.COLOR_RGB2BGR))
        cv2.waitKey()
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 5,668
| 34.879747
| 131
|
py
|
FDS
|
FDS-main/main.py
|
"""
This is the base code to learn the learning rate, momentum and weight decay
non-greedily with forward mode differentiation, over long horizons (e.g. CIFAR10)
"""
import os
import time
import shutil
import torch
import torch.optim as optim
import pickle
from utils.logger import *
from utils.helpers import *
from utils.datasets import *
from models.selector import *
class MetaLearner(object):
    """Learn per-segment lr / momentum / weight-decay schedules.

    Alternates long inner training runs (SGD with momentum, tracking
    forward-mode tangents Z of the weights w.r.t. each hyperparameter)
    with sign-based outer updates of the hyperparameter schedules.
    """
    def __init__(self, args):
        self.args = args
        ## Optimization
        self.hypers_init()
        self.cross_entropy = nn.CrossEntropyLoss()
        ## Experiment Set Up
        # Track the outer step whose schedule gave the best validation accuracy.
        self.best_outer_step = 0
        self.best_validation_acc = 0
        # One schedule per hyperparameter family; `learnables` gates whether grads are recorded.
        ns, learnables = (self.args.n_lrs, self.args.n_moms, self.args.n_wds), (self.args.learn_lr, self.args.learn_mom, self.args.learn_wd)
        self.all_lr_schedules, self.all_mom_schedules, self.all_wd_schedules = [torch.zeros((self.args.n_outer_steps+1, n)) for n in ns] #+1 since save init schedules and last schedule
        self.all_lr_raw_grads, self.all_mom_raw_grads, self.all_wd_raw_grads = [torch.zeros((self.args.n_outer_steps, n)) if l else None for (n,l) in zip(ns, learnables)]
        self.all_lr_smooth_grads, self.all_mom_smooth_grads, self.all_wd_smooth_grads = [torch.zeros((self.args.n_outer_steps, n)) if l else None for (n,l) in zip(ns, learnables)]
        self.experiment_path = os.path.join(self.args.log_directory_path, self.args.experiment_name)
        self.checkpoint_path = os.path.join(self.experiment_path, 'checkpoint.pth.tar')
        if os.path.exists(self.experiment_path):
            if self.args.use_gpu and os.path.isfile(self.checkpoint_path):
                raise NotImplementedError(f"Experiment folder {self.experiment_path} already exists") #TODO: restore code from ckpt
            else:
                shutil.rmtree(self.experiment_path) # clear debug logs on cpu
            os.makedirs(self.experiment_path)
        else:
            os.makedirs(self.experiment_path)
        copy_file(os.path.realpath(__file__), self.experiment_path) # save this python file in logs folder
        self.logger = Logger(self.experiment_path, 'run_results.csv')
        ## Save and Print Args
        print('\n---------')
        with open(os.path.join(self.experiment_path, 'args.txt'), 'w+') as f:
            for k, v in self.args.__dict__.items():
                print(k, v)
                f.write("{} \t {}\n".format(k, v))
        print('---------\n')
        print('\nLogging every {} outer_steps and every {} epochs per outer_step\n'.format(self.args.outer_step_log_freq, self.args.epoch_log_freq))
    def hypers_init(self):
        """ initialize hyperparameters """
        # For each family: the schedule values, their hypergradient buffer, the
        # last hypergradient signs, and per-element step sizes (decayed on sign flips).
        self.inner_lrs = self.args.inner_lr_init*torch.ones(self.args.n_lrs, device=self.args.device)
        self.inner_lrs_grad = torch.zeros_like(self.inner_lrs) # lr hypergradient
        self.lr_hypersigns = torch.zeros(self.args.n_lrs, device=self.args.device)
        self.lr_step_sizes = self.args.lr_init_step_size*torch.ones(self.args.n_lrs, device=self.args.device)
        self.inner_moms = self.args.inner_mom_init*torch.ones(self.args.n_moms, device=self.args.device)
        self.inner_moms_grad = torch.zeros_like(self.inner_moms)
        self.mom_hypersigns = torch.zeros(self.args.n_moms, device=self.args.device)
        self.mom_step_sizes = self.args.mom_init_step_size*torch.ones(self.args.n_moms, device=self.args.device)
        self.inner_wds = self.args.inner_wd_init*torch.ones(self.args.n_wds, device=self.args.device)
        self.inner_wds_grad = torch.zeros_like(self.inner_wds)
        self.wd_hypersigns = torch.zeros(self.args.n_wds, device=self.args.device)
        self.wd_step_sizes = self.args.wd_init_step_size*torch.ones(self.args.n_wds, device=self.args.device)
    def get_hypers(self, epoch, batch_idx):
        """return hyperparameters to be used for given batch"""
        # Each schedule splits the outer step's total batches into contiguous
        # segments; the segment index selects the hyperparameter value.
        lr_index = int(self.args.n_lrs * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches_for_this_outer_step)
        lr = float(self.inner_lrs[lr_index])
        mom_index = int(self.args.n_moms * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches_for_this_outer_step)
        mom = float(self.inner_moms[mom_index])
        wd_index = int(self.args.n_wds * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches_for_this_outer_step)
        wd = float(self.inner_wds[wd_index])
        return lr, mom, wd, lr_index, mom_index, wd_index
    def to_prune(self, epoch, batch_idx, n_hypers):
        """ Do we skip calculation of Z for this batch?"""
        # NOTE(review): if pruning_ratio > 0 and pruning_mode is neither
        # 'alternate' nor 'truncate', `to_prune` is unbound here (argparse
        # `choices` currently prevents that).
        if self.args.pruning_ratio==0:
            to_prune=False
        else:
            n_batches_per_hyper = int(self.n_total_batches_for_this_outer_step/n_hypers)
            current_global_batch_idx = epoch*self.n_batches_per_epoch + batch_idx
            current_global_batch_idx_per_hyper = current_global_batch_idx % n_batches_per_hyper
            if self.args.pruning_mode=='alternate': #rounded to nearest integer, so r=0.25 -> prune 1 in 4 but r=0.21 -> 1 in 4 also
                if self.args.pruning_ratio>=0.5: #at least 1 in 2 pruned
                    keep_freq = int(1/(1-self.args.pruning_ratio))
                    to_prune = (current_global_batch_idx_per_hyper % keep_freq != 0)
                else:
                    prune_freq = int(1/(self.args.pruning_ratio))
                    to_prune = (current_global_batch_idx_per_hyper % prune_freq == 0)
            elif self.args.pruning_mode=='truncate':
                # Skip the first `ratio` fraction of each hyperparameter's segment.
                to_prune = current_global_batch_idx_per_hyper < self.args.pruning_ratio*n_batches_per_hyper
        return to_prune
    def inner_loop(self):
        """
        Compute Z for each hyperparameter to learn over all epochs in the run

        Z_* holds the forward-mode tangent d(weights)/d(hyper) for each
        hyperparameter segment (one column per segment); C_* is the companion
        tangent of the momentum buffer. Both are propagated through each SGD
        step via Hessian-vector products (grads @ Z differentiated again).
        """
        ## Network
        self.classifier = select_model(True, self.args.dataset, self.args.architecture,
                                       self.args.init_type, self.args.init_param,
                                       self.args.device).to(self.args.device)
        self.classifier.train()
        self.weights = self.classifier.get_param()
        velocity = torch.zeros(self.weights.numel(), requires_grad=False, device=self.args.device)
        ## Forward Mode Init
        if self.args.learn_lr:
            self.n_batches_per_lr = 0
            Z_lr = torch.zeros((self.weights.numel(), self.args.n_lrs), device=self.args.device)
            C_lr = torch.zeros((self.weights.numel(), self.args.n_lrs), device=self.args.device)
        else:
            Z_lr = None
        if self.args.learn_mom:
            self.n_batches_per_mom = 0
            Z_mom = torch.zeros((self.weights.numel(), self.args.n_moms), device=self.args.device)
            C_mom = torch.zeros((self.weights.numel(), self.args.n_moms), device=self.args.device)
        else:
            Z_mom = None
        if self.args.learn_wd:
            self.n_batches_per_wd = 0
            Z_wd = torch.zeros((self.weights.numel(), self.args.n_wds), device=self.args.device)
            C_wd = torch.zeros((self.weights.numel(), self.args.n_wds), device=self.args.device)
        else:
            Z_wd = None
        ## Inner Loop Over All Epochs
        for epoch in range(self.n_inner_epochs_for_this_outer_step):
            t0_epoch = time.time()
            for batch_idx, (x_train, y_train) in enumerate(self.train_loader):
                lr, mom, wd, lr_index, mom_index, wd_index = self.get_hypers(epoch, batch_idx)
                #print(f'epoch {epoch} batch {batch_idx} -- lr idx {lr_index} -- mom idx {mom_index} -- wd index {wd_index}')
                x_train, y_train = x_train.to(device=self.args.device), y_train.to(device=self.args.device)
                train_logits = self.classifier.forward_with_param(x_train, self.weights)
                train_loss = self.cross_entropy(train_logits, y_train)
                # create_graph=True so grads can be differentiated again for H @ Z.
                grads = torch.autograd.grad(train_loss, self.weights, create_graph=True)[0]
                if self.args.clamp_grads: grads.clamp_(-self.args.clamp_grads_range, self.args.clamp_grads_range)
                if self.args.learn_lr and not self.to_prune(epoch, batch_idx, self.args.n_lrs):
                    #print('update lr')
                    self.n_batches_per_lr += 1
                    H_times_Z = torch.zeros((self.weights.numel(), self.args.n_lrs),device=self.args.device)
                    # Only segments up to the active one have non-zero tangents so far.
                    for j in range(lr_index + 1):
                        # Free the graph on the last HVP unless a later family still needs it.
                        retain = (j != lr_index) or self.args.learn_mom or self.args.learn_wd
                        H_times_Z[:, j] = torch.autograd.grad(grads @ Z_lr[:, j], self.weights, retain_graph=retain)[0]
                    if self.args.clamp_HZ: H_times_Z.clamp_(-self.args.clamp_HZ_range, self.args.clamp_HZ_range)
                    A_times_Z = Z_lr*(1 - lr*wd) - lr*H_times_Z
                    B = - mom*lr*C_lr
                    B[:,lr_index] -= grads.detach() + wd*self.weights.detach() + mom*velocity
                    C_lr = mom*C_lr + H_times_Z + wd*Z_lr
                    Z_lr = A_times_Z + B
                if self.args.learn_mom and not self.to_prune(epoch, batch_idx, self.args.n_moms):
                    #print('update mom')
                    self.n_batches_per_mom += 1
                    H_times_Z = torch.zeros((self.weights.numel(), self.args.n_moms),device=self.args.device)
                    for j in range(mom_index + 1):
                        retain = (j != mom_index) or self.args.learn_wd
                        H_times_Z[:, j] = torch.autograd.grad(grads @ Z_mom[:, j], self.weights, retain_graph=retain)[0]
                    if self.args.clamp_HZ: H_times_Z.clamp_(-self.args.clamp_HZ_range, self.args.clamp_HZ_range)
                    A_times_Z = (1 - lr*wd)*Z_mom - lr*H_times_Z
                    B = -lr*mom*C_mom
                    B[:, mom_index] -= lr*velocity
                    C_mom = mom*C_mom + H_times_Z + wd * Z_mom
                    C_mom[:, mom_index] += velocity
                    Z_mom = A_times_Z + B
                if self.args.learn_wd and not self.to_prune(epoch, batch_idx, self.args.n_wds):
                    #print('update wd')
                    self.n_batches_per_wd += 1
                    H_times_Z = torch.zeros((self.weights.numel(), self.args.n_wds),device=self.args.device)
                    for j in range(wd_index + 1):
                        retain = (j != wd_index)
                        H_times_Z[:, j] = torch.autograd.grad(grads @ Z_wd[:, j], self.weights, retain_graph=retain)[0]
                    if self.args.clamp_HZ: H_times_Z.clamp_(-self.args.clamp_HZ_range, self.args.clamp_HZ_range)
                    A_times_Z = (1 - lr*wd)*Z_wd - lr*H_times_Z
                    B = - lr*mom*C_wd
                    B[:, wd_index] -= lr*self.weights.detach()
                    C_wd = mom*C_wd + H_times_Z + wd*Z_wd
                    C_wd[:, wd_index] += self.weights.detach()
                    Z_wd = A_times_Z + B
                ## SGD inner update
                self.weights.detach_(), grads.detach_()
                velocity = velocity*mom + (grads + wd*self.weights)
                self.weights = self.weights - lr*velocity
                self.weights.requires_grad_()
            print(f'--- Ran epoch {epoch+1} in {format_time(time.time()-t0_epoch)} ---')
        if self.args.learn_lr: self.n_batches_per_lr /= self.args.n_lrs # each hyper gets same # of updates regardless of pruning mode
        if self.args.learn_mom: self.n_batches_per_mom /= self.args.n_moms
        if self.args.learn_wd: self.n_batches_per_wd /= self.args.n_wds
        return Z_lr, Z_mom, Z_wd
    def outer_step(self, outer_step_idx, Z_lr_final, Z_mom_final, Z_wd_final):
        """
        Calculate hypergradients and update hyperparameters accordingly.

        Hypergradient = dLval/dw @ Z (chain rule through the final weights),
        then applied with a sign-based rule: fixed per-element step sizes that
        decay whenever the hypergradient sign flips (Rprop-like).
        """
        ## Calculate validation gradients with final weights of inner loop
        self.running_val_grad = AggregateTensor()
        for batch_idx, (x_val, y_val) in enumerate(self.val_loader): #need as big batches as train mode for BN train mode
            x_val, y_val = x_val.to(device=self.args.device), y_val.to(device=self.args.device)
            val_logits = self.classifier.forward_with_param(x_val, self.weights)
            val_loss = self.cross_entropy(val_logits, y_val)
            dLval_dw = torch.autograd.grad(val_loss, self.weights)[0]
            self.running_val_grad.update(dLval_dw)
        ## Update hyperparams
        print('')
        if self.args.learn_lr:
            self.inner_lrs_grad = self.running_val_grad.avg() @ Z_lr_final / self.n_batches_per_lr
            self.all_lr_raw_grads[outer_step_idx] = self.inner_lrs_grad.detach()
            print('RAW LR GRADS:    ', ["{:.2E}".format(float(i)) for i in self.inner_lrs_grad])
            new_hypersigns = torch.sign(self.inner_lrs_grad) #Nans and zero have sign 0
            flipped_signs = self.lr_hypersigns*new_hypersigns # 1, -1 or 0
            multipliers = torch.tensor([self.args.lr_step_decay if f==-1.0 else 1.0 for f in flipped_signs], device=self.args.device)
            self.lr_step_sizes = multipliers*self.lr_step_sizes
            self.lr_hypersigns = new_hypersigns
            deltas = new_hypersigns*self.lr_step_sizes # how much to change hyperparameter by
            # Converged once every step size is a small fraction of its hyperparameter.
            self.lr_converged = ((self.lr_step_sizes/self.inner_lrs) < self.args.converged_frac).all()
            self.inner_lrs = self.inner_lrs - deltas
            self.all_lr_smooth_grads[outer_step_idx] = deltas
            print('SMOOTH LR DELTAS:    ', ["{:02.2f}".format(float(i)) for i in deltas])
        if self.args.learn_mom:
            self.inner_moms_grad = self.running_val_grad.avg() @ Z_mom_final / self.n_batches_per_mom
            self.all_mom_raw_grads[outer_step_idx] = self.inner_moms_grad.detach()
            print('RAW MOM GRADS:    ', ["{:.2E}".format(float(i)) for i in self.inner_moms_grad])
            new_hypersigns = torch.sign(self.inner_moms_grad) #Nans and zero have sign 0
            flipped_signs = self.mom_hypersigns*new_hypersigns # 1, -1 or 0
            multipliers = torch.tensor([self.args.mom_step_decay if f==-1.0 else 1.0 for f in flipped_signs], device=self.args.device)
            self.mom_step_sizes = multipliers*self.mom_step_sizes
            self.mom_hypersigns = new_hypersigns
            deltas = new_hypersigns*self.mom_step_sizes # how much to change hyperparameter by
            self.mom_converged = ((self.mom_step_sizes/self.inner_moms) < self.args.converged_frac).all()
            self.inner_moms = self.inner_moms - deltas
            self.all_mom_smooth_grads[outer_step_idx] = deltas
            print('SMOOTH MOM DELTAS:    ', ["{:02.2f}".format(float(i)) for i in deltas])
        if self.args.learn_wd:
            self.inner_wds_grad = self.running_val_grad.avg() @ Z_wd_final / self.n_batches_per_wd
            self.all_wd_raw_grads[outer_step_idx] = self.inner_wds_grad.detach()
            print('RAW WD GRADS:    ', ["{:.2E}".format(float(i)) for i in self.inner_wds_grad])
            new_hypersigns = torch.sign(self.inner_wds_grad) #Nans and zero have sign 0
            flipped_signs = self.wd_hypersigns*new_hypersigns # 1, -1 or 0
            multipliers = torch.tensor([self.args.wd_step_decay if f==-1.0 else 1.0 for f in flipped_signs], device=self.args.device)
            self.wd_step_sizes = multipliers*self.wd_step_sizes
            self.wd_hypersigns = new_hypersigns
            deltas = new_hypersigns*self.wd_step_sizes # how much to change hyperparameter by
            self.wd_converged = ((self.wd_step_sizes/self.inner_wds) < self.args.converged_frac).all()
            self.inner_wds = self.inner_wds - deltas
            self.all_wd_smooth_grads[outer_step_idx] = deltas
            print('SMOOTH WD DELTAS:    ', ["{:02.2f}".format(float(i)) for i in deltas])
        self.converged = (self.lr_converged if self.args.learn_lr else True) and (self.mom_converged if self.args.learn_mom else True) and (self.wd_converged if self.args.learn_wd else True)
    def run(self):
        """ Run meta learning experiment """
        t0 = time.time()
        for outer_step_idx in range(self.args.n_outer_steps): # number of outer steps
            ## Set up
            self.n_inner_epochs_for_this_outer_step = self.args.n_inner_epochs_per_outer_steps[outer_step_idx]
            print(f'\nOuter step {outer_step_idx+1}/{self.args.n_outer_steps} --- current budget of {self.n_inner_epochs_for_this_outer_step} epochs --- using:')
            print('lrs = ', [float('{:02.2e}'.format(el)) for el in self.inner_lrs],
                  'moms = ', [float('{:02.2e}'.format(el)) for el in self.inner_moms],
                  'wds = ', [float('{:02.2e}'.format(el)) for el in self.inner_wds])
            self.all_lr_schedules[outer_step_idx], self.all_mom_schedules[outer_step_idx], self.all_wd_schedules[outer_step_idx] = self.inner_lrs.detach(), self.inner_moms.detach(), self.inner_wds.detach()
            self.save_state(outer_step_idx) # state and lrs saved correspond to those set at the beginning of the outer_step
            ## New data split for each outer_step
            self.train_loader, self.val_loader, self.test_loader = get_loaders(datasets_path=self.args.datasets_path,
                                                                               dataset=self.args.dataset,
                                                                               train_batch_size=self.args.train_batch_size,
                                                                               val_batch_size=self.args.val_batch_size,
                                                                               val_source='train',
                                                                               val_train_fraction=self.args.val_train_fraction,
                                                                               val_train_overlap=self.args.val_train_overlap,
                                                                               workers=self.args.workers,
                                                                               train_infinite=False,
                                                                               val_infinite=False,
                                                                               cutout=self.args.cutout,
                                                                               cutout_length=self.args.cutout_length,
                                                                               cutout_prob=self.args.cutout_prob)
            self.n_batches_per_epoch = len(self.train_loader)
            self.n_total_batches_for_this_outer_step = self.n_inner_epochs_for_this_outer_step * self.n_batches_per_epoch
            ## Update Hypers
            Z_lr_final, Z_mom_final, Z_wd_final = self.inner_loop()
            self.outer_step(outer_step_idx, Z_lr_final, Z_mom_final, Z_wd_final)
            ## See if schedule used for this outer_step led to best validation
            _, val_acc = self.validate(self.weights)
            _, test_acc = self.test(self.weights)
            if val_acc > self.best_validation_acc:
                self.best_validation_acc = val_acc
                self.best_outer_step = outer_step_idx
                #print(f'Best validation acc at outer_step idx {outer_step_idx}')
            ## Break if all hyperparameters have converged
            # NOTE(review): the break comes before the logger.write below, so
            # the converged iteration's metrics are not written to the CSV.
            if self.converged:
                print('STOP HYPERTRAINING BECAUSE ALL HYPERPARAMETERS HAVE CONVERGED')
                break
            ## Time
            time_so_far = time.time() - t0
            self.logger.write({'budget': self.n_inner_epochs_for_this_outer_step, 'time': time_so_far,
                               'val_acc': val_acc, 'test_acc': test_acc})
            print(f'final val acc {100*val_acc:.2g} -- final test_acc: {100*test_acc:.2g}')
        ## Logging Final Metrics
        self.all_lr_schedules[outer_step_idx+1], self.all_mom_schedules[outer_step_idx+1], self.all_wd_schedules[outer_step_idx+1] = self.inner_lrs.detach(), self.inner_moms.detach(), self.inner_wds.detach() #last schedule was never trained on
        self.save_state(outer_step_idx+1)
        avg_test_loss, avg_test_acc = self.test(self.weights)
        return avg_test_acc
    def validate(self, weights, fraction=1.0):
        """ Fraction allows trading accuracy for speed when logging many times"""
        # Returns (avg loss, avg accuracy) on the validation loader; restores train mode after.
        self.classifier.eval()
        running_acc, running_loss = AggregateTensor(), AggregateTensor()
        with torch.no_grad():
            for batch_idx, (x, y) in enumerate(self.val_loader):
                x, y = x.to(device=self.args.device), y.to(device=self.args.device)
                logits = self.classifier.forward_with_param(x, weights)
                running_loss.update(self.cross_entropy(logits, y), x.shape[0])
                running_acc.update(accuracy(logits, y, topk=(1,))[0], x.shape[0])
                if fraction < 1 and (batch_idx + 1) >= fraction*len(self.val_loader):
                    break
        self.classifier.train()
        return float(running_loss.avg()), float(running_acc.avg())
    def test(self, weights, fraction=1.0):
        """ Fraction allows trading accuracy for speed when logging many times"""
        # Same as validate() but over the test loader.
        self.classifier.eval()
        running_acc, running_loss = AggregateTensor(), AggregateTensor()
        with torch.no_grad():
            for batch_idx, (x, y) in enumerate(self.test_loader):
                x, y = x.to(device=self.args.device), y.to(device=self.args.device)
                logits = self.classifier.forward_with_param(x, weights)
                running_loss.update(self.cross_entropy(logits, y), x.shape[0])
                running_acc.update(accuracy(logits, y, topk=(1,))[0], x.shape[0])
                if fraction < 1 and (batch_idx + 1) >= fraction*len(self.test_loader):
                    break
        self.classifier.train()
        return float(running_loss.avg()), float(running_acc.avg())
    def save_state(self, outer_step_idx):
        # Checkpoint the full meta-learning trajectory (schedules + grads) so a
        # run can be inspected offline; overwrites the same file each call.
        torch.save({'args': self.args,
                    'outer_step_idx': outer_step_idx,
                    'best_outer_step': self.best_outer_step,
                    'best_validation_acc': self.best_validation_acc,
                    'all_lr_schedules': self.all_lr_schedules,
                    'all_lr_raw_grads': self.all_lr_raw_grads,
                    'all_lr_smooth_grads': self.all_lr_smooth_grads,
                    'all_mom_schedules': self.all_mom_schedules,
                    'all_mom_raw_grads': self.all_mom_raw_grads,
                    'all_mom_smooth_grads': self.all_mom_smooth_grads,
                    'all_wd_schedules': self.all_wd_schedules,
                    'all_wd_raw_grads': self.all_wd_raw_grads,
                    'all_wd_smooth_grads': self.all_wd_smooth_grads}, self.checkpoint_path)
class BaseLearner(object):
    """
    Retrain from scratch using learned schedule and
    whole training set
    """
    def __init__(self, args, lr_schedule, mom_schedule, wd_schedule, log_name):
        self.args = args
        # The learned schedules are consumed read-only via get_hypers().
        self.inner_lrs = lr_schedule
        self.inner_moms = mom_schedule
        self.inner_wds = wd_schedule
        ## Loaders
        self.args.val_source = 'test' # retrain on full train set from scratch
        self.train_loader, _, self.test_loader = get_loaders(datasets_path=self.args.datasets_path,
                                                             dataset=self.args.dataset,
                                                             train_batch_size=self.args.train_batch_size,
                                                             val_batch_size=self.args.val_batch_size,
                                                             val_source=self.args.val_source,
                                                             val_train_fraction=self.args.val_train_fraction,
                                                             val_train_overlap=self.args.val_train_overlap,
                                                             workers=self.args.workers,
                                                             train_infinite=False,
                                                             val_infinite=False,
                                                             cutout=self.args.cutout,
                                                             cutout_length=self.args.cutout_length,
                                                             cutout_prob=self.args.cutout_prob)
        self.n_batches_per_epoch = len(self.train_loader)
        self.n_total_batches = self.args.retrain_n_epochs * self.n_batches_per_epoch
        ## Optimizer
        self.classifier = select_model(False, self.args.dataset, self.args.architecture,
                                       self.args.init_type, self.args.init_param,
                                       self.args.device).to(self.args.device)
        self.optimizer = optim.SGD(self.classifier.parameters(), lr=0.0, momentum=0.0, weight_decay=0.0) #set hypers manually later
        self.cross_entropy = nn.CrossEntropyLoss()
        ### Set up
        self.experiment_path = os.path.join(args.log_directory_path, args.experiment_name)
        self.logger = Logger(self.experiment_path, log_name)
    def log_init(self):
        # Reset per-epoch running statistics.
        self.running_train_loss, self.running_train_acc = AggregateTensor(), AggregateTensor()
    def log(self, epoch, avg_train_loss, avg_train_acc):
        # Only the final epoch evaluates on the full test set; earlier epochs
        # use a 10% subsample for speed.
        avg_test_loss, avg_test_acc = self.test(fraction=0.1 if epoch!=self.args.retrain_n_epochs-1 else 1)
        print('Retrain epoch {}/{} --- Train Acc: {:02.2f}% -- Test Acc: {:02.2f}%'.format(epoch+1, self.args.retrain_n_epochs, avg_train_acc * 100, avg_test_acc * 100))
        self.logger.write({'train_loss': avg_train_loss, 'train_acc': avg_train_acc, 'test_loss': avg_test_loss, 'test_acc': avg_test_acc})
        self.log_init()
    def get_hypers(self, epoch, batch_idx):
        """return hyperparameters to be used for given batch"""
        # Same segment-indexing scheme as MetaLearner.get_hypers, but over the
        # retraining horizon (n_total_batches).
        lr_index = int(self.args.n_lrs * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches)
        lr = float(self.inner_lrs[lr_index])
        mom_index = int(self.args.n_moms * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches)
        mom = float(self.inner_moms[mom_index])
        wd_index = int(self.args.n_wds * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches)
        wd = float(self.inner_wds[wd_index])
        return lr, mom, wd, lr_index, mom_index, wd_index
    def set_hypers(self, epoch, batch_idx):
        # Push the scheduled hyperparameters into the optimizer in place.
        lr, mom, wd, lr_index, mom_index, wd_index = self.get_hypers(epoch, batch_idx)
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
            param_group['momentum'] = mom
            param_group['weight_decay'] = wd
        #print(f'Setting: lr={lr}, mom={mom}, wd={wd}')
    def run(self):
        # Train for the full retraining budget, then report final test accuracy.
        for epoch in range(self.args.retrain_n_epochs):
            avg_train_loss, avg_train_acc = self.train(epoch)
            self.log(epoch, avg_train_loss, avg_train_acc)
        test_loss, test_acc = self.test()
        return test_acc
    def train(self, epoch):
        # One epoch of standard SGD training with per-batch scheduled hypers.
        self.classifier.train()
        running_acc, running_loss = AggregateTensor(), AggregateTensor()
        for batch_idx, (x,y) in enumerate(self.train_loader):
            self.set_hypers(epoch, batch_idx)
            x, y = x.to(device=self.args.device), y.to(device=self.args.device)
            logits = self.classifier(x)
            loss = self.cross_entropy(input=logits, target=y)
            acc1 = accuracy(logits.data, y, topk=(1,))[0]
            running_loss.update(loss, x.shape[0])
            running_acc.update(acc1, x.shape[0])
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        return float(running_loss.avg()), float(running_acc.avg())
    def test(self, fraction=1.0):
        """ fraction allows trading accuracy for speed when logging many times"""
        self.classifier.eval()
        running_acc, running_loss = AggregateTensor(), AggregateTensor()
        with torch.no_grad():
            for batch_idx, (x, y) in enumerate(self.test_loader):
                x, y = x.to(device=self.args.device), y.to(device=self.args.device)
                logits = self.classifier(x)
                running_loss.update(self.cross_entropy(logits, y), x.shape[0])
                running_acc.update(accuracy(logits, y, topk=(1,))[0], x.shape[0])
                if fraction < 1 and (batch_idx + 1) >= fraction*len(self.test_loader):
                    break
        self.classifier.train()
        return float(running_loss.avg()), float(running_acc.avg())
# ________________________________________________________________________________
# ________________________________________________________________________________
# ________________________________________________________________________________
def make_experiment_name(args):
    """Build a descriptive experiment folder name from the run arguments.

    Warning: Windows can have a weird behaviour for long filenames.
    Protip: switch to Ubuntu ;)
    """
    epochs_tag = "".join(str(budget) for budget in args.n_inner_epochs_per_outer_steps)
    parts = [
        f'FSL_{args.dataset}_{args.architecture}_nepr{epochs_tag}',
        f'_init{args.init_type}-{args.init_param}',
        f'_tbs{args.train_batch_size}',
    ]
    # Optional tags are appended only when the corresponding feature is enabled.
    if args.cutout:
        parts.append(f'_cutout-p{args.cutout_prob}')
    if args.clamp_HZ:
        parts.append(f'_HZclamp{args.clamp_HZ_range}')
    parts.append(f'_S{args.seed}')
    return "".join(parts)
def main(args):
    """Run the full experiment: meta-learn the schedules, record results, and
    optionally retrain a fresh network from scratch with the final schedule."""
    set_torch_seeds(args.seed)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    t0 = time.time()
    meta_learner = MetaLearner(args)
    meta_test_acc = meta_learner.run()
    total_time = time.time() - t0
    to_print = '\n\nMETA TEST ACC: {:02.2f}%'.format(meta_test_acc*100)
    # Results are also encoded in empty marker files so they are visible from a directory listing.
    file_name = "final_meta_test_acc_{:02.2f}_total_time_{}".format(meta_test_acc*100, format_time(total_time))
    create_empty_file(os.path.join(args.log_directory_path, args.experiment_name, file_name))
    if args.retrain_from_scratch:
        ## Fetch schedules
        # best_idx = meta_learner.best_outer_step
        final_lr_schedule, final_mom_schedule, final_wd_schedule = meta_learner.all_lr_schedules[-1], meta_learner.all_mom_schedules[-1], meta_learner.all_wd_schedules[-1]
        # best_lr_schedule, best_mom_schedule, best_wd_schedule = meta_learner.all_lr_schedules[best_idx], meta_learner.all_mom_schedules[best_idx], meta_learner.all_wd_schedules[best_idx]
        del meta_learner # free the forward-mode buffers before retraining
        ## Retrain Last
        print(f'\n\n\n---------- RETRAINING FROM SCRATCH WITH LAST SCHEDULE (idx {args.n_outer_steps}) ----------')
        print(f'lrs = {final_lr_schedule.tolist()}')
        print(f'moms = {final_mom_schedule.tolist()}')
        print(f'wds = {final_wd_schedule.tolist()}')
        log_name = f'Rerun_last_outer_step.csv'
        base_learner = BaseLearner(args, final_lr_schedule, final_mom_schedule, final_wd_schedule, log_name)
        if args.use_gpu: torch.cuda.empty_cache()
        base_test_acc = base_learner.run()
        to_print += '\nRE-RUN LAST SCHEDULE TEST ACC: {:02.2f}%'.format(base_test_acc*100)
        file_name = "Rerun_last_test_acc_{:02.2f}".format(base_test_acc*100)
        create_empty_file(os.path.join(args.log_directory_path, args.experiment_name, file_name))
        # ## Retrain Best Val
        # print(f'\n\n\n---------- RETRAINING FROM SCRATCH WITH BEST VAL SCHEDULE (idx {best_idx}) ----------')
        # print(f'lrs = {best_lr_schedule.tolist()}')
        # print(f'moms = {best_mom_schedule.tolist()}')
        # print(f'wds = {best_wd_schedule.tolist()}')
        #
        # log_name = f'Rerun_best_outer_step_idx_{best_idx}.csv'
        # base_learner = BaseLearner(args, best_lr_schedule, best_mom_schedule, best_wd_schedule, log_name)
        # if args.use_gpu: torch.cuda.empty_cache()
        # base_test_acc = base_learner.run()
        # to_print += '\nRE-RUN BEST SCHEDULE TEST ACC: {:02.2f}%'.format(base_test_acc*100)
        # file_name = "Rerun_best_test_acc_{:02.2f}".format(base_test_acc*100)
        # create_empty_file(os.path.join(args.log_directory_path, args.experiment_name, file_name))
    print(to_print)
if __name__ == "__main__":
    import argparse
    print('Running...')
    parser = argparse.ArgumentParser(description='Welcome to GreedyGrad')
    ## Main
    parser.add_argument('--learn_lr', type=str2bool, default=True)
    parser.add_argument('--learn_mom', type=str2bool, default=True)
    parser.add_argument('--learn_wd', type=str2bool, default=True)
    parser.add_argument('--n_lrs', type=int, default=7)
    parser.add_argument('--n_moms', type=int, default=1)
    parser.add_argument('--n_wds', type=int, default=1)
    parser.add_argument('--dataset', type=str, default='CIFAR10')
    parser.add_argument('--n_inner_epochs_per_outer_steps', nargs='*', type=int, default=[1, 10, 10, 10, 10, 10, 10, 10, 10, 10], help='number of epochs to run for each outer step')
    parser.add_argument('--pruning_mode', type=str, choices=['alternate', 'truncate'], default='alternate')
    parser.add_argument('--pruning_ratio', type=float, default=0.0, help='<1, how many inner steps to skip Z calculation for expressed as a fraction of total inner steps per hyper')
    ## Architecture
    parser.add_argument('--architecture', type=str, default='WRN-16-1')
    parser.add_argument('--init_type', type=str, default='xavier', choices=['normal', 'xavier', 'kaiming', 'orthogonal', 'zero', 'default'], help='network initialization scheme')
    parser.add_argument('--init_param', type=float, default=1, help='network initialization param: gain, std, etc.')
    parser.add_argument('--init_norm_weights', type=float, default=1, help='init gammas of BN')
    ## Inner Loop
    parser.add_argument('--inner_lr_init', type=float, default=0, help='SGD inner learning rate init')
    parser.add_argument('--inner_mom_init', type=float, default=0, help='SGD inner momentum init')
    parser.add_argument('--inner_wd_init', type=float, default=0, help='SGD inner weight decay init')
    parser.add_argument('--train_batch_size', type=int, default=256)
    parser.add_argument('--clamp_grads', type=str2bool, default=True)
    parser.add_argument('--clamp_grads_range', type=float, default=3, help='clamp inner grads for each batch to +/- that')
    parser.add_argument('--cutout', type=str2bool, default=False)
    parser.add_argument('--cutout_length', type=int, default=16)
    parser.add_argument('--cutout_prob', type=float, default=1, help='clamp inner grads for each batch to +/- that')
    ## Outer Loop
    parser.add_argument('--val_batch_size', type=int, default=500)
    parser.add_argument('--val_train_fraction', type=float, default=0.05)
    parser.add_argument('--val_train_overlap', type=str2bool, default=False, help='if True and val_source=train, val images are also in train set')
    parser.add_argument('--lr_init_step_size', type=float, default=0.1, help='at each iteration grads changed so that each hyper can only change by this fraction (ignoring outer momentum)')
    parser.add_argument('--mom_init_step_size', type=float, default=0.1)
    parser.add_argument('--wd_init_step_size', type=float, default=3e-4)
    parser.add_argument('--lr_step_decay', type=float, default=0.5, help='step size multiplied by this much if hypergrad sign changes')
    parser.add_argument('--mom_step_decay', type=float, default=0.5, help='step size multiplied by this much if hypergrad sign changes')
    parser.add_argument('--wd_step_decay', type=float, default=0.5, help='step size multiplied by this much if hypergrad sign changes')
    parser.add_argument('--clamp_HZ', type=str2bool, default=True)
    parser.add_argument('--clamp_HZ_range', type=float, default=1, help='clamp to +/- that')
    parser.add_argument('--converged_frac', type=float, default=0.05, help='if steps are smaller than this percentage of hypers, stop experiment')
    ## Other
    parser.add_argument('--retrain_from_scratch', type=str2bool, default=True, help='retrain from scratch with learned lr schedule')
    parser.add_argument('--retrain_n_epochs', type=int, default=50, help='interpolates from learned schedule, -1 for same as n_inner_epochs_per_outer_steps[-1]')
    parser.add_argument('--datasets_path', type=str, default="~/Datasets/Pytorch/")
    parser.add_argument('--log_directory_path', type=str, default="./logs/")
    parser.add_argument('--epoch_log_freq', type=int, default=1, help='every how many epochs to save summaries')
    parser.add_argument('--outer_step_log_freq', type=int, default=1, help='every how many outer_steps to save the whole run')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--workers', type=int, default=0)
    parser.add_argument('--use_gpu', type=str2bool, default=True)
    args = parser.parse_args()
    # Derived arguments and sanity checks.
    args.dataset_path = os.path.join(args.datasets_path, args.dataset)
    args.use_gpu = args.use_gpu and torch.cuda.is_available()
    args.device = torch.device('cuda') if args.use_gpu else torch.device('cpu')
    # NOTE(review): these asserts are stripped under `python -O`; explicit
    # raises would be more robust for argument validation.
    assert args.lr_step_decay < 1
    assert args.mom_step_decay < 1
    assert args.wd_step_decay < 1
    assert args.converged_frac < 1
    if args.retrain_n_epochs < 0: args.retrain_n_epochs = args.n_inner_epochs_per_outer_steps[-1]
    assert args.pruning_ratio <= 1
    args.n_outer_steps = len(args.n_inner_epochs_per_outer_steps)
    args.experiment_name = make_experiment_name(args)
    print('\nRunning on device: {}'.format(args.device))
    if args.use_gpu: print(torch.cuda.get_device_name(0))
    main(args)
| 37,988
| 54.866176
| 243
|
py
|
FDS
|
FDS-main/theorem4_checker_simple.py
|
"""
This is to check that Theorem 4.1 holds in the case where all the cross terms of the covariance matrix are zero, i.e.
each hypergradient is independent of all other hypergradients. We also use a constant variance sigma^2 for all steps.
"""
import numpy as np
import random
from utils.helpers import *
class ProofChecker(object):
    def __init__(self, args):
        """Set up the experiment: truncate the horizon T to a whole number of
        windows of size W, and print the analytically expected MSEs."""
        self.args = args
        self.args.T = args.T - args.T%args.W # make sure we have a whole number of windows that fit inside horizon
        self.K = int(args.T/args.W) # number of shared hyperparameters (windows)
        print(f'Running experiments for a total of T={self.args.T} while using {self.K} hyperparameters, each shared over W={args.W} contiguous steps')
        print(f'not sharing: expected MSE = sigma^2 = {args.sigma**2}')
        print(f'sharing: expected MSE for min drift = sigma^2/W = {args.sigma**2/args.W}')
        print(f'sharing: expected MSE for max drift (upper bound) = sigma^2/W + eps^2(W^2-1)/12 = {args.sigma**2/args.W + args.epsilon**2*(args.W**2 - 1)/12}')
def sample_min_drift(self):
"""
epsilon_t = 0 for all time steps
"""
hypergrad_means = np.array([self.args.mu_0 for _ in range(self.args.T)])
hypergrads = np.random.normal(hypergrad_means, self.args.sigma, size=(self.args.n_seeds, self.args.T))
optimal_hypergrads = hypergrad_means
return hypergrads, optimal_hypergrads
def sample_max_drift(self):
"""
epsilon_t = epsilon for all time steps
"""
hypergrad_means = np.array([self.args.mu_0 + n*self.args.epsilon for n in range(self.args.T)])
hypergrads = np.random.normal(hypergrad_means, self.args.sigma, size=(self.args.n_seeds, self.args.T))
optimal_hypergrads = hypergrad_means
return hypergrads, optimal_hypergrads
def sample_random_drift(self):
epsilons = np.random.uniform(-self.args.epsilon, self.args.epsilon, self.args.T-1)
hypergrad_means = [self.args.mu_0]
for eps in epsilons:
hypergrad_means.append(hypergrad_means[-1]+eps)
hypergrad_means = np.array(hypergrad_means)
hypergrads = np.random.normal(hypergrad_means, self.args.sigma, size=(self.args.n_seeds, self.args.T))
optimal_hypergrads = hypergrad_means
return hypergrads, optimal_hypergrads
def mse_not_sharing(self, hypergrads, optimal_hypergrads):
return np.mean((hypergrads - optimal_hypergrads)**2)
def mse_sharing(self, hypergrads, optimal_hypergrads):
hypergrads_after_sharing = [np.mean(h.reshape((self.K, self.args.W)), axis=1).repeat(self.args.W) for h in hypergrads]
hypergrads_after_sharing = np.array(hypergrads_after_sharing)
return np.mean((hypergrads_after_sharing - optimal_hypergrads)**2)
def run(self):
print('\nMIN DRIFT:')
hypergrads, optimal_hypergrads = self.sample_min_drift()
mse_not_sharing, mse_sharing = self.mse_not_sharing(hypergrads, optimal_hypergrads), self.mse_sharing(hypergrads, optimal_hypergrads)
print(f'actual mse when not sharing = {mse_not_sharing:.5f} --- mse sharing = {mse_sharing:.5f}')
print('\nMAX DRIFT:')
hypergrads, optimal_hypergrads = self.sample_max_drift()
mse_not_sharing, mse_sharing = self.mse_not_sharing(hypergrads, optimal_hypergrads), self.mse_sharing(hypergrads, optimal_hypergrads)
print(f'actual mse not sharing = {mse_not_sharing:.5f} --- mse sharing = {mse_sharing:.5f}')
print('\nRANDOM DRIFT:')
hypergrads, optimal_hypergrads = self.sample_random_drift()
mse_not_sharing, mse_sharing = self.mse_not_sharing(hypergrads, optimal_hypergrads), self.mse_sharing(hypergrads, optimal_hypergrads)
print(f'actual mse not sharing = {mse_not_sharing:.5f} --- mse sharing = {mse_sharing:.5f}')
def main(args):
np.random.seed(args.seed)
t0 = time.time()
proof = ProofChecker(args)
proof.run()
print(f'\nTotal time: {format_time(time.time() - t0)}')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Welcome to GreedyGrad')
parser.add_argument('--T', type=int, default=400, help='total number of inner_steps or batches, where each batch would make use of a different hyperparameter')
parser.add_argument('--n_seeds', type=int, default=10000, help='how many seeds to sample. Each seed = T hypergradients')
parser.add_argument('--W', type=int, default=40, help='window to share hyperparameters from contiguous steps over')
parser.add_argument('--mu_0', type=float, default=0.0)
parser.add_argument('--sigma', type=float, default=0.25)
parser.add_argument('--epsilon', type=float, default=0.08)
parser.add_argument('--seed', type=int, default=2)
args = parser.parse_args()
assert args.W%2==0, "even W required for lower bound of MSE_shared to be right"
main(args)
### NOTES:
# sharing should always help in min_drift/random_drift setting, but needs small W to help in max_drift setting
| 5,039
| 45.666667
| 163
|
py
|
FDS
|
FDS-main/theorem4_checker_advanced.py
|
"""
This is to check that Theorem 4.1 holds in the case where each step has its own variance,
and where all steps are correlated with one another
"""
import numpy as np
import random
from sklearn.datasets import make_spd_matrix
from utils.helpers import *
class ProofChecker(object):
def __init__(self, args):
self.args = args
self.args.T = args.T - args.T%args.W # make sure we have a whole number of windows that fit inside horizon
self.K = int(args.T/args.W)
T, W, eps, c = args.T, args.W, args.epsilon, args.c
# self.correlation_matrix = np.random.uniform(low=-args.c, high=args.c, size=(args.T, args.T))
# np.fill_diagonal(self.correlation_matrix, 1)
# self.sigmas = np.random.uniform(low=0, high=args.max_sigma, size=args.T)
# self.covariance_matrix = np.diag(self.sigmas)@self.correlation_matrix@np.diag(self.sigmas)
## Correlation matrix has lots of different values, maximum is c
# self.covariance_matrix = make_spd_matrix(T)/10 #random positive definite symmetric matrix
# np.fill_diagonal(self.covariance_matrix, np.random.uniform(1,args.max_var, T)) #increase var = lower maximum correlation
# vars = np.diag(self.covariance_matrix)
# stds = np.sqrt(vars)
# self.correlation_matrix = self.covariance_matrix / np.outer(stds, stds)
# np.fill_diagonal(self.correlation_matrix, 0)
# c0 = np.max(np.abs(self.correlation_matrix)) #max correlation
# assert c0 < 1
## worst case correlation matrix has c for all it's non-diagonal entries
# we still need the covariance to be positive semi definite. It can be shown that
# if all off-diagonal entries of the TxT matrix are equal to c, then we need c >= -1/(T-1)
self.correlation_matrix_worst_case = np.full((T,T), c)
np.fill_diagonal(self.correlation_matrix_worst_case, 1)
vars = np.random.uniform(1,args.max_var, T)
stds = np.sqrt(vars)
self.covariance_matrix_worst_case = self.correlation_matrix_worst_case * np.outer(stds, stds)
print(f'sum of covariance matrix: {np.sum(self.covariance_matrix_worst_case)}')
print(f'Running experiments for a total of T={self.args.T} while using {self.K} windows of W={W} steps, running {args.n_seeds} seeds')
print(f'max off diagonal correlation is c: {c:.3f}')
print(f'not sharing: expected MSE = {np.mean(vars)}')
print(f'sharing: expected MSE upper bound for max drift = (1+c(W-1))/W)*(1/T)*sum(D_tt) + eps^2(W^2-1)/12 = {((1+c*(W-1))/W) * np.mean(vars) + eps**2*(W**2 - 1)/12}')
# print(f'W* = best W when max drift = lower bound to optimal W otherwise = (6*sigma^2/esilon^2)^(1/3) = {(6*args.sigma**2/args.epsilon**2)**(1/3):.3f}')
def sample_max_drift(self):
"""
epsilon_t = epsilon for all time steps
"""
hypergrad_means = np.array([self.args.mu_0 + n*self.args.epsilon for n in range(self.args.T)])
# hypergrads = np.random.multivariate_normal(hypergrad_means, self.covariance_matrix, size=(self.args.n_seeds))
hypergrads = np.random.multivariate_normal(hypergrad_means, self.covariance_matrix_worst_case, size=(self.args.n_seeds))
optimal_hypergrads = hypergrad_means
return hypergrads, optimal_hypergrads
def sample_min_drift(self):
"""
epsilon_t = 0 for all time steps
"""
hypergrad_means = np.array([self.args.mu_0 for _ in range(self.args.T)])
# hypergrads = np.random.multivariate_normal(hypergrad_means, self.covariance_matrix, size=(self.args.n_seeds))
hypergrads = np.random.multivariate_normal(hypergrad_means, self.covariance_matrix_worst_case, size=(self.args.n_seeds))
optimal_hypergrads = hypergrad_means
return hypergrads, optimal_hypergrads
def sample_random_drift(self):
epsilons = np.random.uniform(-self.args.epsilon, self.args.epsilon, self.args.T-1)
hypergrad_means = [self.args.mu_0]
for eps in epsilons:
hypergrad_means.append(hypergrad_means[-1]+eps)
hypergrad_means = np.array(hypergrad_means)
# hypergrads = np.random.multivariate_normal(hypergrad_means, self.covariance_matrix, size=(self.args.n_seeds))
hypergrads = np.random.multivariate_normal(hypergrad_means, self.covariance_matrix_worst_case, size=(self.args.n_seeds))
optimal_hypergrads = hypergrad_means
return hypergrads, optimal_hypergrads
def mse_not_sharing(self, hypergrads, optimal_hypergrads):
return np.mean((hypergrads - optimal_hypergrads)**2)
def mse_sharing(self, hypergrads, optimal_hypergrads):
hypergrads_after_sharing = [np.mean(h.reshape((self.K, self.args.W)), axis=1).repeat(self.args.W) for h in hypergrads]
hypergrads_after_sharing = np.array(hypergrads_after_sharing)
return np.mean((hypergrads_after_sharing - optimal_hypergrads)**2)
def run(self):
print('\nMIN DRIFT:')
hypergrads, optimal_hypergrads = self.sample_min_drift()
mse_not_sharing, mse_sharing = self.mse_not_sharing(hypergrads, optimal_hypergrads), self.mse_sharing(hypergrads, optimal_hypergrads)
print(f'actual mse when not sharing = {mse_not_sharing:.5f} --- mse sharing = {mse_sharing:.5f}')
print('\nMAX DRIFT:')
hypergrads, optimal_hypergrads = self.sample_max_drift()
mse_not_sharing, mse_sharing = self.mse_not_sharing(hypergrads, optimal_hypergrads), self.mse_sharing(hypergrads, optimal_hypergrads)
print(f'actual mse not sharing = {mse_not_sharing:.5f} --- mse sharing = {mse_sharing:.5f}')
print('\nRANDOM DRIFT:')
hypergrads, optimal_hypergrads = self.sample_random_drift()
mse_not_sharing, mse_sharing = self.mse_not_sharing(hypergrads, optimal_hypergrads), self.mse_sharing(hypergrads, optimal_hypergrads)
print(f'actual mse not sharing = {mse_not_sharing:.5f} --- mse sharing = {mse_sharing:.5f}')
def main(args):
np.random.seed(args.seed)
random.seed(args.seed)
t0 = time.time()
proof = ProofChecker(args)
proof.run()
print(f'\nTotal time: {format_time(time.time() - t0)}')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Welcome to GreedyGrad')
parser.add_argument('--T', type=int, default=400, help='total number of inner_steps or batches, where each batch would make use of a different hyperparameter')
parser.add_argument('--n_seeds', type=int, default=10000, help='how many seeds to sample. Each seed = T hypergradients')
parser.add_argument('--W', type=int, default=40, help='window to share hyperparameters from contiguous steps over')
parser.add_argument('--c', type=float, default=0.1, help='used for all values in correlation matrix')
parser.add_argument('--epsilon', type=float, default=0.01)
parser.add_argument('--max_var', type=float, default=1.5, help='high values make max correlation smaller. Must be >1 to preserve semi-definite nature of covariance matrix')
parser.add_argument('--mu_0', type=float, default=0.0)
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
assert args.max_var >= 1
assert args.c > -1/(args.T-1) #otherwise covariance won't be positive semi-definite
main(args)
### NOTES:
# sharing should always help in min_drift/random_drift setting, but needs small W to help in max_drift setting
| 7,505
| 52.234043
| 176
|
py
|
FDS
|
FDS-main/figure2_hypergradients_fluctuation.py
|
"""
Here we measure hypergradients for several runs when perturbing
the training data and weight initialization. This must be done on toy
datasets where reverse-mode differentiation is tractable. This corresponds
to figure 2 in the paper.
"""
import torch.optim as optim
import pickle
import os
import warnings
import sys
import shutil
import torch
import torch.nn.functional as F
import torch.optim as optimw
from utils.helpers import *
from utils.datasets import *
from models.selector import *
class HyperGradFluctuation(object):
def __init__(self, args):
self.args = args
self.hypergrads_all = torch.zeros((self.args.n_runs, self.args.T))
self.cross_entropy = nn.CrossEntropyLoss()
self.init_lr_schedule()
## Loaders
self.infinite_train_loader, self.val_loader, _ = get_loaders(datasets_path=self.args.datasets_path,
dataset=self.args.dataset,
train_batch_size=self.args.train_batch_size,
val_batch_size=self.args.n_val_images,
val_source='test',
workers=self.args.workers,
train_infinite=True,
val_infinite=False)
for x,y in self.val_loader: self.X_val, self.Y_val = x.to(device=self.args.device), y.to(device=self.args.device)
## Set up experiment folder
self.experiment_path = os.path.join(self.args.log_directory_path, self.args.experiment_name)
if os.path.isfile(os.path.join(self.experiment_path, 'hypergrads.pth.tar')):
if args.use_gpu: raise FileExistsError(f'Experiment already ran and exists at {self.experiment_path}. \nStopping now')
else:
if os.path.exists(self.experiment_path):
shutil.rmtree(self.experiment_path)
os.makedirs(self.experiment_path)
## Save and Print Args
copy_file(os.path.realpath(__file__), self.experiment_path) # save this python file in logs folder
print('\n---------')
with open(os.path.join(self.experiment_path, 'args.txt'), 'w+') as f:
for k, v in self.args.__dict__.items():
print(k, v)
f.write("{} \t {}\n".format(k, v))
print('---------\n')
def init_lr_schedule(self):
if self.args.inner_lr_cosine_anneal:
dummy_opt = optim.SGD([torch.ones([1], requires_grad=True)], lr=self.args.inner_lr_init)
dummy_scheduler = optim.lr_scheduler.CosineAnnealingLR(dummy_opt, T_max=self.args.T)
lrs = []
for i in range(self.args.T):
lrs.append(dummy_scheduler.get_last_lr()[0])
dummy_opt.step()
dummy_scheduler.step()
self.inner_lrs = torch.tensor(lrs, requires_grad=True, device=self.args.device)
else:
self.inner_lrs = torch.full((self.args.T,), self.args.inner_lr_init, requires_grad=True, device=self.args.device)
def inner_and_outer_loop(self):
for self.inner_step_idx, (x_train, y_train) in enumerate(self.infinite_train_loader):
x_train, y_train = x_train.to(self.args.device, self.args.dtype), y_train.to(self.args.device)
train_logits = self.classifier.forward_with_param(x_train, self.weights)
train_loss = self.cross_entropy(train_logits, y_train)
grads = torch.autograd.grad(train_loss, self.weights, create_graph=True)[0]
if self.args.clamp_inner_grads: grads.clamp_(-self.args.clamp_inner_grads_range, self.args.clamp_inner_grads_range)
self.velocity = self.args.inner_momentum * self.velocity + (grads + self.args.inner_weight_decay * self.weights)
self.weights = self.weights - self.inner_lrs[self.inner_step_idx] * self.velocity
if self.args.greedy:
self.compute_hypergradients() #only populates .grad of one item in self.inner_lrs
self.weights.detach_().requires_grad_()
self.velocity.detach_().requires_grad_()
if self.inner_step_idx+1 == self.args.T: break
if not self.args.greedy: self.compute_hypergradients() #populates .grad of all items in self.inner_lrs
def compute_hypergradients(self):
val_logits = self.classifier.forward_with_param(self.X_val, self.weights)
val_loss = self.cross_entropy(val_logits, self.Y_val)
val_loss.backward()
def run(self):
for self.run_idx in range(self.args.n_runs):
self.classifier = select_model(True, self.args.dataset, self.args.architecture,
self.args.init_type, self.args.init_param,
self.args.device).to(self.args.device)
self.weights = self.classifier.get_param()
self.velocity = torch.zeros(self.weights.numel(), device=self.args.device)
self.inner_and_outer_loop()
self.hypergrads_all[self.run_idx] = self.inner_lrs.grad.detach()
self.inner_lrs.grad.data.zero_()
self.save_final()
def save_final(self):
torch.save({'args': self.args,
'hypergrads_all': self.hypergrads_all},
os.path.join(self.experiment_path, 'hypergrads.pth.tar'))
print(f"Saved hypergrads to {os.path.join(self.experiment_path, 'hypergrads.pth.tar')}")
# ________________________________________________________________________________
# ________________________________________________________________________________
# ________________________________________________________________________________
def make_experiment_name(args):
experiment_name = f'Hg_{args.dataset}_{args.init_type}_T{args.T}_tbs{args.train_batch_size}_mom{args.inner_momentum}_wd{args.inner_weight_decay}_ilr{args.inner_lr_init}'
if args.inner_lr_cosine_anneal: experiment_name += f'cosine'
if args.greedy: experiment_name += f'_GREEDY'
if args.dtype == torch.float64: experiment_name += '_FL64'
experiment_name += f'_S{args.seed}'
return experiment_name
def main(args):
set_torch_seeds(args.seed)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
t0 = time.time()
hypervariance_learner = HyperGradFluctuation(args)
hypervariance_learner.run()
total_time = time.time() - t0
with open(os.path.join(args.log_directory_path, args.experiment_name, 'TOTAL_TIME_' + format_time(total_time)), 'w+') as f:
f.write("NA")
if __name__ == "__main__":
import argparse
print('Running...')
parser = argparse.ArgumentParser(description='Welcome to GreedyGrad')
## Main
parser.add_argument('--T', type=int, default=250, help='number of batches for the task and to learn a schedule over')
parser.add_argument('--n_runs', type=int, default=100, help='how many times to compute hypergrads, with different train-val-split each time')
parser.add_argument('--dataset', type=str, default='SVHN')
parser.add_argument('--greedy', type=str2bool, default=False)
parser.add_argument('--architecture', type=str, default='LeNet')
parser.add_argument('--init_type', type=str, default='xavier', choices=['normal', 'xavier', 'kaiming', 'orthogonal', 'zero', 'default'], help='network initialization scheme')
parser.add_argument('--init_param', type=float, default=1, help='network initialization param: gain, std, etc.')
parser.add_argument('--n_val_images', type=int, default=2000, help='ignored unless val_source=train') #20% of 60k=12000
## Inner Loop
parser.add_argument('--inner_lr_init', type=float, default=0.01, help='Used to initialize inner learning rate(s).')
parser.add_argument('--inner_lr_cosine_anneal', type=str2bool, default=True, help='Initial schedule is cosine annealing')
parser.add_argument('--inner_momentum', type=float, default=0.9, help='SGD inner momentum')
parser.add_argument('--inner_weight_decay', type=float, default=0.0, help='SGD + ADAM inner weight decay')
parser.add_argument('--train_batch_size', type=int, default=128)
parser.add_argument('--clamp_inner_grads', type=str2bool, default=True)
parser.add_argument('--clamp_inner_grads_range', type=float, default=1, help='clamp inner grads for each batch to +/- that')
## Misc
parser.add_argument('--datasets_path', type=str, default="~/Datasets/Pytorch/")
parser.add_argument('--log_directory_path', type=str, default="./logs/")
parser.add_argument('--dtype', type=str, default='float32', choices=['float32', 'float64'])
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--workers', type=int, default=0)
parser.add_argument('--use_gpu', type=str2bool, default=True)
args = parser.parse_args()
args.dataset_path = os.path.join(args.datasets_path, args.dataset)
args.use_gpu = args.use_gpu and torch.cuda.is_available()
args.device = torch.device('cuda') if args.use_gpu else torch.device('cpu')
if args.dtype == 'float64':
torch.set_default_tensor_type(torch.DoubleTensor) # changes weights and tensors but not loaders
args.dtype = torch.float64 if args.dtype == 'float64' else torch.float32
print('\nRunning on device: {}'.format(args.device))
args.experiment_name = make_experiment_name(args)
main(args)
| 9,738
| 49.201031
| 178
|
py
|
FDS
|
FDS-main/models/wresnet.py
|
"""
Base architecture taken from https://github.com/xternalz/WideResNet-pytorch
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.meta_factory import ReparamModule
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate):
super(BasicBlock, self).__init__()
self.dropRate = dropRate
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if self.equalInOut:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out)))
else: #keep x var so can add it in skip connection
x = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(x)))
if self.dropRate > 0:
out = F.dropout(out, p=self.dropRate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, n_classes, n_channels, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert((depth - 4) % 6 == 0)
n = (depth - 4) / 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(n_channels, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.final_bn = nn.BatchNorm2d(nChannels[3], affine=True)
self.final_relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], n_classes)
self.nChannels = nChannels[3]
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.final_relu(self.final_bn(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
class MetaWideResNet(ReparamModule):
def __init__(self, depth, n_classes, n_channels, widen_factor=1, dropRate=0.0, device='cpu'):
super(MetaWideResNet, self).__init__()
self.device = device
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert((depth - 4) % 6 == 0)
n = (depth - 4) / 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(n_channels, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.final_bn = nn.BatchNorm2d(nChannels[3], affine=True)
self.final_relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], n_classes)
self.nChannels = nChannels[3]
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.final_relu(self.final_bn(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
if __name__ == '__main__':
import time
from torchsummary import summary
from utils.helpers import *
set_torch_seeds(0)
x = torch.FloatTensor(2, 3, 32, 32).uniform_(0, 1)
## Test normal WRN
model = WideResNet(depth=40, widen_factor=2, n_channels=3, n_classes=10, dropRate=0.0)
t0 = time.time()
out = model(x)
print(f'time for normal fw pass: {time.time() - t0}s')
summary(model, (3, 32, 32))
## Test meta WRN
model = MetaWideResNet(depth=40, widen_factor=2, n_channels=3, n_classes=10, device='cpu')
weights = model.get_param()
t0 = time.time()
out = model.forward_with_param(x, weights)
print(f'time for meta fw pass: {time.time() - t0}s')
summary(model, (3, 32, 32))
| 5,745
| 38.627586
| 116
|
py
|
FDS
|
FDS-main/models/lenet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.meta_factory import ReparamModule
from models.helpers import *
class Flatten(nn.Module):
"""
NN module version of torch.nn.functional.flatten
"""
def __init__(self):
super().__init__()
def forward(self, input):
return torch.flatten(input, start_dim=1, end_dim=-1)
class LeNet(nn.Module):
def __init__(self, n_classes, n_channels, im_size):
super(LeNet, self).__init__()
assert im_size in [28, 32]
h = 16*5*5 if im_size==32 else 16*4*4
self.n_classes = n_classes
self.n_channels = n_channels
self.im_size = im_size
self.layers = nn.Sequential(
nn.Conv2d(n_channels, 6, kernel_size=5, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
Flatten(),
nn.Linear(h, 120),
nn.ReLU(inplace=True),
nn.Linear(120, 84),
nn.ReLU(inplace=True),
nn.Linear(84, n_classes))
def forward(self, x):
return self.layers(x)
class MetaLeNet(ReparamModule):
def __init__(self, n_classes, n_channels, im_size, device='cpu'):
super(MetaLeNet, self).__init__()
assert im_size in [28, 32]
h = 16*5*5 if im_size==32 else 16*4*4
self.n_classes = n_classes
self.n_channels = n_channels
self.im_size = im_size
self.device = device # must be defined for parent class
self.layers = nn.Sequential(
nn.Conv2d(n_channels, 6, kernel_size=5, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
Flatten(),
nn.Linear(h, 120),
nn.ReLU(inplace=True),
nn.Linear(120, 84),
nn.ReLU(inplace=True),
nn.Linear(84, n_classes))
def forward(self, x):
return self.layers(x)
if __name__ == '__main__':
import time
from torchsummary import summary
from utils.helpers import *
set_torch_seeds(0)
x = torch.FloatTensor(256, 3, 32, 32).uniform_(0, 1)
## Test meta LeNet
model = MetaLeNet(n_classes=10, n_channels=3, im_size=32, device='cpu')
weights = model.get_param()
t0 = time.time()
out = model.forward_with_param(x, weights)
print(f'time for meta fw pass: {time.time() - t0}s')
summary(model, (3, 32, 32))
| 2,829
| 25.203704
| 75
|
py
|
FDS
|
FDS-main/models/meta_factory.py
|
"""
This is a slim version of the code from https://github.com/SsnL/dataset-distillation
"""
import torch
import torchvision
import logging
import torch.nn as nn
import torch.nn.functional as F
import functools
import math
import types
from contextlib import contextmanager
from torch.optim import lr_scheduler
from six import add_metaclass
from itertools import chain
from copy import deepcopy
from models.helpers import *
class MetaFactory(type):
def __call__(cls, *args, **kwargs):
r"""Called when you call ReparamModule(...) """
net = type.__call__(cls, *args, **kwargs)
# collect weight (module, name) pairs
# flatten weights
w_modules_names = []
for m in net.modules():
for n, p in m.named_parameters(recurse=False):
if p is not None:
w_modules_names.append((m, n))
for n, b in m.named_buffers(recurse=False):
if b is not None:
pass
# logging.warning((
# '{} contains buffer {}. The buffer will be treated as '
# 'a constant and assumed not to change during gradient '
# 'steps. If this assumption is violated (e.g., '
# 'BatcHNorm*d\' running_mean/var), the computation will '
# 'be incorrect.').format(m.__class__.__name__, n))
net._weights_module_names = tuple(w_modules_names)
# Put to correct device before we do stuff on parameters
#net = net.to(device)
ws = tuple(m._parameters[n].detach() for m, n in w_modules_names)
assert len(set(w.dtype for w in ws)) == 1
# reparam to a single flat parameter
net._weights_numels = tuple(w.numel() for w in ws)
net._weights_shapes = tuple(w.shape for w in ws)
with torch.no_grad():
flat_w = torch.cat([w.reshape(-1) for w in ws], 0)
# remove old parameters, assign the names as buffers
for m, n in net._weights_module_names:
delattr(m, n)
m.register_buffer(n, None)
# register the flat one
net.register_parameter('flat_w', nn.Parameter(flat_w, requires_grad=True))
return net
@add_metaclass(MetaFactory)
class ReparamModule(nn.Module):
"""
Make an architecture inherit this class instead of nn.Module to allow .forward_with_params()
This changes state_dict() to a one value dict containing 'flat_w'
This requires self.device to be defined in the module
"""
def _apply(self, *args, **kwargs):
rv = super(ReparamModule, self)._apply(*args, **kwargs)
return rv
def get_param(self, clone=False):
if clone:
return self.flat_w.detach().clone().requires_grad_(self.flat_w.requires_grad).to(device=self.device)
return self.flat_w.to(device=self.device)
@contextmanager
def unflatten_weight(self, flat_w):
"""
This changes self.state_dict()
from --> odict_keys(['flat_w'])
to --> odict_keys(['flat_w', 'layers.0.weight', 'layers.0.bias', ... ]
Somehow removes 'bias=False' in self._weights_module_names conv names, and
replaces 'bias=False' by 'bias=True' in linear layers.
type(self.state_dict()) = <class 'collections.OrderedDict'> before and after
"""
ws = (t.view(s) for (t, s) in zip(flat_w.split(self._weights_numels), self._weights_shapes))
for (m, n), w in zip(self._weights_module_names, ws):
setattr(m, n, w)
yield
for m, n in self._weights_module_names:
setattr(m, n, None)
def forward_with_param(self, inp, new_w):
#print(type(self.state_dict()))
with self.unflatten_weight(new_w):
# print('FLATTENED')
# print('state_dict: ', type(self.state_dict()), [(k, v.shape) for k,v in self.state_dict().items()])
# print('self._weights_module_names: ', self._weights_module_names)
return nn.Module.__call__(self, inp)
def __call__(self, inp):
return self.forward_with_param(inp, self.flat_w)
def load_state_dict(self, state_dict, *args, **kwargs):
"""
Make load_state_dict work on both singleton dicts
containing a flattened weight tensor and full dicts
containing unflattened weight tensors. Useful when loading
weights from non-meta architectures
"""
if len(state_dict) == 1 and 'flat_w' in state_dict:
return super(ReparamModule, self).load_state_dict(state_dict, *args, **kwargs)
with self.unflatten_weight(self.flat_w):
flat_w = self.flat_w
del self.flat_w
super(ReparamModule, self).load_state_dict(state_dict, *args, **kwargs)
self.register_parameter('flat_w', flat_w)
def unflattened_weights(self):
#print(float(torch.sum(self.state_dict()['flat_w'])))
with self.unflatten_weight(self.flat_w):
state_dict = deepcopy(self.state_dict())
del state_dict['flat_w']
return state_dict
def layer_names(self):
layer_names = []
layer_count = 0
prev_layer = None
for (name, n) in zip(self._weights_module_names, self._weights_numels):
if name[0] != prev_layer:
layer_count += 1
prev_layer = name[0]
if isinstance(name[0], torch.nn.Conv2d) and name[1]=='weight':
layer_names.append('L{}_conv_W_s{}'.format(layer_count, n))
elif isinstance(name[0], torch.nn.Conv2d) and name[1]=='bias':
layer_names.append('L{}_conv_b_s{}'.format(layer_count, n))
elif isinstance(name[0], torch.nn.BatchNorm2d) and name[1]=='weight':
layer_names.append('L{}_bn_W_s{}'.format(layer_count, n))
elif isinstance(name[0], torch.nn.BatchNorm2d) and name[1]=='bias':
layer_names.append('L{}_bn_b_s{}'.format(layer_count, n))
elif isinstance(name[0], torch.nn.Linear) and name[1]=='weight':
layer_names.append('L{}_fc_W_s{}'.format(layer_count, n))
elif isinstance(name[0], torch.nn.Linear) and name[1]=='bias':
layer_names.append('L{}_fc_b_s{}'.format(layer_count, n))
else:
raise ValueError('Unknown layer type {}'.format(name))
return layer_names
def get_bn_masks(self):
"""
Returns 2 boolean masks of size n_weights,
where ones correspond to batchnorm gammas in first mask,
and batchnorm betas in second mask
"""
gammas_mask = torch.zeros(self.flat_w.shape[0], dtype=torch.bool)
betas_mask = torch.zeros(self.flat_w.shape[0], dtype=torch.bool)
i = 0
for (name, n) in zip(self._weights_module_names, self._weights_numels):
is_BN = isinstance(name[0], torch.nn.BatchNorm2d) or isinstance(name[0], torch.nn.BatchNorm1d)
if is_BN and name[1]=='weight':
gammas_mask[i:i+n] = 1
elif is_BN and name[1]=='bias':
betas_mask[i:i+n] = 1
i += n
return gammas_mask, betas_mask
def flattened_unflattened_weights(self):
"""
somehow unflattening weights changes the value of their sum.
This looks like it's because permutation matters in float 32 sum operation and
so different data structures give different results to the same operations
even though they contain the same values. Here unflattening and reflattening
recovers the sum value of the original self.get_param() method.
"""
with self.unflatten_weight(self.flat_w):
state_dict = deepcopy(self.state_dict())
del state_dict['flat_w']
flat_w = torch.cat([w.reshape(-1) for w in state_dict.values()], 0) #.type(torch.DoubleTensor) doesn't change behaviour
return flat_w
def initialize(self, init_type='xavier', init_param=1, init_norm_weights=1, inplace=True):
if inplace:
flat_w = self.flat_w
else:
flat_w = torch.empty_like(self.flat_w).requires_grad_()
with torch.no_grad():
with self.unflatten_weight(flat_w):
initialize(self, init_type=init_type, init_param=init_param, init_norm_weights=init_norm_weights)
return flat_w
| 8,482
| 35.722944
| 127
|
py
|
FDS
|
FDS-main/models/helpers.py
|
import torch.nn as nn
from torch.nn import init
def initialize(net, init_type, init_param, init_norm_weights=1):
    """Apply a weight-initialization scheme to every submodule of *net*.

    Conv/Linear layers: the bias is zeroed and the weight is initialized
    according to *init_type*, scaled by *init_param* (std, gain or the
    kaiming ``a`` parameter, depending on the scheme).  Note: for
    init_type='default', ``reset_parameters()`` runs afterwards, so the bias
    is not kept at zero.  Normalization layers get their weight filled with
    *init_norm_weights* and bias zeroed (different Pytorch versions differ
    in BN init, so it is done manually).  Returns *net*.
    """
    def init_func(m):
        cls = type(m).__name__
        weight = getattr(m, 'weight', None)
        bias = getattr(m, 'bias', None)
        if cls.startswith('Conv') or cls == 'Linear':
            if bias is not None:
                init.constant_(bias, 0.0)
            if weight is not None:
                if init_type == 'normal':
                    init.normal_(weight, 0.0, init_param)
                elif init_type == 'xavier':
                    init.xavier_normal_(weight, gain=init_param)
                elif init_type == 'xavier_unif':
                    init.xavier_uniform_(weight, gain=init_param)
                elif init_type == 'kaiming':
                    init.kaiming_normal_(weight, a=init_param, mode='fan_in')
                elif init_type == 'kaiming_out':
                    init.kaiming_normal_(weight, a=init_param, mode='fan_out')
                elif init_type == 'orthogonal':
                    init.orthogonal_(weight, gain=init_param)
                elif init_type == 'default':
                    if hasattr(m, 'reset_parameters'):
                        m.reset_parameters()
                else:
                    raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
        elif 'Norm' in cls:
            if weight is not None:
                weight.data.fill_(init_norm_weights)
            if bias is not None:
                bias.data.zero_()
    net.apply(init_func)
    return net
return net
| 1,798
| 43.975
| 106
|
py
|
FDS
|
FDS-main/models/selector.py
|
from models.lenet import *
from models.wresnet import *
def select_model(meta,
                 dataset,
                 architecture,
                 init_type='xavier',
                 init_param=1,
                 device='cpu'):
    """Build a (meta or plain) model for the given dataset/architecture.

    Meta models require device to be provided during init.
    Raises NotImplementedError for unknown dataset/architecture combos.
    """
    if dataset in ('MNIST', 'FashionMNIST'):
        kwargs = {'n_classes': 10, 'n_channels': 1, 'im_size': 28}
        if architecture == 'LeNet':
            meta_cls, plain_cls = MetaLeNet, LeNet
        elif architecture == 'LeNet-BN':  # debug neg learning rates
            meta_cls, plain_cls = MetaLeNetBN, LeNetBN
        else:
            raise NotImplementedError
        model = meta_cls(**kwargs, device=device) if meta else plain_cls(**kwargs)
    elif dataset in ('SVHN', 'CIFAR10', 'CIFAR100'):
        kwargs = {'n_classes': 100 if dataset == 'CIFAR100' else 10, 'n_channels': 3}
        if architecture == 'LeNet':
            meta_cls, plain_cls, extra = MetaLeNet, LeNet, {'im_size': 32}
        elif architecture == 'LeNetBN':
            meta_cls, plain_cls, extra = MetaLeNetBN, LeNetBN, {'im_size': 32}
        elif architecture == 'WRN-10-1':
            meta_cls, plain_cls, extra = MetaWideResNet, WideResNet, {'depth': 10, 'widen_factor': 1, 'dropRate': 0.0}
        elif architecture == 'WRN-16-1':
            meta_cls, plain_cls, extra = MetaWideResNet, WideResNet, {'depth': 16, 'widen_factor': 1, 'dropRate': 0.0}
        elif architecture == 'WRN-40-2':
            meta_cls, plain_cls, extra = MetaWideResNet, WideResNet, {'depth': 40, 'widen_factor': 2, 'dropRate': 0.0}
        else:
            raise NotImplementedError
        model = meta_cls(**kwargs, **extra, device=device) if meta else plain_cls(**kwargs, **extra)
    else:
        raise NotImplementedError
    ## Initialization schemes
    if meta:
        model.initialize(init_type=init_type, init_param=init_param, init_norm_weights=1, inplace=True)
    else:
        initialize(model, init_type=init_type, init_param=init_param, init_norm_weights=1)
    return model
return model
if __name__ == '__main__':
    from utils.helpers import *

    # Smoke test: build a model and print the per-parameter sums, so two runs
    # with the same seed can be compared for reproducibility of the init.
    # BUGFIX: the previous call passed kwargs select_model() does not accept
    # (activation, norm_type, norm_affine, noRes, init_norm_weights) and an
    # unimplemented architecture ('ShuffleNetv2-s05'), so it always crashed.
    set_torch_seeds(0)
    model = select_model(False, dataset='CIFAR10', architecture='WRN-16-1',
                         init_type='normal', init_param=1)
    for n, p in model.named_parameters():
        print(n, float(torch.sum(p)))
| 6,359
| 45.764706
| 150
|
py
|
FDS
|
FDS-main/utils/logger.py
|
import csv
import os
class Logger:
    """Append-only CSV results logger.

    The header row is written from the first dict's keys when the file is
    created; every call appends one row of values in key order.
    """

    def __init__(self, filepath='./', filename='results.csv'):
        # Create the target directory lazily so callers need no setup.
        if not os.path.exists(filepath):
            os.makedirs(filepath)
        self.csv_file_path = os.path.join(filepath, filename)

    def write(self, data_dict):
        """Append *data_dict* as a CSV row (header written on first call).

        Warning: keys are not validated against the existing header, so a
        dict with different/reordered keys silently misaligns columns.
        """
        write_header = not os.path.exists(self.csv_file_path)
        # newline='' keeps the csv module from doubling line endings on Windows.
        with open(self.csv_file_path, 'a', newline='') as f:
            writer = csv.writer(f)
            if write_header:
                writer.writerow(list(data_dict.keys()))
            writer.writerow(list(data_dict.values()))
if __name__=="__main__":
    # Quick manual check: append two rows and read the file back with pandas.
    logger = Logger('./../logs/', 'test.csv')
    for row in ({'test1': 5, 'test2': 49}, {'test1': 55, 'test2': 4949}):
        logger.write(row)
    import pandas as pd
    print(pd.read_csv("./../logs/test.csv"))
| 1,079
| 29
| 109
|
py
|
FDS
|
FDS-main/utils/datasets.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import warnings
from utils.helpers import *
def unormalize_CIFAR10_image(image):
    """Invert the CIFAR10 per-channel normalization on a CHW image tensor."""
    std = torch.tensor([0.2023, 0.1994, 0.2010]).view(3, 1, 1)
    mean = torch.tensor([0.4914, 0.4822, 0.4465]).view(3, 1, 1)
    return image * std + mean
def plot_image(input, unormalize=True):
    """Display a single CHW image tensor with matplotlib; return the figure.

    If *unormalize* is set, the CIFAR10 normalization is inverted first.
    Images whose channel count is not 3 are shown as grayscale.
    """
    if len(input.shape) > 3:
        print("Use plot_images function instead!")
        raise NotImplementedError
    npimg = input.numpy()
    if unormalize:
        std = np.array([0.2023, 0.1994, 0.2010]).reshape(3, 1, 1)
        mean = np.array([0.4914, 0.4822, 0.4465]).reshape(3, 1, 1)
        npimg = npimg * std + mean
    npimg = np.transpose(npimg, (1, 2, 0))  # CHW -> HWC for imshow
    if npimg.shape[-1] != 3:
        npimg = npimg[:, :, 0]

    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(111)
    ax.axis('off')
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.imshow(npimg, cmap='gray')
    plt.show()
    return fig
def plot_images(batch, padding=2, unormalize=True):
    """Display one image (3D tensor) or a grid of images (4D batch tensor)."""
    ndim = len(batch.shape)
    if ndim == 3:
        plot_image(batch, unormalize=unormalize)
    elif ndim == 4:
        if batch.shape[0] == 1:
            plot_image(batch[0], unormalize=unormalize)
        else:
            # Arrange the batch into a roughly square grid.
            nrow = int(np.ceil(np.sqrt(batch.shape[0])))
            grid_img = torchvision.utils.make_grid(batch, nrow=nrow, padding=padding)
            plot_image(grid_img, unormalize=unormalize)
class Cutout(object):
    """Randomly zero out one square patch of a CHW image tensor, in place.

    With probability *prob* a square of side *length* (clipped at the image
    border) centred at a uniformly random pixel is set to zero.
    """

    def __init__(self, length, prob=1.0):
        assert prob<=1, f"Cutout prob given ({prob}) must be <=1"
        self.length = length
        self.prob = prob

    def __call__(self, img):
        if np.random.binomial(1, self.prob):
            h, w = img.size(1), img.size(2)
            # Draw the patch centre (y first, then x, as before).
            cy = np.random.randint(h)
            cx = np.random.randint(w)
            half = self.length // 2
            y1 = np.clip(cy - half, 0, h)
            y2 = np.clip(cy + half, 0, h)
            x1 = np.clip(cx - half, 0, w)
            x2 = np.clip(cx + half, 0, w)
            mask = np.ones((h, w), np.float32)
            mask[y1:y2, x1:x2] = 0.
            img *= torch.from_numpy(mask).expand_as(img)
        return img
class InfiniteDataLoader(DataLoader):
    """DataLoader that never raises StopIteration: when the underlying
    iterator is exhausted it is transparently recreated, so iteration
    cycles through the dataset forever and epoch boundaries are invisible
    to the caller."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Keep one live iterator over the parent DataLoader.
        self.dataset_iterator = super().__iter__()
    def __iter__(self):
        return self
    def __next__(self):
        try:
            batch = next(self.dataset_iterator)
        except StopIteration:
            # Epoch finished: start a fresh pass over the dataset.
            self.dataset_iterator = super().__iter__()
            batch = next(self.dataset_iterator)
        return batch
def get_loaders(datasets_path,
                dataset,
                train_batch_size=128,
                val_batch_size=128,
                val_source='train',
                val_train_fraction=0.1,
                val_train_overlap=False,
                workers=0,
                train_infinite=False,
                val_infinite=False,
                cutout=False,
                cutout_length=16,
                cutout_prob=1):
    """
    Build (train_loader, val_loader, test_loader) for the requested dataset.

    NB: val_train_fraction and val_train_overlap only used if val_source='train'
    Note that infinite=True changes the seed/order of the batches
    Validation is never augmented since validation stochasticity comes
    from sampling different validation images anyways
    """
    assert val_source in ['test', 'train']
    TrainLoader = InfiniteDataLoader if train_infinite else DataLoader
    ValLoader = InfiniteDataLoader if val_infinite else DataLoader
    ## Select relevant dataset
    if dataset in ['MNIST', 'FashionMNIST']:
        mean, std = (0.1307,), (0.3081,)
        transform_train = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
        if cutout: transform_train.transforms.append(Cutout(length=cutout_length, prob=cutout_prob))
        transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
        if dataset == 'MNIST':
            train_dataset = datasets.MNIST(datasets_path, train=True, download=True, transform=transform_train)
            test_dataset = datasets.MNIST(datasets_path, train=False, download=True, transform=transform_test)
            # The val split re-loads the train images with the un-augmented transform.
            val_dataset = test_dataset if val_source=='test' else datasets.MNIST(datasets_path, train=True, download=True, transform=transform_test)
        elif dataset == 'FashionMNIST':
            train_dataset = datasets.FashionMNIST(datasets_path, train=True, download=True, transform=transform_train)
            test_dataset = datasets.FashionMNIST(datasets_path, train=False, download=True, transform=transform_test)
            val_dataset = test_dataset if val_source=='test' else datasets.FashionMNIST(datasets_path, train=True, download=True, transform=transform_test)
    elif dataset == 'SVHN':
        mean = (0.4377, 0.4438, 0.4728)
        std = (0.1980, 0.2010, 0.1970)
        dataset_path = os.path.join(datasets_path, 'SVHN') #Pytorch is inconsistent in folder structure
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std)])
        if cutout: transform_train.transforms.append(Cutout(length=cutout_length, prob=cutout_prob))
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std)])
        train_dataset = datasets.SVHN(dataset_path, split='train', download=True, transform=transform_train)
        test_dataset = datasets.SVHN(dataset_path, split='test', download=True, transform=transform_test)
        val_dataset = test_dataset if val_source=='test' else datasets.SVHN(dataset_path, split='train', download=True, transform=transform_test)
    elif dataset in ['CIFAR10', 'CIFAR100']:
        # official CIFAR10 std seems to be wrong (actual is [0.2470, 0.2435, 0.2616])
        mean = (0.4914, 0.4822, 0.4465) if dataset == 'CIFAR10' else (0.5071, 0.4867, 0.4408)
        std = (0.2023, 0.1994, 0.2010) if dataset == 'CIFAR10' else (0.2675, 0.2565, 0.2761)
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)])
        if cutout: transform_train.transforms.append(Cutout(length=cutout_length, prob=cutout_prob))
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std)])
        if dataset == 'CIFAR10':
            dataset_path = os.path.join(datasets_path, 'CIFAR10') #Pytorch is inconsistent in folder structure
            train_dataset = datasets.CIFAR10(dataset_path, train=True, download=True, transform=transform_train)
            test_dataset = datasets.CIFAR10(dataset_path, train=False, download=True, transform=transform_test)
            # BUGFIX: use dataset_path (the CIFAR10 subfolder) for the val
            # split too, matching the train/test datasets above.
            val_dataset = test_dataset if val_source=='test' else datasets.CIFAR10(dataset_path, train=True, download=True, transform=transform_test)
        elif dataset == 'CIFAR100':
            dataset_path = os.path.join(datasets_path, 'CIFAR100')
            train_dataset = datasets.CIFAR100(dataset_path, train=True, download=True, transform=transform_train)
            test_dataset = datasets.CIFAR100(dataset_path, train=False, download=True, transform=transform_test)
            # BUGFIX: the val split must be CIFAR100 (was CIFAR10) and live in
            # the CIFAR100 subfolder, otherwise validation uses wrong labels.
            val_dataset = test_dataset if val_source=='test' else datasets.CIFAR100(dataset_path, train=True, download=True, transform=transform_test)
    else:
        print(f'{dataset} is not implemented')
        raise NotImplementedError
    ## Create dataloaders
    n_train_images = len(train_dataset)
    pin_memory = True if dataset == 'ImageNet' else False
    if val_source == 'test':
        train_loader = TrainLoader(
            dataset=train_dataset, batch_size=train_batch_size,
            shuffle=True, drop_last=True, num_workers=workers, pin_memory=pin_memory)
        val_loader = ValLoader(
            dataset=val_dataset, batch_size=val_batch_size,
            shuffle=True, drop_last=True, num_workers=workers, pin_memory=pin_memory)
    elif val_source == 'train':
        # Carve a random validation subset out of the training indices.
        all_indices = list(range(n_train_images))
        val_indices = np.random.choice(all_indices, size=int(val_train_fraction * n_train_images), replace=False)
        val_loader = ValLoader(
            dataset=val_dataset, batch_size=val_batch_size,
            sampler=SubsetRandomSampler(val_indices), drop_last=True,
            num_workers=workers, pin_memory=pin_memory)
        if val_train_overlap:
            train_loader = TrainLoader(
                dataset=train_dataset, batch_size=train_batch_size,
                shuffle=True, drop_last=True, num_workers=workers, pin_memory=pin_memory)
        else:
            train_indices = list(set(all_indices) - set(val_indices))
            train_loader = TrainLoader(
                dataset=train_dataset, batch_size=train_batch_size,
                sampler=SubsetRandomSampler(train_indices), drop_last=True,
                num_workers=workers, pin_memory=pin_memory)
    test_loader = DataLoader(
        dataset=test_dataset, batch_size=val_batch_size,
        shuffle=True, drop_last=True, num_workers=workers, pin_memory=pin_memory) # test loader never infinite
    return train_loader, val_loader, test_loader
if __name__ == '__main__':
    # Manual smoke test: build MNIST loaders with a validation split carved
    # out of the training set, report their sizes and show a training batch.
    loaders = get_loaders('~/Datasets/Pytorch/',
                          'MNIST',
                          train_batch_size=500,
                          val_batch_size=500,
                          val_source='train',
                          val_train_fraction=0.05,
                          val_train_overlap=False,
                          workers=0,
                          train_infinite=False,
                          val_infinite=False,
                          cutout=True,
                          cutout_length=16,
                          cutout_prob=1)
    train_loader, val_loader, test_loader = loaders
    print(len(train_loader)*500)
    print(len(val_loader)*500)
    for x_val, y_val in val_loader:
        print(x_val.shape)
    for x_train, y_train in train_loader:
        break
    #plot_images(x_val[:100])
    plot_images(x_train[:100])
| 10,895
| 41.232558
| 155
|
py
|
FDS
|
FDS-main/utils/helpers.py
|
import csv
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
import shutil
import datetime
import json
import os
import argparse
import gc
import numpy as np
import torchvision
import functools
import time
import warnings
#warnings.simplefilter("ignore", UserWarning)
### Metrics
class AggregateTensor(object):
    """Running weighted average of a stream.

    Mostly used to average losses and accuracies. Works for both scalars and
    vectors, but every input must be a pytorch tensor; values are detached so
    no graph references are kept alive.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # Tiny non-zero initial count avoids DIV/0 before the first update.
        self.count = 0.0001
        self.sum = 0

    def update(self, val, w=1):
        """Fold *val* into the running sum with weight *w* (e.g. batch size)."""
        self.sum += w * val.detach()
        self.count += w

    def avg(self):
        """Current weighted mean of everything seen so far."""
        return self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    *output* is a (batch, n_classes) score tensor, *target* a (batch,) label
    tensor. Returns a list of 0-dim tensors, one fraction in [0, 1] per k
    (not a percentage).
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # Row r of `hits` flags whether the rank-r prediction matches the label.
    _, pred = output.topk(maxk, 1, True, True)
    ranked = pred.t()
    hits = ranked.eq(target.view(1, -1).expand_as(ranked))

    results = []
    for k in topk:
        n_correct = hits[:k].reshape(-1).float().sum(0)
        results.append(n_correct.mul_(1 / batch_size))
    return results
def avg_entropy(pmf):
    """Mean Shannon entropy (in bits) over a batch of pmfs.

    :param pmf: pytorch tensor pmf of shape [batch_size, n_classes]
    :return: average entropy of pmf across entire batch
    """
    assert ((pmf>=0)*(pmf<=1.00001)).all(), "All inputs must be in range [0,1] but min/max is {}/{}".format(float(torch.min(pmf)), float(torch.max(pmf)))
    # Clamp away exact zeros so log2(0) cannot produce -inf.
    plogp = pmf * torch.log2(torch.clamp(pmf, min=0.0001, max=1.0))
    return torch.mean(-plogp.sum(1))
def avg_max(pmf):
    """Mean of the per-sample maximum probability.

    :param pmf: pytorch tensor pmf of shape [batch_size, n_classes]
    :return: average of max predictions of pmf across entire batch
    """
    assert ((pmf >= 0) * (pmf <= 1)).all(), "All inputs must be in range [0,1]"
    per_sample_max, _ = torch.max(pmf, 1)
    return per_sample_max.mean()
def onehot(targets, n_classes):
    """One-hot encode integer labels.

    Converts labels of form [2, 7, ...] into rows like [0,0,1,0,0,0,0,0,0,0].

    :param targets: (batch,) long tensor of class indices
    :param n_classes: number of classes (width of the output)
    :return: (batch, n_classes) float tensor on targets' device
    """
    canvas = torch.zeros((targets.shape[0], n_classes), device=targets.device)
    return canvas.scatter(1, targets.unsqueeze(-1), 1)
def gc_tensor_view(verbose=True):
    """Summarize memory used by torch tensors visible to the garbage collector.

    Doesn't catch intermediate variables stored by the Pytorch graph if they
    are not in the Python scope. Sizes assume all tensors are torch.float(),
    i.e. 32 bit (4 bytes per element). With verbose=True, prints a per-shape
    breakdown sorted by total size; always returns None.
    """
    total_MB_size = 0
    object_counts = {}
    object_MBs = {}
    if verbose: print('\n------- TENSORS SEEN BY GARBAGE COLLECTOR -------')
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
                MB_size = np.prod(obj.size()) * 4 / 1024**2 #assume float32
                total_MB_size += MB_size
                key = str(obj.size())[10:]  # strip the 'torch.Size' prefix
                object_counts[key] = object_counts.get(key, 0) + 1
                object_MBs[key] = MB_size
        # BUGFIX: narrowed from a bare `except:` — some gc-tracked objects
        # raise on attribute access, but KeyboardInterrupt/SystemExit must
        # still escape.
        except Exception:
            pass
    if verbose:
        object_totals = {k:object_counts[k] * object_MBs[k] for k in object_MBs.keys()}
        for key, value in sorted(object_totals.items(), key=lambda item: item[1], reverse=True):
            print("{} x {} ({:.0f}MB) = {:.0f}MB".format(object_counts[key], key, object_MBs[key], object_counts[key]*object_MBs[key]))
        print("TOTAL MEMORY USED BY PYTORCH TENSORS: {:.0f} MB".format(total_MB_size))
def set_torch_seeds(seed):
    """Seed python, numpy and torch (CPU and CUDA) RNGs for reproducibility."""
    import random
    import numpy as np
    import torch
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
def timer(func):
    """Print the runtime of the decorated function"""
    @functools.wraps(func)
    def wrapper_timer(*args, **kwargs):
        started = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - started
        print(f"--- Ran func {func.__name__!r} in {format_time(elapsed)} ---")
        return result
    return wrapper_timer
### Data view and read
def unormalize_CIFAR10_image(image):
    """Invert the CIFAR10 per-channel normalization on a CHW image tensor."""
    std = torch.tensor([0.2023, 0.1994, 0.2010]).view(3, 1, 1)
    mean = torch.tensor([0.4914, 0.4822, 0.4465]).view(3, 1, 1)
    return image * std + mean
# def plot_image(input, unormalize=False):
# if len(input.shape) > 3:
# print("Use plot_images function instead!")
# raise NotImplementedError
# npimg = input.numpy()
# if unormalize:
# npimg = npimg * np.array([0.2023, 0.1994, 0.2010]).reshape(3,1,1) + np.array([0.4914, 0.4822, 0.4465]).reshape(3,1,1)
# npimg = np.transpose(npimg, (1, 2, 0))
# if npimg.shape[-1] != 3:
# npimg = npimg[:, :, 0]
# #print(npimg.shape)
#
# fig = plt.figure(figsize=(20, 20))
# ax = fig.add_subplot(111)
# ax.axis('off')
# ax.set_xticklabels([])
# ax.set_yticklabels([])
#
# ax.imshow(npimg, cmap='gray')
# plt.show()
# return fig
# def plot_images(batch, padding=2, unormalize=False):
# if len(batch.shape) == 3:
# plot_image(batch, unormalize=unormalize)
# elif len(batch.shape) == 4:
# n_images = batch.shape[0]
# if n_images == 1:
# plot_image(batch[0], unormalize=unormalize)
# else:
# grid_img = torchvision.utils.make_grid(batch, nrow=int(np.ceil(np.sqrt(n_images))), padding=padding)
# plot_image(grid_img, unormalize=unormalize)
def str2bool(v):
    """Parse common truthy/falsy strings into a bool (for argparse).

    codes from : https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def delete_files_from_name(folder_path, file_name, type='contains'):
    """Delete files in *folder_path* whose name matches *file_name*.

    type='is' requires an exact name match; type='contains' deletes every
    file whose name includes *file_name* as a substring.
    """
    assert type in ['is', 'contains']
    for entry in os.listdir(folder_path):
        matched = (file_name == entry) if type == 'is' else (file_name in entry)
        if matched:
            os.remove(os.path.join(folder_path, entry))
def copy_file(file_path, folder_path):
    """Copy *file_path* into *folder_path*, keeping its base name."""
    target = os.path.join(folder_path, os.path.basename(file_path))
    shutil.copyfile(file_path, target)
def format_time(seconds):
    """Render a duration given in seconds as 'XhMMmSSs'."""
    total = int(seconds)
    return "%dh%02dm%02ds" % (total // 3600, (total % 3600) // 60, total % 60)
def create_empty_file(path):
    """Easy way to log final test accuracy in some experiment folder"""
    with open(path, 'w+') as f:
        f.write("NA")
if __name__ == '__main__':
    import torch

    # Smoke test for the running-average helper.
    # BUGFIX: the previous code instantiated an undefined `AggregateVector`
    # (NameError); the class defined above is `AggregateTensor`.
    a = torch.FloatTensor([1])
    b = torch.FloatTensor([2])
    c = AggregateTensor()
    c.update(a)
    c.update(b)
    print(c.avg())
| 11,853
| 30.442971
| 153
|
py
|
corrupted_data_classification
|
corrupted_data_classification-main/main.py
|
# -*- coding: utf-8 -*-
'''
The following libraries are used:
[1] NIFTy – Numerical Information Field Theory, https://gitlab.mpcdf.mpg.de/ift/nifty
[2] NumPy - Numerical Python, https://numpy.org/
[3] Tensorflow - Tensorflow, https://www.tensorflow.org/
[4] Keras - Keras, https://keras.io/
[5] Matplotlib - Matplotlib, https://matplotlib.org/
[6] SciPy - Scientific Python, https://www.scipy.org/
[7] random - random, https://docs.python.org/3/library/random.html
[8] sklearn - https://scikit-learn.org/
Within helper_functions.py, Conv.py and Mask.py, the following libraries are used (these may be obsolete and omittable for the core task):
[9] PIL - Pillow (only Image-function), https://pillow.readthedocs.io/en/stable/
[10] warnings - warnings, https://docs.python.org/3/library/warnings.html
[11] random - random, https://docs.python.org/3/library/random.html
[12] skimage - scikit-image (only resize-function), https://scikit-image.org
All Neural Networks were built with Keras and saved as tensorflow-objects. Neural Netowrks are optimized for MNIST, good performance is observed for
F-MNIST.
'''
# Commented out IPython magic to ensure Python compatibility.
# Colab and system related
import os
import sys
import nifty6 as ift
###
# Necessary to convert tensorflow-object (e.g. Neural Network) to Nifty-Operator
sys.path.append('corrupted_data_classification/helper_functions/')
from operators.tensorflow_operator import TensorFlowOperator
###
import tensorflow as tf
# Include path to access helper functions and Mask / Conv Operator
sys.path.append('corrupted_data_classification/helper_functions/')
from helper_functions import clear_axis, gaussian, get_cmap, info_text, get_noise, rotation, split_validation_set
import Mask # Masking Operator
import Conv # Convolution Operator
sys.path.remove
# Tensorflow
# Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.dpi'] = 200 # 200 e.g. is really fine, but slower
# Numerics
import random
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy.stats import multivariate_normal
import sklearn as sk
from sklearn import decomposition
# Choose dataset
dataset = 'mnist' #'mnist, 'fashion_mnist'
datasource = getattr(tf.keras.datasets, dataset)
(XTrain, YTrain), (XTest, YTest) = datasource.load_data()
XTrain, XTest = XTrain / 255.0, XTest / 255.0
x_shape = XTrain[1].shape[0]
y_shape = XTrain[1].shape[1]
try:
z_shape = XTrain[1].shape[2]
img_shape = [x_shape, y_shape, z_shape]
except:
img_shape = [x_shape, y_shape]
xy_shape = x_shape * y_shape
flattened_shape = np.prod(img_shape)
# Reshape Xtrain and XTest to flattened Vectors instead of square arrays
if dataset == 'mnist' or dataset== 'fashion_mnist':
XTrain = XTrain.reshape((len(XTrain), np.prod(XTrain.shape[1:])))
# --- Experiment setup: data, pretrained autoencoder, latent statistics, corruption model ---
# Flatten test images to vectors of shape (N, prod(image dims)).
XTest = XTest.reshape((len(XTest), np.prod(XTest.shape[1:])))
n_classes = len(np.unique(YTrain))
# Session for tensorflow v1 compatibility
sess = tf.compat.v1.InteractiveSession()
graph = tf.compat.v1.get_default_graph()
###
# [4]
###
# Split Training-Dataset into additional validation set.
XTrain, YTrain, XVal, YVal = split_validation_set(XTrain, YTrain, val_perc=0.2)
# Read in model#
# Load the pretrained supervised autoencoder halves for the selected dataset.
if dataset=='mnist':
    Decoder_tf = tf.keras.models.load_model('./corrupted_data_classification/NNs/MNIST/pretrained_supervised_ae10/Decoder', compile=False)
    Encoder_tf = tf.keras.models.load_model('./corrupted_data_classification/NNs/MNIST/pretrained_supervised_ae10/Encoder', compile=False)
if dataset=='fashion_mnist':
    Decoder_tf = tf.keras.models.load_model('./corrupted_data_classification/NNs/Fashion-MNIST/pretrained_supervised_ae10/Decoder', compile=False)
    Encoder_tf = tf.keras.models.load_model('./corrupted_data_classification/NNs/Fashion-MNIST/pretrained_supervised_ae10/Encoder', compile=False)
# Define ift-space
# position_space: Also data-space. Equal to the vectorized image dimension. For MNIST-Images, the position-space's
# dimensions are 784x1
position_space = ift.UnstructuredDomain(Decoder_tf.get_layer(index=-1).output_shape[1:])
# n_latent: number of latent space activations
n_latent = Encoder_tf.get_layer(index=-1).output_shape[-1]
# latent_space: Domain with dimensions of the latent space
latent_space = ift.UnstructuredDomain([n_latent])
# Initialize Parameters
# Pre-Defined parameters by Max-Planck-Institute
comm, _, _, master = ift.utilities.get_MPI_params()
# Convert Encoder and Decoder to nifty-operators (``TensorFlowOperator``)
Decoder = TensorFlowOperator(Decoder_tf.layers[-1].output, Decoder_tf.layers[0].output, latent_space, position_space)
Encoder = TensorFlowOperator(Encoder_tf.layers[-1].output, Encoder_tf.layers[0].output, position_space, latent_space)
# Choose how to classify data, once it has been reconstructed (any classifier of MNIST data my be chosen here).
Classifier = Encoder
#Classifier = TensorFlowOperator(Classifier_tf.layers[-2].output, Classifier_tf.layers[0].output, position_space,ift.UnstructuredDomain(n_classes))
# Get all activations in the latent space from Encoder with Validation Dataset -> latent_values
latent_values = np.zeros((len(XVal), n_latent))
for i, pic in enumerate(XVal):
    pic = np.reshape(pic, position_space.shape)
    latent_values[i, :] = Encoder(ift.Field.from_raw(position_space, pic)).val
# Fill means-array with mean activation of every picture
# means[weight, class] = mean latent activation of that unit over validation samples of that class.
means = np.zeros([n_latent, n_classes])
for pic in range(n_classes):
    for weight in range(n_latent):
        means[weight, pic] = np.mean(latent_values[np.where(YVal == pic), weight])
# Define overall mean of all activations in latent-space
mean = ift.Field.from_raw(latent_space, np.mean(latent_values, axis=0)) #mean of all activations in latent
Mean = ift.Adder(mean)
# Fill cov_all_variables with covariances of activation of every digit;
# Get cov_supervised_variables with covariances of only supervised activations
cov_all_variables = [[np.zeros([n_latent, n_latent])] for y in range(n_classes)]
cov_supervised_variables = [[np.zeros([n_classes, n_classes])] for y in range(n_classes)]
for i in range(n_classes):
    cov_all_variables[i] = np.cov(latent_values[np.where(YVal==i)[0]][:,:], rowvar=False)
    cov_supervised_variables[i] = np.cov(latent_values[np.where(YVal==i)[0]][:,:10], rowvar=False)
# Fill overall covariance of all activations in latent space
cov = np.zeros([n_latent, n_latent])
cov = np.cov(latent_values, rowvar=False)
# Transform covariance matrix into standardized space by Cholesky factorization
# cov = AA^T
A = ift.MatrixProductOperator(ift.UnstructuredDomain([n_latent]), np.linalg.cholesky(cov))
'''
Generate Ground Truth either
--> from Sampling from latent distribution OR
--> from drawing a sample from independent partition of dataset
'''
## Sampling from latent distribution
#xi = ift.from_random(latent_space, 'normal')
#s = A.apply(xi, 1) + mean
#ground_truth = Decoder(s)
## Drawing sample from dataset
# NOTE(review): p and ground_truth set here are overwritten again below (p = 10).
p=3
#p = 10
ground_truth = ift.Field.from_raw(position_space, np.reshape(XTest[p], position_space.shape))
'''
Data Corruption:
1. Mask --> Operator: M (no_mask, half_mask, corner_mask, checkerboard_mask, random_mask)
2. Noise --> Operator: N
3. Convolution --> Operator: C (sobel, gaussian_blur, edge_detection, own)
Data Modification (not included in modeling-process; thus the Model "does not
know" these modifications):
4. Rotation (angle)
X. Response --> Operator: R (Concatenated Mask, Noise and Convolution)
'''
p = 10 # Specify element of XTest that is to be corrupted and to be evaluated; can be arbitrary integer within length of XTest
ground_truth = ift.Field.from_raw(position_space, np.reshape(XTest[p], position_space.shape))
# 1. Mask
M = Mask.no_mask(position_space=position_space)
#M = Mask.half_mask(position_space=position_space, mask_range=0.5)
#M = Mask.random_mask(position_space=position_space, seed=10, n_blobs=25)
# 2. Noise
N, n = get_noise(noise_level=1, position_space=position_space, seed=10)
# 3. Convolution
#C = Conv.gaussian_blur(7, 1, position_space=position_space) # sobel, edge_detection,
# 4. Rotation (not included in data-model, reconstruction may be poor!)
# Specify angle in degrees (clockwise rotation)
ground_truth_rot = rotation(ground_truth, img_shape, angle=0)
# Apply Data Corruption to Ground Truth and creeate Response operator
GR = ift.GeometryRemover(position_space)
R = GR(M) # Without Convolution
#R = GR(M @ C) # With Convolution
data = R((ground_truth_rot))+n # Apply Response R on (rotated) ground truth --> Noise is applicated after masking
plt.imshow(np.reshape(data.val, [28,28]))
# Define Hyperparameters for minimizer via Iteration-Controllers
# These Hyperparameters are not fully optimized!
ic_sampling = ift.AbsDeltaEnergyController(name='Sampling', deltaE=1e-2, iteration_limit=150)
ic_newton = ift.AbsDeltaEnergyController(name='Newton', deltaE=5e-2, iteration_limit=150)
minimizer = ift.NewtonCG(ic_newton)
'''
Define Likelihood as Gaussian Energy
mean: data (corruped image with R applied)
inverse_covariance: Inverse of Noise-Matrix N
R: Response Operator
Decoder: Generator mapping data from latent space to image space
Mean: Adder Operator; Mean of all latent Space activations
A: Product Operator; Transformed Covariance of all latent space activations
Mean and A originate from the following transformation:
s = A*xi+Mean
'''
likelihood = ift.GaussianEnergy(mean=data, inverse_covariance=N.inverse) @ R @ Decoder @ Mean @ A
H = ift.StandardHamiltonian(likelihood, ic_sampling)
# Run MGVI (Metric Gaussian Variational Inference)
n_samples = 50 # Define number of samples with which posterior distribution is approximated; more samples => higher runtime, higher accuracy
def MGVI(n_samples, H):
    """Approximate the posterior of Hamiltonian H via Metric Gaussian VI.

    Starting from the zero field in latent space, performs five rounds of
    re-sampling and KL minimisation, then one final round at the last
    position, and returns the resulting KL energy object.
    """
    mu = ift.Field.full(latent_space, 0.)  # start at the latent-space origin; random init works as well
    n_rounds = 5
    for _ in range(n_rounds):
        # Re-sample around the current estimate and minimise the KL divergence.
        kl_energy = ift.MetricGaussianKL(mu, H, n_samples, mirror_samples=False)
        kl_energy, _convergence = minimizer(kl_energy)
        mu = kl_energy.position
    # One last sampling/minimisation pass at the final position.
    kl_energy = ift.MetricGaussianKL(mu, H, n_samples, mirror_samples=False)
    kl_energy, _convergence = minimizer(kl_energy)
    return kl_energy
# --- Posterior analysis: run MGVI, classify posterior samples, visualize results ---
iters=1 # Define number of iterations of posterior approximation. This might be helpful to check "how certain" the approximation is and if only an unstable local minimum is found
KL_iterations = []
for i in range(iters):
    KL_iterations.append(MGVI(n_samples, H))
# Draw inferred signal from posterior samples and transform to original space
sc = ift.StatCalculator()
for i in range(iters):
    KL = KL_iterations[i]
    for sample in KL.samples:
        sc.add(A.apply(sample + KL.position, 1) + mean) # Retransform signal s = A*xi+mu
posterior_mean = sc.mean # Get mean of all samples
posterior_std = ift.sqrt(sc.var) # Get standard deviation of all samples
# Classify posteriors via mahalanobis-distance and by classifying all posterior samples
# with seperatly trained network ('Classifier')
mahalanobis_distance_supervised = np.zeros([iters*n_samples, n_classes])
mahalanobis_distance = np.zeros([iters*n_samples, n_classes])
classified_posteriors = np.zeros([iters*n_samples, n_latent])
latent_posteriors = np.zeros([iters*n_samples, n_latent])
for k in range(iters):
    KL = KL_iterations[k]
    for j, sample in enumerate(KL.samples):
        # Retransform each sample to the (untransformed) latent space.
        s_posterior = A.apply(sample + KL.position, 1) + mean
        latent_posteriors[j+k*n_samples, :] = s_posterior.val
        classified_posteriors[j+k*n_samples, :] = Classifier(Decoder(s_posterior)).val
        for i in range(n_classes):
            # Mahalanobis distance of the sample to each class's latent distribution.
            mahalanobis_distance_supervised[j+k*n_samples, i] = np.sqrt((s_posterior.val[:n_classes] - means[:n_classes,i]).T @ np.linalg.inv(cov_supervised_variables[i]) @ (s_posterior.val[:n_classes] - means[:n_classes,i]))
            mahalanobis_distance[j+k*n_samples, i] = np.sqrt((s_posterior.val - means[:,i]).T @ np.linalg.inv(cov_all_variables[i]) @ (s_posterior.val - means[:,i]))
            #mahalanobis_distance[j+k*n_samples, i] = np.sqrt((s_posterior.val - means[:,i]).T @ (s_posterior.val - means[:,i])) # Euclidian Distance
mahalanobis_mean = np.mean(mahalanobis_distance, axis=0)
mahalanobis_std = np.sqrt(np.var(mahalanobis_distance, axis=0))
mahalanobis_mean_supervised = np.mean(mahalanobis_distance_supervised, axis=0)
mahalanobis_std_supervised = np.sqrt(np.var(mahalanobis_distance_supervised, axis=0))
classified_mean = np.mean(classified_posteriors, axis=0)
classified_std = np.std(classified_posteriors, axis=0)
# Get all classifications of posterior samples for pie-plot visualization
classified_posteriors_nn = np.sort(np.argmax(classified_posteriors, axis=1))
classified_posteriors_dm = np.sort(np.argmin(mahalanobis_distance, axis=1))
# NOTE(review): this loop recomputes the same class counts n_classes times;
# only the last iteration's result is used.
for i in range(n_classes):
    unique_digit_nn, count_nn = np.unique(classified_posteriors_nn, return_counts=True)
    unique_digit_dm, count_dm = np.unique(classified_posteriors_dm, return_counts=True)
    counts_nn = dict(zip(unique_digit_nn, count_nn))
    counts_dm = dict(zip(unique_digit_dm, count_dm))
viridis = cm.get_cmap('viridis', n_classes)
pie_colors = viridis(np.linspace(0, 1, n_classes))
# Create dictionary with important information:
# Top scores of respective classification method (M-Dist, NN)
# True or false classification (only valid if Labels given)
# Overlapping standard-deviations
n_scores = 3 # Number of top scoring elements to be displayed (max: n_classes)
top_scores_nn = list(reversed(np.argsort(classified_mean)[-n_scores:]))
top_scores_dm = list(np.argsort(mahalanobis_mean)[:n_scores])
overlap_bottom_nn = np.zeros(n_scores-1)
overlap_bottom_dm = np.zeros(n_scores-1)
for i in range(n_scores-1):
    overlap_bottom_nn[i] = (classified_mean[top_scores_nn[0]] - classified_std[top_scores_nn[0]]) - (classified_mean[top_scores_nn[i+1]] + classified_std[top_scores_nn[i+1]])
    overlap_bottom_dm[i] = (mahalanobis_mean[top_scores_dm[i+1]] - mahalanobis_std[top_scores_dm[i+1]]) - (mahalanobis_mean[top_scores_dm[0]] + mahalanobis_std[top_scores_dm[0]])
keys_nn = ['Measure','Top Scores:', 'Classification:', 'ID:', 'N Samples:']
keys_dm = ['Measure','Top Scores:', 'Classification:', 'ID:', 'N Samples:', 'M-Dist of {}:'.format(top_scores_dm[0])]
# NOTE(review): the ground truth above was taken from XTest[p] but is compared
# against YTrain[-p] here — confirm which label array is intended.
if top_scores_nn[0] == YTrain[-p]:
    values_nn = ['Neural Net Classifier','{}'.format(tuple(top_scores_nn)), 'True', 'YTrain[-{}]'.format(p), '{}'.format(n_samples)]
if top_scores_dm[0] == YTrain[-p]:
    values_dm = ['Mahalanobis Distance','{}'.format(tuple(top_scores_dm)), 'True', 'YTrain[-{}]'.format(p), '{}'.format(n_samples), '{}'.format(mahalanobis_mean[top_scores_dm[0]])]
if top_scores_nn[0] != YTrain[-p]:
    values_nn = ['Neural Net Classifier','{}'.format(tuple(top_scores_nn)), 'False', 'YTrain[-{}]'.format(p), '{}'.format(n_samples)]
if top_scores_dm[0] != YTrain[-p]:
    values_dm = ['Mahalanobis Distance','{}'.format(tuple(top_scores_dm)), 'False', 'YTrain[-{}]'.format(p), '{}'.format(n_samples), '{}'.format(mahalanobis_mean[top_scores_dm[0]])]
# Store Overlapping in Dictionary, expressed in terms of sigmas/STD of top Scoring digit
for i in range(n_scores - 1):
    keys_nn.append('Overlap [sigmas] {} --> {}'.format(top_scores_nn[0], top_scores_nn[i+1]))
    values_nn.append(overlap_bottom_nn[i] / classified_std[top_scores_nn[0]])
    keys_dm.append('Overlap [sigmas] {} --> {}'.format(top_scores_dm[0], top_scores_dm[i+1]))
    values_dm.append(overlap_bottom_dm[i] / mahalanobis_std[top_scores_dm[0]])
overlapping_nn = dict(zip(keys_nn, values_nn))
overlapping_dm = dict(zip(keys_dm, values_dm))
# NOTE(review): these assignments shadow the built-ins min/max for the rest of
# the script; the values appear unused afterwards.
min = np.min([posterior_mean.val])
max = np.max([posterior_mean.val])
# --- Figure 1: posterior summaries, reconstructions, and pie charts ---
plt.subplot(3, 4, 1)
barplot = plt.bar(range(n_classes), posterior_mean.val[0:n_classes], alpha=1, width=0.8, yerr=posterior_std.val[0:n_classes], label='MGVI with STD')
barplot[np.where(posterior_mean.val == np.max(posterior_mean.val[:10]))[0][0]].set_color('r')
plt.legend(fontsize=3)
plt.title('$h\pm\delta_r$', fontsize=8)
plt.xticks(range(n_classes), fontsize=6)
plt.yticks(fontsize=6)
plt.subplot(3, 4, 2)
barplot = plt.bar(range(n_classes), classified_mean[:10], yerr=classified_std[:10])
plt.xticks(np.arange(n_classes), fontsize=6)
plt.yticks(fontsize=6)
barplot[np.where(classified_mean == np.max(classified_mean))[0][0]].set_color('r')
plt.title('$f(g(h))\pm \delta_r$', fontsize=8)
plt.subplot(3, 4, 3)
m_mean = mahalanobis_mean_supervised
m_std = mahalanobis_std_supervised
barplot = plt.bar(range(n_classes), m_mean, yerr=m_std)
barplot[np.where(m_mean == np.min(m_mean))[0][0]].set_color('r')
for bar in barplot:
    yval = bar.get_height()
    yval = np.round(yval, decimals=2)
    plt.annotate('{}'.format(yval),
                 xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()),
                 xytext=(0, 3),  # 3 points vertical offset
                 textcoords="offset points",
                 ha='center', va='bottom', fontsize=5, rotation=45)
plt.title('$\delta_m\pm \delta_r$', fontsize=8)
plt.ylim(0, 1.3*np.max(m_mean))
plt.xticks(np.arange(n_classes), fontsize=6)
plt.yticks(fontsize=6)
plt.subplot(3, 4, 5)
plt.imshow(np.reshape(data.val, img_shape))
plt.xlabel('Mock Signal')
clear_axis()
plt.xticks(fontsize=6)
plt.yticks(fontsize=6)
plt.subplot(3, 4, 6)
plt.imshow(np.reshape(ground_truth.val, img_shape))
plt.xlabel('Ground Truth: {}'.format(YTest[p]), fontsize=8)
clear_axis()
plt.subplot(3, 4, 7)
plt.imshow(np.reshape(Decoder(posterior_mean).val, img_shape))
plt.xlabel('Reconstruction', fontsize=8)
clear_axis()
plt.subplot(3, 4, 4)
plt.pie([float(v) for v in counts_nn.values()], labels=[float(k) for k in counts_nn.keys()],autopct='%1.1f%%', colors=pie_colors[list(counts_nn.keys())], textprops={'fontsize': 4} )
plt.xlabel('Class. Post. NN', fontsize=8)
plt.subplot(3, 4, 8)
plt.pie([float(v) for v in counts_dm.values()], labels=[float(k) for k in counts_dm.keys()],autopct='%1.1f%%', colors=pie_colors[list(counts_dm.keys())], textprops={'fontsize': 4})
plt.xlabel('Class. Post. $d_M$', fontsize=8)
plt.savefig('./corrupted_data_classification/{}'.format('example_results'))
# Visualize reconstructions of all posterior samples. Output dependent on n_samples.
grid = plt.GridSpec(np.int(np.floor(np.sqrt(len(latent_posteriors)))), np.int(np.ceil(np.sqrt(len(latent_posteriors)))), wspace=0.1, hspace=0.1)
k=0
latent_posteriors=latent_posteriors[latent_posteriors[:,5].argsort()]
for i in range(np.int(np.floor(np.sqrt(len(latent_posteriors))))):
    for j in range(np.int(np.ceil(np.sqrt(len(latent_posteriors))))):
        if k < iters*n_samples:
            plt.subplot(grid[i, j])
            plt.imshow(np.reshape(Decoder(ift.Field.from_raw(latent_space, latent_posteriors[k, :])).val, img_shape), 'gray')
            clear_axis()
            k += 1
        else:
            break
fig = plt.gcf()
plt.savefig('./corrupted_data_classification/{}'.format('example_samples'))
print('Done. Results saved.')
| 18,972
| 42.71659
| 219
|
py
|
corrupted_data_classification
|
corrupted_data_classification-main/operators/multinomial_energy.py
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import nifty6 as ift
import numpy as np
class CategoricalEnergy(ift.EnergyOperator):
    """
    The negative logarithm of the categorical distribution for outcomes d as a function
    of the classification probabilities.

    Parameters
    ----------
    d : Nifty-Field of positive integers
        The outcomes of the multinomial experiments. Must contain only the
        values 0 and 1 (one-hot encoded outcomes).
    scale : positive float
        The scaling factor used to weight the impact of this likelihood.

    Raises
    ------
    TypeError
        If ``d`` is not an integer-typed ``ift.Field``.
    ValueError
        If ``d`` contains values other than 0 and 1.
    """
    def __init__(self, d, scale=1.):
        # Fix: the original raised bare TypeError/ValueError with no message,
        # making misuse hard to diagnose. Exception types are unchanged.
        if not isinstance(d, ift.Field) or not np.issubdtype(d.dtype, np.integer):
            raise TypeError("d must be an ift.Field with an integer dtype")
        if not np.all(np.logical_or(d.val== 0, d.val == 1)):
            raise ValueError("d must contain only the values 0 and 1 (one-hot outcomes)")
        self._d = d
        self._domain = ift.DomainTuple.make(d.domain)
        self._scale = scale
    def apply(self, x):
        """Evaluate -scale * <log(x), d>; attach the Fisher metric if requested."""
        self._check_input(x)
        v = -x.log().vdot(self._d) * self._scale
        if not isinstance(x, ift.Linearization):
            return v
        if not x.want_metric:
            return v
        # Metric of the categorical likelihood: diag(scale / x), sandwiched
        # between the Jacobian and its adjoint.
        met = ift.makeOp(self._scale/(x.val))
        met = ift.SandwichOperator.make(x.jac, met)
        return v.add_metric(met)
| 1,779
| 36.083333
| 87
|
py
|
corrupted_data_classification
|
corrupted_data_classification-main/operators/tensorflow_operator.py
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import nifty6 as ift
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class TensorFlowOperator(ift.Operator):
    """
    A wrapper for TensorFlow tensors as Nifty operators.
    The Jacobian and its adjoint are calculated via TensorFlow auto-differentiaiton.

    Parameters
    ----------
    tf_op : TensorFlow Tensor
        The tensor corresponding to the output layer.
    argument : TensorFlow Tensor
        The input tensor.
    domain : Nifty Domain
        The input-domain of the operator.
    target : Nifty Domain
        The output-domain of the operator.
    add_domain_axis : boolean
        Wheter to add an axis to the input to match the tensor shape. (default: False)
    add_target_axis : boolean
        Wheter to add an axis to the output to match the domain shape. (default: False)
    """
    def __init__(self, tf_op, argument, domain, target,
                 add_domain_axis=False, add_target_axis=False):
        self._target = ift.DomainTuple.make(target)
        self._domain = ift.DomainTuple.make(domain)
        self._tf_op = tf_op
        self._argument = argument
        # A leading batch axis of size 1 is always added; the optional trailing
        # axis accommodates e.g. channel dimensions expected by the network.
        if add_target_axis:
            self._output_shape = (1,) + self._target.shape + (1,)
        else:
            self._output_shape = (1,) + self._target.shape
        if add_domain_axis:
            self._input_shape = (1,) + self._domain.shape + (1,)
        else:
            self._input_shape = (1,) + self._domain.shape
        # Placeholders fed to the (adjoint) Jacobian graphs built below.
        self._d_x = tf.placeholder(tf.float32, self._input_shape)
        self._d_y = tf.placeholder(tf.float32, self._output_shape)
        self._adjoint_jac = self.adjoint_jacobian(tf_op, self._argument, self._d_y)
        self._jac = self.jacobian(tf_op, self._argument, self._d_x)
    def apply(self, x):
        """Evaluate the wrapped tensor at x; if x is a Linearization, also attach the Jacobian."""
        self._check_input(x)
        lin = isinstance(x, ift.Linearization)
        val = x.val.val if lin else x.val
        val = val.reshape(self._input_shape)
        res = self._tf_op.eval(feed_dict={self._argument: val}).squeeze()
        res = ift.makeField(self._target, res)
        if lin:
            _jac = TensorflowJacobian(self._jac, self._adjoint_jac, val,
                                      self._argument, self._d_x, self._d_y,
                                      self._domain, self._target, self._input_shape,
                                      self._output_shape)
            jac = _jac(x.jac)
            return x.new(res, jac)
        return res
    def jacobian(self, y, x, d_x):
        """Build the forward Jacobian-vector product graph J(y wrt x) @ d_x.

        Uses the double-gradient trick: grad of (grad of y wrt x, seeded with a
        zero tensor z) wrt z yields the forward-mode derivative.
        """
        z = tf.zeros_like(y)
        g = tf.gradients(y, x, grad_ys=z)
        return tf.gradients(g, z, grad_ys=d_x)[0]
    def adjoint_jacobian(self, y, x, d_y):
        """Build the adjoint (vector-Jacobian) product graph d_y @ J(y wrt x)."""
        return tf.gradients(y, x, grad_ys=d_y)[0]
class TensorflowJacobian(ift.LinearOperator):
    """
    The Jacobian of a TensorFlowOperator as linear Nifty operator.

    Parameters
    ----------
    jac : TensorFlow Tensor
        The Jacobian of the TensorFlow tensor w.r.t. the input.
    adjoint_jac : TensorFlow Tensor
        The adjoint Jacobian of the TensorFlow tensor w.r.t. the input.
    loc : Nifty Field
        The location at which the Jacobian is evaluated.
    argument : Nifty domain
        The input of the original tensor.
    d_x : TensorFlow Tensor
        The input tensor for the Jacobian.
    d_y : TensorFlow Tensor
        The input tensor for the adjoint Jacobian.
    domain : Nifty Domain
        The input-domain of the operator.
    target : Nifty Domain
        The output-domain of the operator.
    input_shape : tuple
        The shape of the input.
    output_shape : tuple
        The shape of the output.
    """
    def __init__(self, jac, adjoint_jac, loc, argument, d_x, d_y, domain,
                 target, input_shape, output_shape):
        self._target = ift.DomainTuple.make(target)
        self._domain = ift.DomainTuple.make(domain)
        self._output_shape = output_shape
        self._input_shape = input_shape
        self._jac = jac
        self._adjoint_jac = adjoint_jac
        self._argument = argument
        self._capability = self.TIMES | self.ADJOINT_TIMES
        self._loc = loc
        self._d_x = d_x
        self._d_y = d_y
    def apply(self, x, mode):
        """Apply the (adjoint) Jacobian to x by evaluating the prebuilt TF graph at self._loc."""
        self._check_input(x, mode)
        x = x.val
        if mode == self.TIMES:
            x = x.reshape(self._input_shape)
            res = self._jac.eval(feed_dict={self._d_x: x, self._argument: self._loc})
            return ift.makeField(self.target, res.squeeze())
        # ADJOINT_TIMES: feed x through the adjoint graph instead.
        x = x.reshape(self._output_shape)
        res = self._adjoint_jac.eval(feed_dict={self._d_y: x, self._argument: self._loc})
        return ift.makeField(self.domain, res.squeeze())
| 5,271
| 37.481752
| 89
|
py
|
corrupted_data_classification
|
corrupted_data_classification-main/helper_functions/helper_functions.py
|
import pandas as pd
import numpy as np
import math
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
import io
import cv2
import numpy as np
import matplotlib.pyplot as plt
import random
from skimage.transform import resize
from scipy.special import binom
import warnings
try:
import nifty6 as ift
except:
warnings.warn("Failed importing nifty6")
from PIL import Image
def clear_axis():
    """Strip all x/y tick marks from the current matplotlib axes."""
    axes = plt.gca()
    axes.axes.xaxis.set_ticks([])
    axes.axes.yaxis.set_ticks([])
def convolution(colatitude):
    """Convert an angle given in radians to degrees."""
    rad_to_deg = 180 / np.pi
    return colatitude * rad_to_deg
def gaussian(x, mu, sig):
    """Unnormalised Gaussian bell curve: exp(-(x - mu)^2 / (2 * sig^2))."""
    squared_dev = np.power(x - mu, 2.)
    return np.exp(-squared_dev / (2 * np.power(sig, 2.)))
# Build a Gaussian kernel of length n and tile it across a 784-element
# (28x28, flattened) vector in contiguous chunks.
n= 14
x_values = np.linspace(0, 1, n)
# NOTE(review): this np.ones assignment is dead — it is overwritten on the next line.
kernel = np.ones(n)
kernel = gaussian(x_values, 1, 3)
kernels = np.zeros(784)
for i in range(784//n):
    kernels[i*n:(i+1)*n] = kernel
def conv(colatitude):
    """Smooth the input with a fixed peaked kernel using boundary extension.

    NOTE(review): ``convolve`` is not imported anywhere in this module (it
    presumably comes from astropy.convolution) — confirm before calling.
    """
    #plt.imshow(np.reshape(colatitude, [28, 28]))
    #GT = convolve(GT, kernel=[0, 0.5, 1, 2, 3.5, 5, 3.5, 2, 1, 0.5, 0], boundary='extend')
    return convolve(colatitude, kernel=[0.1, 0.5, 1, 2, 3.5, 5, 3.5, 2, 1, 0.5, 0.1], boundary='extend')
def get_cmap(n, name='hsv'):
    """Return a callable colormap with n distinct entries.

    Each index in 0..n-1 maps to a distinct RGB color; *name* must be a
    standard matplotlib colormap name.
    """
    return plt.cm.get_cmap(name, n)
def info_text(overlapping_nn, overlapping_dm):
    """Render two key/value mappings as aligned text lines.

    Each mapping becomes a section: a dashed separator, a 'Key / Label'
    header, and one left-aligned line per (key, value) pair. The two
    sections are concatenated and returned as a list of strings.
    """
    separator = '----------------------------------------------------'
    header = '{:<40} {}'.format('Key', 'Label')
    lines = []
    for section in (overlapping_nn, overlapping_dm):
        lines.append(separator)
        lines.append(header)
        lines.extend("{:<40} {}".format(key, value) for key, value in section.items())
    return lines
def get_noise(noise_level, position_space, seed):
    """Build the noise covariance operator and draw one noise realisation.

    Returns a tuple (N, n): N is a diagonal (scaling) covariance operator
    with variance *noise_level* over *position_space*, and n is a float64
    sample drawn from it under the given *seed*.
    """
    noise_operator = ift.ScalingOperator(position_space, noise_level)
    with ift.random.Context(seed):
        noise_sample = noise_operator.draw_sample_with_dtype(dtype=np.float64)
    return noise_operator, noise_sample
def rotation(image, img_shape, angle):
    """Rotate a nifty Field interpreted as an image by *angle* degrees.

    The field values (assumed in [0, 1]) are scaled to 8-bit, rotated via
    PIL's Image.rotate convention, rescaled, and wrapped back into a Field
    on the original domain.
    """
    pixels = np.reshape(image.val, img_shape)
    rotated = Image.fromarray(np.uint8(pixels*255)).rotate(angle)
    rotated = np.asarray(rotated)/255
    rotated = np.reshape(rotated, image.shape)
    return ift.Field.from_raw(image.domain, rotated)
def split_validation_set(XTrain, YTrain, val_perc):
    '''
    Shuffle the training data deterministically and split off a validation set.

    Parameters
    ----------
    XTrain, YTrain : np.ndarray
        Training samples and labels; the first axis indexes samples.
    val_perc : float
        Fraction (0..1) of the shuffled data to set aside for validation.

    Returns
    -------
    XTrain, YTrain, XVal, YVal : np.ndarray
        The reduced training set followed by the validation set.

    Permutation of Training Dataset is inspired by an article pusblished on Medium:
    https://medium.com/@mjbhobe/mnist-digits-classification-with-keras-ed6c2374bd0e
    Author: Bhobeé, Manish
    Date of Publication: 29.09.2018
    Relevant Code Section: Permutation of Data and Cut-Out of Validation Set
    Visit: 23.10.2020
    Minor modifications were made on val_percent and names of variables (adjusted to
    my given variable names) and dimensionality of Datasets (mine is reshaped to vectors,
    the author used 2D Arrays.)
    '''
    # shuffle the training dataset (5 times!) with fixed seeds so the split
    # is reproducible across runs
    for i in range(5):
        np.random.seed(i)
        indexes = np.random.permutation(len(XTrain))
        XTrain = XTrain[indexes]
        YTrain = YTrain[indexes]
    # BUG FIX: the original re-assigned val_perc = 0.2 here, silently ignoring
    # the caller's argument. The parameter is now honoured (the visible call
    # site passes val_perc=0.2, so its behavior is unchanged).
    val_count = int(val_perc * len(XTrain))
    # first pick validation set from train_data/labels
    XVal = XTrain[:val_count]
    YVal = YTrain[:val_count]
    # leave rest in training set
    XTrain = XTrain[val_count:]
    YTrain = YTrain[val_count:]
    return XTrain, YTrain, XVal, YVal
| 3,772
| 30.705882
| 120
|
py
|
corrupted_data_classification
|
corrupted_data_classification-main/helper_functions/Mask.py
|
import matplotlib.pyplot as plt
import numpy as np
import io
import cv2
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import random
from skimage.transform import resize
from scipy.special import binom
import nifty6 as ift
def no_mask(position_space):
    """Identity mask: a diagonal operator of all ones over *position_space*."""
    ones_field = ift.Field.from_raw(position_space, np.ones(position_space.shape))
    return ift.DiagonalOperator(ones_field)
def checkerboard_mask(position_space, mask_range):
    """Checkerboard mask over a flattened square image as a DiagonalOperator.

    NOTE(review): hard-coded for a 784-pixel (28x28) image via
    ``np.tile(..., 196)``; *mask_range* is accepted but never used.
    x_shape/y_shape are float-valued arrays from np.sqrt — confirm
    np.reshape accepts them on the targeted numpy version.
    """
    x_shape = np.sqrt(position_space.shape)
    y_shape = np.sqrt(position_space.shape)
    xy_shape = position_space.shape
    checkerboard_x = np.tile(np.array([1, 1, 0, 0]), 196)
    checkerboard = np.reshape(checkerboard_x, [x_shape, y_shape]) * np.reshape(checkerboard_x, [x_shape, y_shape]).T
    mask = np.reshape(checkerboard, xy_shape)
    mask = ift.Field.from_raw(position_space, mask)
    M = ift.DiagonalOperator(mask)
    return M
def half_mask(position_space, mask_range):
    """Mask that zeroes a *mask_range* fraction of each image row.

    Builds a 0/1 field over a flattened square image by toggling a flag at
    row boundaries and at the mask_range offset within each row; a separate
    branch handles a 3-axis position_space (per-channel masks). Returns the
    mask as a DiagonalOperator.
    """
    mask = np.ones(position_space.shape)
    x_shape = np.sqrt(position_space.shape)[0]
    xy_shape = position_space.shape[0]
    # z_shape != 0 signals a 3-axis (e.g. RGB) position space.
    try:
        z_shape = position_space.shape[2]
    except:
        z_shape = 0
    Flag = False
    for i in range(xy_shape):
        # Toggle at the start of each row and at the mask_range column.
        if ((i - np.round(mask_range * x_shape)) % x_shape) == 0 or (i % x_shape) == 0:
            Flag = not Flag
        if Flag == False:
            mask[i] = 0
        else:
            mask[i] = 1
    if z_shape != 0:
        # 3D case: rebuild the mask per channel with the same toggling scheme.
        xy_shape = position_space.shape[0] * position_space.shape[1]
        x_shape = position_space.shape[0]
        mask = np.ones([xy_shape, z_shape])
        for z in range(z_shape):
            Flag = True
            for i in range(xy_shape):
                print(i)
                if ((i - np.round(mask_range * x_shape)) % x_shape) == 0 or (i % x_shape) == 0:
                    Flag = not Flag
                if Flag == False:
                    mask[i, z] = 0
                else:
                    mask[i, z] = 1
    mask = np.reshape(mask, position_space.shape)
    mask = ift.Field.from_raw(position_space, mask)
    M = ift.DiagonalOperator(mask)
    return M
def corner_mask(position_space, mask_range):
    """Mask combining the half_mask toggling with zeroing the lower image half.

    Same row-wise flag toggling as half_mask, but additionally zeroes every
    pixel in the second half of the flattened image; a separate branch handles
    a 3-axis position_space. Returns the mask as a DiagonalOperator.
    """
    # Checkerboard mask for 2D mode
    x_shape = np.sqrt(position_space.shape)[0]
    y_shape = np.sqrt(position_space.shape)[0]
    xy_shape = position_space.shape[0]
    # z_shape != 0 signals a 3-axis (e.g. RGB) position space.
    try:
        z_shape = position_space.shape[2]
    except:
        z_shape = 0
    Flag = False
    mask = np.ones(position_space.shape)
    for i in range(xy_shape):
        if ((i - np.round(mask_range * x_shape)) % x_shape) == 0 or (i % x_shape) == 0:
            Flag = not Flag
        # Zero also the entire lower half of the image.
        if Flag == False or (i >= xy_shape / 2):
            mask[i] = 0
        else:
            mask[i] = 1
    if z_shape != 0:
        xy_shape = position_space.shape[0] * position_space.shape[1]
        x_shape = position_space.shape[0]
        mask = np.ones([xy_shape, z_shape])
        for z in range(z_shape):
            Flag = True
            for i in range(xy_shape):
                if ((i - np.round(mask_range * x_shape)) % x_shape) == 0 or (i % x_shape) == 0:
                    Flag = not Flag
                if Flag == False or (i >= xy_shape / 2):
                    mask[i, z] = 0
                else:
                    mask[i, z] = 1
        # NOTE(review): reshape to 3 axes only happens in the 3D branch.
        mask = np.reshape(mask, [position_space.shape[0], position_space.shape[1], position_space.shape[2]])
    mask = ift.Field.from_raw(position_space, mask)
    M = ift.DiagonalOperator(mask)
    return M
def window_mask(position_space, mask_range):
    """Mask keeping only a central window of the flattened square image.

    Toggles a flag at the mask_range column offsets within each row, then
    zeroes a mask_range-wide band of full rows at the top and bottom of the
    image. A separate branch handles a 3-axis position_space. Returns the
    mask as a DiagonalOperator.
    """
    mask = np.ones(position_space.shape)
    x_shape = np.sqrt(position_space.shape)[0]
    xy_shape = position_space.shape[0]
    # z_shape != 0 signals a 3-axis (e.g. RGB) position space.
    try:
        z_shape = position_space.shape[2]
    except:
        z_shape = 0
    Flag = False
    for i in range(xy_shape):
        # Toggle at mask_range columns from both the left and right row edges.
        if i%x_shape==mask_range or i%x_shape==x_shape-mask_range:
            Flag = not Flag
        if Flag == False:
            mask[i] = 0
        else:
            mask[i] = 1
    if z_shape != 0:
        xy_shape = position_space.shape[0] * position_space.shape[1]
        x_shape = position_space.shape[0]
        mask = np.ones([xy_shape, z_shape])
        for z in range(z_shape):
            Flag = True
            for i in range(xy_shape):
                print(i)
                if ((i - np.round(mask_range * x_shape)) % x_shape) == 0 or (i % x_shape) == 0:
                    Flag = not Flag
                if Flag == False:
                    mask[i, z] = 0
                else:
                    mask[i, z] = 1
    # Zero full rows at the top and bottom of the image.
    mask[0:np.int(mask_range*x_shape)]=0
    mask[np.int(xy_shape)-(mask_range*np.int(x_shape)):]=0
    mask = np.reshape(mask, position_space.shape)
    mask = ift.Field.from_raw(position_space, mask)
    M = ift.DiagonalOperator(mask)
    return M
###
# [2]
###
def random_mask(n_blobs, seed, position_space):
    '''
    Build a mask of *n_blobs* random smooth "blob" shapes as a DiagonalOperator.

    The blobs are drawn as filled Bezier curves onto a matplotlib canvas, the
    canvas is rasterised and thresholded to a 0/1 array, resized, and wrapped
    into a nifty Field over *position_space*. *seed* controls blob placement.

    The Code for creating a 'random mask' is mainly based on the following
    StackOverflow Answer published under CreativeCommons 4.0:
    https://stackoverflow.com/a/50751932
    Author: ImportanceOfBeingErnest [https://stackoverflow.com/users/4124317/importanceofbeingernest]
    Date of Pubilshing: 08. Jun 2018
    Visited: 10.09.2020
    Several modifications were made on the originally published code. Among others, "blobs" are filled
    with color, dimensions are adjusted to this use-case.
    '''
    # Plotting-Output is suppressed by plt.ioff(). Plotting is necessary for creating a random mask.
    plt.ioff()
    def get_curve(points, **kw):
        # Connect consecutive points with Bezier segments and concatenate them.
        segments = []
        for i in range(len(points) - 1):
            seg = Segment(points[i, :2], points[i + 1, :2], points[i, 2], points[i + 1, 2], **kw)
            segments.append(seg)
        curve = np.concatenate([s.curve for s in segments])
        return segments, curve
    def ccw_sort(p):
        # Sort 2D points counter-clockwise around their centroid.
        d = p - np.mean(p, axis=0)
        s = np.arctan2(d[:, 0], d[:, 1])
        return p[np.argsort(s), :]
    # Bernstein polynomial basis used by the Bezier evaluation below.
    bernstein = lambda n, k, t: binom(n,k)* t**k * (1.-t)**(n-k)
    def bezier(points, num=200):
        # Evaluate the Bezier curve defined by *points* at *num* parameter values.
        N = len(points)
        t = np.linspace(0, 1, num=num)
        curve = np.zeros((num, 2))
        for i in range(N):
            curve += np.outer(bernstein(N - 1, i, t), points[i])
        return curve
    class Segment():
        # One cubic Bezier segment between p1 and p2 with tangent angles angle1/angle2.
        def __init__(self, p1, p2, angle1, angle2, **kw):
            self.p1 = p1; self.p2 = p2
            self.angle1 = angle1; self.angle2 = angle2
            self.numpoints = kw.get("numpoints", 100)
            r = kw.get("r", 0.3)
            d = np.sqrt(np.sum((self.p2-self.p1)**2))
            self.r = r*d
            self.p = np.zeros((4,2))
            self.p[0,:] = self.p1[:]
            self.p[3,:] = self.p2[:]
            self.calc_intermediate_points(self.r)
        def calc_intermediate_points(self,r):
            # Place the two inner control points along the endpoint tangents.
            self.p[1,:] = self.p1 + np.array([self.r*np.cos(self.angle1),
                                              self.r*np.sin(self.angle1)])
            self.p[2,:] = self.p2 + np.array([self.r*np.cos(self.angle2+np.pi),
                                              self.r*np.sin(self.angle2+np.pi)])
            self.curve = bezier(self.p,self.numpoints)
    def get_bezier_curve(a, rad=0.2, edgy=0):
        np.random.seed(10)
        """ given an array of points *a*, create a curve through
        those points.
        *rad* is a number between 0 and 1 to steer the distance of
        control points.
        *edgy* is a parameter which controls how "edgy" the curve is,
        edgy=0 is smoothest."""
        p = np.arctan(edgy) / np.pi + .5
        a = ccw_sort(a)
        a = np.append(a, np.atleast_2d(a[0, :]), axis=0)
        d = np.diff(a, axis=0)
        ang = np.arctan2(d[:, 1], d[:, 0])
        f = lambda ang: (ang >= 0) * ang + (ang < 0) * (ang + 2 * np.pi)
        ang = f(ang)
        ang1 = ang
        ang2 = np.roll(ang, 1)
        ang = p * ang1 + (1 - p) * ang2 + (np.abs(ang2 - ang1) > np.pi) * np.pi
        ang = np.append(ang, [ang[0]])
        a = np.append(a, np.atleast_2d(ang).T, axis=1)
        s, c = get_curve(a, r=rad, method="var")
        x, y = c.T
        return x, y, a
    def get_random_points(n=5, scale=0.8, mindst=5, rec=0):
        """ create n random points in the unit square, which are *mindst*
        apart, then scale them."""
        mindst = mindst or .7 / n
        a = np.random.rand(n, 2)
        d = np.sqrt(np.sum(np.diff(ccw_sort(a), axis=0), axis=1) ** 2)
        if np.all(d >= mindst) or rec >= 200:
            return a * scale
        else:
            return get_random_points(n=n, scale=scale, mindst=mindst, rec=rec + 1)
    # Draw the filled blobs onto a fresh figure.
    fig = plt.figure()
    rad = 0.5
    edgy = 0.6
    random.seed(seed)
    for i, c in enumerate([[random.uniform(0, 1) for x in range(2)] for y in range(n_blobs)]):
        np.random.seed(i + seed)
        a = get_random_points(n=7, scale=0.2) + c
        x, y, _ = get_bezier_curve(a, rad=rad, edgy=edgy)
        plt.plot(x, y, c='black')
        plt.fill_between(x, y)
    plt.axis('off')
    fig.canvas.draw()
    # Rasterise the canvas to a grayscale array and threshold it to 0/1.
    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    data = (data[:, :, 0] + data[:, :, 1] + data[:, :, 2]) / 3
    data = data / np.max(data)
    data[data > 0.99] = 1
    data[data != 1] = 0
    data = resize(data, [50, 50])
    data[data < 0.75] = 0
    data[data >= 0.75] = 1
    # Crop/reshape to match the position_space layout.
    # NOTE(review): the first branch appears tailored to a 32x32x3 space — confirm.
    if 50 - position_space.shape[0] - 10 > 0:
        data = data[50 - position_space.shape[0] - 10:50 - 10, 50 - position_space.shape[0] - 10:50 - 10]
        data = np.reshape(data, position_space.shape[0] * position_space.shape[1])
        data_3D = np.zeros([32,32,3])
        data_3D[:,:,0] = np.reshape(data, [32, 32])
        data_3D[:,:,1] = np.reshape(data, [32, 32])
        data_3D[:,:,2] = np.reshape(data, [32, 32])
        data = data_3D
    else:
        data = data[50 - np.int(np.sqrt(position_space.shape[0])) - 10:50 - 10,
                    50 - np.int(np.sqrt(position_space.shape[0])) - 10:50 - 10]
        data = np.reshape(data, position_space.shape[0])
    data = np.array(data)
    # Restore original plotting settings as these were overwritten by plt.ioff()
    plt.close()
    plt.ion()
    mpl.rcParams['figure.dpi']= 200
    mpl.rcParams['font.size'] = 9.0
    mask = ift.Field.from_raw(position_space, data)
    M = ift.DiagonalOperator(mask)
    return M
| 10,324
| 35.22807
| 116
|
py
|
corrupted_data_classification
|
corrupted_data_classification-main/helper_functions/Conv.py
|
'''
The Code for creating a 'convolution' [Conv.py] is mainly based on the following
GitHub Repository "Convolution as Matrix Multiplication":
https://github.com/alisaaalehi/convolution_as_multiplication
Author: Salehi, Ali, [https://github.com/alisaaalehi]
Date of last commit by author: 08. Jun 2019
Visited: 21.09.2020
Modifications were only made on output shape and used filter/convolution matrix.
'''
import numpy as np
import scipy
from scipy.linalg import toeplitz
import nifty6 as ift
def sobel(amplitude, position_space):
    """Return a NIFTy operator applying a Sobel (x-gradient) convolution.

    The 3x3 Sobel kernel, scaled by ``amplitude``, is converted into a
    doubly-blocked Toeplitz matrix so the 2-D convolution becomes one matrix
    multiplication (construction from the repository cited in the module
    docstring).

    Parameters
    ----------
    amplitude : float
        Scale factor applied to the kernel.
    position_space : NIFTy domain
        Target domain.  A 1-D domain is assumed to hold a flattened square
        image; a 3-D domain is treated as interleaved RGB of a 32x32 image
        (the 3072-sized matrices below hard-code that) -- TODO confirm with
        callers.

    Fixes vs. original: ``np.int`` (removed in NumPy >= 1.24) replaced by
    ``int``; bare ``except`` narrowed to ``IndexError``; unused
    block-diagonal ``padded_conv`` dead code removed.
    """
    F = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) * amplitude  # Sobel x-kernel
    F_row_num, F_col_num = F.shape
    # Output dimensions: a >=2-D domain exposes both axes; a 1-D (flattened)
    # domain is assumed square.
    try:
        output_row_num = position_space.shape[0]
        output_col_num = position_space.shape[1]
    except IndexError:  # 1-D shape tuple has no second axis
        output_row_num = int(np.sqrt(position_space.shape[0]))
        output_col_num = output_row_num
    # Zero-pad the kernel up to the output size.
    F_zero_padded = np.pad(F, ((output_row_num - F_row_num, 0),
                               (0, output_col_num - F_col_num)),
                           'constant', constant_values=0)
    # One Toeplitz matrix per padded-kernel row, iterated last row to first.
    toeplitz_list = []
    for i in range(F_zero_padded.shape[0] - 1, -1, -1):
        c = F_zero_padded[i, :]
        # The first row must be given explicitly, otherwise scipy derives it
        # from ``c`` and the result is wrong.
        r = np.r_[c[0], np.zeros(output_col_num - 1)]
        toeplitz_list.append(toeplitz(c, r))
    # Doubly-blocked indices: which small Toeplitz matrix fills which block.
    c = range(1, F_zero_padded.shape[0] + 1)
    r = np.r_[c[0], np.zeros(output_row_num - 1, dtype=int)]
    doubly_indices = toeplitz(c, r)
    # Assemble the doubly-blocked matrix.
    b_h, b_w = toeplitz_list[0].shape  # height/width of one block
    doubly_blocked = np.zeros((b_h * doubly_indices.shape[0],
                               b_w * doubly_indices.shape[1]))
    for i in range(doubly_indices.shape[0]):
        for j in range(doubly_indices.shape[1]):
            doubly_blocked[i * b_h:(i + 1) * b_h, j * b_w:(j + 1) * b_w] = \
                toeplitz_list[doubly_indices[i, j] - 1]
    conv_matrix = doubly_blocked
    if len(position_space.shape) == 3:
        # Interleaved-RGB case: one padded convolution per channel offset,
        # chained into a single operator.
        channel_ops = []
        for offset in range(3):
            padded = np.eye(3072)
            for i in range(offset, 3072, 3):
                for j in range(offset, 3072, 3):
                    padded[i, j] = conv_matrix[i // 3, j // 3]
            channel_ops.append(
                ift.MatrixProductOperator(position_space, padded, flatten=True))
        return channel_ops[0] @ channel_ops[1] @ channel_ops[2]
    return ift.MatrixProductOperator(position_space, conv_matrix)
def gaussian_blur(kernel_size, amplitude, position_space):
    """Return a NIFTy operator applying a Gaussian blur convolution.

    A ``kernel_size`` x ``kernel_size`` Gaussian kernel with standard
    deviation ``amplitude`` is converted into a doubly-blocked Toeplitz
    matrix so the 2-D convolution becomes one matrix multiplication.

    Parameters
    ----------
    kernel_size : int
        Side length of the Gaussian kernel.
    amplitude : float
        Standard deviation of the Gaussian.
    position_space : NIFTy domain
        Target domain; 1-D domains are assumed to hold flattened square
        images, 3-D domains interleaved RGB of 32x32 images -- TODO confirm.

    Fixes vs. original: ``np.int`` (removed in NumPy >= 1.24) replaced by
    ``int``; bare ``except`` narrowed to ``IndexError``; unused
    block-diagonal ``padded_conv`` dead code removed.
    """
    def gkern(l=5, sig=1.):
        """Gaussian kernel with side length ``l`` and std ``sig``.

        Copyright: https://stackoverflow.com/a/43346070 (visited 21.11.2020).
        """
        ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
        xx, yy = np.meshgrid(ax, ax)
        kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))
        return kernel / np.sum(kernel)

    F = gkern(l=kernel_size, sig=amplitude)
    F_row_num, F_col_num = F.shape
    # Output dimensions: a >=2-D domain exposes both axes; a 1-D (flattened)
    # domain is assumed square.
    try:
        output_row_num = position_space.shape[0]
        output_col_num = position_space.shape[1]
    except IndexError:  # 1-D shape tuple has no second axis
        output_row_num = int(np.sqrt(position_space.shape[0]))
        output_col_num = output_row_num
    # Zero-pad the kernel up to the output size.
    F_zero_padded = np.pad(F, ((output_row_num - F_row_num, 0),
                               (0, output_col_num - F_col_num)),
                           'constant', constant_values=0)
    # One Toeplitz matrix per padded-kernel row, iterated last row to first.
    toeplitz_list = []
    for i in range(F_zero_padded.shape[0] - 1, -1, -1):
        c = F_zero_padded[i, :]
        # The first row must be given explicitly, otherwise scipy derives it
        # from ``c`` and the result is wrong.
        r = np.r_[c[0], np.zeros(output_col_num - 1)]
        toeplitz_list.append(toeplitz(c, r))
    # Doubly-blocked indices: which small Toeplitz matrix fills which block.
    c = range(1, F_zero_padded.shape[0] + 1)
    r = np.r_[c[0], np.zeros(output_row_num - 1, dtype=int)]
    doubly_indices = toeplitz(c, r)
    # Assemble the doubly-blocked matrix.
    b_h, b_w = toeplitz_list[0].shape  # height/width of one block
    doubly_blocked = np.zeros((b_h * doubly_indices.shape[0],
                               b_w * doubly_indices.shape[1]))
    for i in range(doubly_indices.shape[0]):
        for j in range(doubly_indices.shape[1]):
            doubly_blocked[i * b_h:(i + 1) * b_h, j * b_w:(j + 1) * b_w] = \
                toeplitz_list[doubly_indices[i, j] - 1]
    conv_matrix = doubly_blocked
    if len(position_space.shape) == 3:
        # Interleaved-RGB case: one padded convolution per channel offset,
        # chained into a single operator.
        channel_ops = []
        for offset in range(3):
            padded = np.eye(3072)
            for i in range(offset, 3072, 3):
                for j in range(offset, 3072, 3):
                    padded[i, j] = conv_matrix[i // 3, j // 3]
            channel_ops.append(
                ift.MatrixProductOperator(position_space, padded, flatten=True))
        return channel_ops[0] @ channel_ops[1] @ channel_ops[2]
    return ift.MatrixProductOperator(position_space, conv_matrix)
def edge_detection(amplitude, position_space):
    """Return a NIFTy operator applying a Laplacian edge-detection kernel.

    The 3x3 edge kernel (scaled by ``amplitude``/16) is converted into a
    doubly-blocked Toeplitz matrix so the 2-D convolution becomes one matrix
    multiplication.

    Parameters
    ----------
    amplitude : float
        Scale factor applied to the kernel.
    position_space : NIFTy domain
        Target domain; 1-D domains are assumed to hold flattened square
        images, 3-D domains interleaved RGB of 32x32 images -- TODO confirm.

    Fixes vs. original: ``np.int`` (removed in NumPy >= 1.24) replaced by
    ``int``; bare ``except`` narrowed to ``IndexError``; unused
    block-diagonal ``padded_conv`` dead code removed.
    """
    F = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]) * 1 / 16 * amplitude
    F_row_num, F_col_num = F.shape
    # Output dimensions: a >=2-D domain exposes both axes; a 1-D (flattened)
    # domain is assumed square.
    try:
        output_row_num = position_space.shape[0]
        output_col_num = position_space.shape[1]
    except IndexError:  # 1-D shape tuple has no second axis
        output_row_num = int(np.sqrt(position_space.shape[0]))
        output_col_num = output_row_num
    # Zero-pad the kernel up to the output size.
    F_zero_padded = np.pad(F, ((output_row_num - F_row_num, 0),
                               (0, output_col_num - F_col_num)),
                           'constant', constant_values=0)
    # One Toeplitz matrix per padded-kernel row, iterated last row to first.
    toeplitz_list = []
    for i in range(F_zero_padded.shape[0] - 1, -1, -1):
        c = F_zero_padded[i, :]
        # The first row must be given explicitly, otherwise scipy derives it
        # from ``c`` and the result is wrong.
        r = np.r_[c[0], np.zeros(output_col_num - 1)]
        toeplitz_list.append(toeplitz(c, r))
    # Doubly-blocked indices: which small Toeplitz matrix fills which block.
    c = range(1, F_zero_padded.shape[0] + 1)
    r = np.r_[c[0], np.zeros(output_row_num - 1, dtype=int)]
    doubly_indices = toeplitz(c, r)
    # Assemble the doubly-blocked matrix.
    b_h, b_w = toeplitz_list[0].shape  # height/width of one block
    doubly_blocked = np.zeros((b_h * doubly_indices.shape[0],
                               b_w * doubly_indices.shape[1]))
    for i in range(doubly_indices.shape[0]):
        for j in range(doubly_indices.shape[1]):
            doubly_blocked[i * b_h:(i + 1) * b_h, j * b_w:(j + 1) * b_w] = \
                toeplitz_list[doubly_indices[i, j] - 1]
    conv_matrix = doubly_blocked
    if len(position_space.shape) == 3:
        # Interleaved-RGB case: one padded convolution per channel offset,
        # chained into a single operator.
        channel_ops = []
        for offset in range(3):
            padded = np.eye(3072)
            for i in range(offset, 3072, 3):
                for j in range(offset, 3072, 3):
                    padded[i, j] = conv_matrix[i // 3, j // 3]
            channel_ops.append(
                ift.MatrixProductOperator(position_space, padded, flatten=True))
        return channel_ops[0] @ channel_ops[1] @ channel_ops[2]
    return ift.MatrixProductOperator(position_space, conv_matrix)
def own(amplitude, conv_matrix, position_space):
    """Return a NIFTy operator applying a caller-supplied convolution kernel.

    Same doubly-blocked-Toeplitz construction as ``sobel`` /
    ``gaussian_blur`` / ``edge_detection``, but with the kernel provided by
    the caller.

    Parameters
    ----------
    amplitude : float
        Scale factor applied to ``conv_matrix``.
    conv_matrix : numpy.ndarray
        2-D convolution kernel (e.g. 3x3).
    position_space : NIFTy domain
        Target domain; 1-D domains are assumed to hold flattened square
        images, 3-D domains interleaved RGB of 32x32 images -- TODO confirm.

    Fixes vs. original: ``np.int`` (removed in NumPy >= 1.24) replaced by
    ``int``; bare ``except`` narrowed to ``IndexError``; unused
    block-diagonal ``padded_conv`` dead code removed; the big convolution
    matrix no longer shadows the ``conv_matrix`` parameter.
    """
    F = conv_matrix * amplitude
    F_row_num, F_col_num = F.shape
    # Output dimensions: a >=2-D domain exposes both axes; a 1-D (flattened)
    # domain is assumed square.
    try:
        output_row_num = position_space.shape[0]
        output_col_num = position_space.shape[1]
    except IndexError:  # 1-D shape tuple has no second axis
        output_row_num = int(np.sqrt(position_space.shape[0]))
        output_col_num = output_row_num
    # Zero-pad the kernel up to the output size.
    F_zero_padded = np.pad(F, ((output_row_num - F_row_num, 0),
                               (0, output_col_num - F_col_num)),
                           'constant', constant_values=0)
    # One Toeplitz matrix per padded-kernel row, iterated last row to first.
    toeplitz_list = []
    for i in range(F_zero_padded.shape[0] - 1, -1, -1):
        c = F_zero_padded[i, :]
        # The first row must be given explicitly, otherwise scipy derives it
        # from ``c`` and the result is wrong.
        r = np.r_[c[0], np.zeros(output_col_num - 1)]
        toeplitz_list.append(toeplitz(c, r))
    # Doubly-blocked indices: which small Toeplitz matrix fills which block.
    c = range(1, F_zero_padded.shape[0] + 1)
    r = np.r_[c[0], np.zeros(output_row_num - 1, dtype=int)]
    doubly_indices = toeplitz(c, r)
    # Assemble the doubly-blocked matrix.
    b_h, b_w = toeplitz_list[0].shape  # height/width of one block
    full_matrix = np.zeros((b_h * doubly_indices.shape[0],
                            b_w * doubly_indices.shape[1]))
    for i in range(doubly_indices.shape[0]):
        for j in range(doubly_indices.shape[1]):
            full_matrix[i * b_h:(i + 1) * b_h, j * b_w:(j + 1) * b_w] = \
                toeplitz_list[doubly_indices[i, j] - 1]
    if len(position_space.shape) == 3:
        # Interleaved-RGB case: one padded convolution per channel offset,
        # chained into a single operator.
        channel_ops = []
        for offset in range(3):
            padded = np.eye(3072)
            for i in range(offset, 3072, 3):
                for j in range(offset, 3072, 3):
                    padded[i, j] = full_matrix[i // 3, j // 3]
            channel_ops.append(
                ift.MatrixProductOperator(position_space, padded, flatten=True))
        return channel_ops[0] @ channel_ops[1] @ channel_ops[2]
    return ift.MatrixProductOperator(position_space, full_matrix)
| 14,422
| 38.952909
| 114
|
py
|
corrupted_data_classification
|
corrupted_data_classification-main/NNs/Fashion-MNIST/pretrained_supervised_ae10/autoencoder_fmnist.py
|
# -*- coding: utf-8 -*-
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
# Commented out IPython magic to ensure Python compatibility.
# Colab and system related
import os
import sys
###
# Necessary to convert tensorflow-object (e.g. Neural Network) to Nifty-Operator
sys.path.append('corrupted_data_classification/helper_functions/')
###
import tensorflow as tf
# Include path to access helper functions and Mask / Conv Operator
sys.path.append('corrupted_data_classification/helper_functions/')
from helper_functions import clear_axis, gaussian, get_cmap, info_text, get_noise, rotation, split_validation_set
import Mask # Masking Operator
import Conv # Convolution Operator
sys.path.remove
# Tensorflow
# Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.dpi'] = 200 # 200 e.g. is really fine, but slower
# Numerics
import random
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy.stats import multivariate_normal
import sklearn as sk
from sklearn import decomposition
# Load MNIST Dataset
mnist = tf.keras.datasets.fashion_mnist
(XTrain, YTrain), (XTest, YTest) = mnist.load_data()
XTrain, XTest = XTrain / 255.0, XTest / 255.0
# Cut out last 100 Training images for comparison
XTrain = XTrain[0:-100]
YTrain = YTrain[0:-100]
# Reshape Xtrain and XTest to 1x784 Vectors instead of 28x28 arrays
XTrain = XTrain.reshape((len(XTrain), np.prod(XTrain.shape[1:])))
XTest = XTest.reshape((len(XTest), np.prod(XTest.shape[1:])))
XTrain, YTrain, XVal, YVal = split_validation_set(XTrain, YTrain, val_perc=0.2)
def autoencoder_deep(latent_space_size):
    """Build a supervised deep autoencoder for 784-dim (flattened 28x28) images.

    The encoder compresses 784 -> 512 -> 256 -> 128 -> ``latent_space_size``
    (linear, L2-regularized); the decoder mirrors it back to 784 with a
    sigmoid output.  The combined model emits both the reconstruction and
    the latent code, so it can be trained jointly on reconstruction and
    classification losses.

    Returns:
        (Encoder, Decoder, model): the two halves as standalone Keras
        models plus the combined two-output model.
    """
    layers = tf.keras.layers
    inp = layers.Input(shape=784)
    x = layers.Dense(512, activation='selu', kernel_initializer='lecun_normal')(inp)
    x = layers.Dense(256, activation='selu', kernel_initializer='lecun_normal')(x)
    x = layers.Dense(128, activation='selu', kernel_initializer='lecun_normal')(x)
    encoded = layers.Dense(
        latent_space_size, activation='linear',
        activity_regularizer=tf.keras.regularizers.L2(0.1))(x)
    # Decoder: mirror of the encoder, with its own Input so it can be used
    # standalone.
    dec_inp = layers.Input(shape=latent_space_size)
    y = layers.Dense(128, activation='selu', kernel_initializer='lecun_normal')(dec_inp)
    y = layers.Dense(256, activation='selu', kernel_initializer='lecun_normal')(y)
    y = layers.Dense(512, activation='selu', kernel_initializer='lecun_normal')(y)
    decoded = layers.Dense(784, activation='sigmoid')(y)
    # Decouple the halves, then wire them into one two-output model.
    Encoder = tf.keras.Model(inp, encoded)
    Decoder = tf.keras.Model(dec_inp, decoded)
    model = tf.keras.Model(inp, [Decoder(encoded), encoded])
    return Encoder, Decoder, model
Encoder, Decoder, model = autoencoder_deep(10)
# Loss Function for Reconstruction of images (i.e. overall Autoencoder)
def loss_fn_AE(y_true, y_pred):
    """Reconstruction loss for the autoencoder output.

    Binary cross-entropy between the input image and its reconstruction;
    the decoder already ends in a sigmoid, so no extra activation is
    applied here.  (Commented-out experimental alternatives removed.)
    """
    return tf.losses.binary_crossentropy(y_true, y_pred)
# Loss Function for Classification of Images in latent space
def loss_fn_Encoder(y_true, y_pred):
    """Classification loss on the latent code.

    The encoder's linear output is turned into class probabilities with a
    softmax before sparse categorical cross-entropy is applied.
    """
    probs = tf.nn.softmax(y_pred)
    return tf.losses.sparse_categorical_crossentropy(y_true, probs)
# Compile with both losses: reconstruction (loss_fn_AE) on the decoded
# output and classification (loss_fn_Encoder) on the latent code.
model.compile(optimizer='adam',
              loss=[loss_fn_AE, loss_fn_Encoder],
              metrics=['accuracy'])
# Train on GPU, then report test-set performance.
# NOTE(review): leading indentation was lost in this dump; the evaluate
# call is assumed to sit outside the tf.device scope -- confirm.
with tf.device('/device:GPU:0'):
    results = model.fit(XTrain, [XTrain, YTrain], epochs=25)
model.evaluate(XTest, [XTest, YTest], verbose=2)
# Persist the trained halves separately in TensorFlow SavedModel format.
Decoder.save('./corrupted_data_classification/NNs/Fashion-MNIST/pretrained_supervised_ae10/Decoder/', save_format='tf')
Encoder.save('./corrupted_data_classification/NNs/Fashion-MNIST/pretrained_supervised_ae10/Encoder/', save_format='tf')
| 4,227
| 40.048544
| 119
|
py
|
corrupted_data_classification
|
corrupted_data_classification-main/NNs/MNIST/pretrained_supervised_ae10/autoencoder.py
|
# -*- coding: utf-8 -*-
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
# Commented out IPython magic to ensure Python compatibility.
# Colab and system related
import os
import sys
###
# Necessary to convert tensorflow-object (e.g. Neural Network) to Nifty-Operator
sys.path.append('corrupted_data_classification/helper_functions/')
###
import tensorflow as tf
# Include path to access helper functions and Mask / Conv Operator
sys.path.append('corrupted_data_classification/helper_functions/')
from helper_functions import clear_axis, gaussian, get_cmap, info_text, get_noise, rotation, split_validation_set
import Mask # Masking Operator
import Conv # Convolution Operator
sys.path.remove
# Tensorflow
# Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.dpi'] = 200 # 200 e.g. is really fine, but slower
# Numerics
import random
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy.stats import multivariate_normal
import sklearn as sk
from sklearn import decomposition
# Load the MNIST dataset and scale pixel values to [0, 1].
mnist = tf.keras.datasets.mnist
(XTrain, YTrain), (XTest, YTest) = mnist.load_data()
XTrain, XTest = XTrain / 255.0, XTest / 255.0
# Hold back the last 100 training images for later comparison.
XTrain = XTrain[0:-100]
YTrain = YTrain[0:-100]
# Flatten 28x28 images to 784-dimensional vectors.
XTrain = XTrain.reshape((len(XTrain), np.prod(XTrain.shape[1:])))
XTest = XTest.reshape((len(XTest), np.prod(XTest.shape[1:])))
# Carve a 20% validation split off the training set (project helper).
XTrain, YTrain, XVal, YVal = split_validation_set(XTrain, YTrain, val_perc=0.2)
def autoencoder_deep(latent_space_size):
    """Build a supervised deep autoencoder for 784-dim (flattened 28x28) images.

    The encoder compresses 784 -> 512 -> 256 -> 128 -> ``latent_space_size``
    (linear, L2-regularized); the decoder mirrors it back to 784 with a
    sigmoid output.  The combined model emits both the reconstruction and
    the latent code, so it can be trained jointly on reconstruction and
    classification losses.

    Returns:
        (Encoder, Decoder, model): the two halves as standalone Keras
        models plus the combined two-output model.
    """
    layers = tf.keras.layers
    inp = layers.Input(shape=784)
    x = layers.Dense(512, activation='selu', kernel_initializer='lecun_normal')(inp)
    x = layers.Dense(256, activation='selu', kernel_initializer='lecun_normal')(x)
    x = layers.Dense(128, activation='selu', kernel_initializer='lecun_normal')(x)
    encoded = layers.Dense(
        latent_space_size, activation='linear',
        activity_regularizer=tf.keras.regularizers.L2(0.001))(x)
    # Decoder: mirror of the encoder, with its own Input so it can be used
    # standalone.
    dec_inp = layers.Input(shape=latent_space_size)
    y = layers.Dense(128, activation='selu', kernel_initializer='lecun_normal')(dec_inp)
    y = layers.Dense(256, activation='selu', kernel_initializer='lecun_normal')(y)
    y = layers.Dense(512, activation='selu', kernel_initializer='lecun_normal')(y)
    decoded = layers.Dense(784, activation='sigmoid')(y)
    # Decouple the halves, then wire them into one two-output model.
    Encoder = tf.keras.Model(inp, encoded)
    Decoder = tf.keras.Model(dec_inp, decoded)
    model = tf.keras.Model(inp, [Decoder(encoded), encoded])
    return Encoder, Decoder, model
Encoder, Decoder, model = autoencoder_deep(10)
# Loss Function for Reconstruction of images (i.e. overall Autoencoder)
def loss_fn_AE(y_true, y_pred):
    """Reconstruction loss for the autoencoder output.

    Binary cross-entropy between the input image and its reconstruction;
    the decoder already ends in a sigmoid, so no extra activation is
    applied here.  (Commented-out experimental alternatives removed.)
    """
    return tf.losses.binary_crossentropy(y_true, y_pred)
# Loss Function for Classification of Images in latent space
def loss_fn_Encoder(y_true, y_pred):
    """Classification loss on the latent code.

    The encoder's linear output is turned into class probabilities with a
    softmax before sparse categorical cross-entropy is applied.
    """
    probs = tf.nn.softmax(y_pred)
    return tf.losses.sparse_categorical_crossentropy(y_true, probs)
# Compile with both losses: reconstruction (loss_fn_AE) on the decoded
# output and classification (loss_fn_Encoder) on the latent code.
model.compile(optimizer='adam',
              loss=[loss_fn_AE, loss_fn_Encoder],
              metrics=['accuracy'])
# Train, then report test-set performance.
results = model.fit(XTrain, [XTrain, YTrain], epochs=25)
model.evaluate(XTest, [XTest, YTest], verbose=2)
# Persist the trained halves separately in TensorFlow SavedModel format.
Decoder.save('./corrupted_data_classification/NNs/MNIST/pretrained_supervised_ae10/Decoder/', save_format='tf')
Encoder.save('./corrupted_data_classification/NNs/MNIST/pretrained_supervised_ae10/Encoder/', save_format='tf')
# Plot classification accuracy over epochs ('dense_3' is the latent layer).
plt.plot(results.history['dense_3_accuracy'])
| 4,195
| 39.346154
| 113
|
py
|
mmyolo
|
mmyolo-main/setup.py
|
#!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension
def readme():
    """Return the text of README.md (used as the package long description)."""
    with open('README.md', encoding='utf-8') as fh:
        return fh.read()
version_file = 'mmyolo/version.py'
def get_version():
    """Extract ``__version__`` from ``version_file`` without importing the package.

    The file is executed into a private namespace dict.  The original exec'd
    into the function's ``locals()`` and read the result back, which the
    language reference does not guarantee to work (``exec`` cannot reliably
    rebind function locals); an explicit namespace is the documented way.
    """
    namespace = {}
    with open(version_file) as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file.

    Version pins are kept or stripped depending on ``with_version``;
    ``-r`` includes are followed recursively; ``-e``/``@git+`` entries are
    passed through; ``;platform`` markers are preserved (except on 3.4).

    Args:
        fname (str): path to the requirements file.
        with_version (bool): if True, include version specifiers.

    Returns:
        List[str]: the requirement items.

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists

    def parse_line(line):
        """Yield info dicts for a single (stripped, non-comment) line."""
        if line.startswith('-r '):
            # Requirements may be listed in another file; recurse into it.
            yield from parse_require_file(line.split(' ')[1])
            return
        info = {'line': line}
        if line.startswith('-e '):
            info['package'] = line.split('#egg=')[1]
        elif '@git+' in line:
            info['package'] = line
        else:
            # Split off the version specifier, if any.
            pat = '(' + '|'.join(['>=', '==', '>']) + ')'
            parts = [p.strip() for p in re.split(pat, line, maxsplit=1)]
            info['package'] = parts[0]
            if len(parts) > 1:
                op, rest = parts[1:]
                if ';' in rest:
                    # Platform-specific dependency, e.g. "1.0;platform_system=='Linux'"
                    version, platform_deps = map(str.strip, rest.split(';'))
                    info['platform_deps'] = platform_deps
                else:
                    version = rest  # NOQA
                info['version'] = (op, version)
        yield info

    def parse_require_file(fpath):
        with open(fpath) as f:
            for raw in f.readlines():
                stripped = raw.strip()
                if stripped and not stripped.startswith('#'):
                    yield from parse_line(stripped)

    def gen_packages_items():
        if not exists(fname):
            return
        for info in parse_require_file(fname):
            parts = [info['package']]
            if with_version and 'version' in info:
                parts.extend(info['version'])
            if not sys.version.startswith('3.4'):
                # platform markers are broken on Python 3.4
                platform_deps = info.get('platform_deps')
                if platform_deps is not None:
                    parts.append(';' + platform_deps)
            yield ''.join(parts)

    return list(gen_packages_items())
def add_mim_extension():
    """Add extra files that are required to support MIM into the package.

    Tools/configs/demo and the model index are mirrored under
    ``mmyolo/.mim`` -- symlinked for editable installs (``pip install -e .``,
    copied on Windows where symlinks fail), copied for regular installs and
    source distributions.  Any other setup.py command is a no-op.
    """
    if 'develop' in sys.argv:
        # Editable install: symlink, except on Windows.
        mode = 'copy' if platform.system() == 'Windows' else 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        # `pip install .` or source-distribution build.
        mode = 'copy'
    else:
        return

    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmyolo', '.mim')
    os.makedirs(mim_path, exist_ok=True)

    for filename in ['tools', 'configs', 'demo', 'model-index.yml']:
        if not osp.exists(filename):
            continue
        src_path = osp.join(repo_path, filename)
        tar_path = osp.join(mim_path, filename)
        # Clear any stale file, link, or tree at the target first.
        if osp.isfile(tar_path) or osp.islink(tar_path):
            os.remove(tar_path)
        elif osp.isdir(tar_path):
            shutil.rmtree(tar_path)
        if mode == 'symlink':
            src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
            os.symlink(src_relpath, tar_path)
        elif mode == 'copy':
            if osp.isfile(src_path):
                shutil.copyfile(src_path, tar_path)
            elif osp.isdir(src_path):
                shutil.copytree(src_path, tar_path)
            else:
                warnings.warn(f'Cannot copy file {src_path}.')
        else:
            raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
    # Mirror tools/configs/demo into mmyolo/.mim so `mim` can locate them.
    add_mim_extension()
    setup(
        name='mmyolo',
        version=get_version(),
        description='OpenMMLab Toolbox of YOLO',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='MMYOLO Contributors',
        author_email='openmmlab@gmail.com',
        keywords='computer vision, object detection',
        url='https://github.com/open-mmlab/mmyolo',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        include_package_data=True,
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
        ],
        license='GPL License 3.0',
        # Requirement groups are parsed from the checked-in requirement files.
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'mim': parse_requirements('requirements/mminstall.txt'),
        },
        ext_modules=[],
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
| 6,862
| 34.744792
| 125
|
py
|
mmyolo
|
mmyolo-main/tools/test.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmengine.config import Config, ConfigDict, DictAction
from mmengine.evaluator import DumpResults
from mmengine.runner import Runner
from mmyolo.registry import RUNNERS
from mmyolo.utils import is_metainfo_lower
# TODO: support fuse_conv_bn
def parse_args():
    """Parse command-line arguments for MMYOLO test/evaluation."""
    parser = argparse.ArgumentParser(
        description='MMYOLO test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    parser.add_argument(
        '--out',
        type=str,
        help='output result file (must be a .pkl file) in pickle format')
    parser.add_argument(
        '--json-prefix',
        type=str,
        help='the prefix of the output json file without perform evaluation, '
        'which is useful when you want to format the result to a specific '
        'format and submit it to the test server')
    parser.add_argument(
        '--tta',
        action='store_true',
        help='Whether to use test time augmentation')
    parser.add_argument(
        '--show', action='store_true', help='show prediction results')
    parser.add_argument(
        '--deploy',
        action='store_true',
        help='Switch model to deployment mode')
    parser.add_argument(
        '--show-dir',
        help='directory where painted images will be saved. '
        'If specified, it will be automatically saved '
        'to the work_dir/timestamp/show_dir')
    parser.add_argument(
        '--wait-time', type=float, default=2, help='the interval of show (s)')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Distributed launchers expect LOCAL_RANK in the environment.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Entry point: build an mmengine Runner from the config and run testing."""
    args = parse_args()

    # load config
    cfg = Config.fromfile(args.config)
    # replace the ${key} with the value of cfg.key
    # cfg = replace_cfg_vals(cfg)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])

    cfg.load_from = args.checkpoint

    # Register the visualization hook when results are to be shown/saved.
    if args.show or args.show_dir:
        cfg = trigger_visualization_hook(cfg, args)

    if args.deploy:
        cfg.custom_hooks.append(dict(type='SwitchToDeployHook'))

    # add `format_only` and `outfile_prefix` into cfg
    if args.json_prefix is not None:
        cfg_json = {
            'test_evaluator.format_only': True,
            'test_evaluator.outfile_prefix': args.json_prefix
        }
        cfg.merge_from_dict(cfg_json)

    # Determine whether the custom metainfo fields are all lowercase
    is_metainfo_lower(cfg)

    if args.tta:
        # Test-time augmentation: wrap the model and swap in the TTA pipeline
        # on the innermost dataset config.
        assert 'tta_model' in cfg, 'Cannot find ``tta_model`` in config.' \
            " Can't use tta !"
        assert 'tta_pipeline' in cfg, 'Cannot find ``tta_pipeline`` ' \
            "in config. Can't use tta !"
        cfg.model = ConfigDict(**cfg.tta_model, module=cfg.model)
        test_data_cfg = cfg.test_dataloader.dataset
        while 'dataset' in test_data_cfg:
            test_data_cfg = test_data_cfg['dataset']

        # batch_shapes_cfg will force control the size of the output image,
        # it is not compatible with tta.
        if 'batch_shapes_cfg' in test_data_cfg:
            test_data_cfg.batch_shapes_cfg = None
        test_data_cfg.pipeline = cfg.tta_pipeline

    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)

    # add `DumpResults` dummy metric
    if args.out is not None:
        assert args.out.endswith(('.pkl', '.pickle')), \
            'The dump file must be a pkl file.'
        runner.test_evaluator.metrics.append(
            DumpResults(out_file_path=args.out))

    # start testing
    runner.test()
if __name__ == '__main__':
main()
| 5,443
| 35.05298
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/train.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.runner import Runner
from mmyolo.registry import RUNNERS
from mmyolo.utils import is_metainfo_lower
def parse_args():
    """Parse command-line arguments for MMYOLO training."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--amp',
        action='store_true',
        default=False,
        help='enable automatic-mixed-precision training')
    parser.add_argument(
        '--resume',
        nargs='?',
        type=str,
        const='auto',
        help='If specify checkpoint path, resume from it, while if not '
        'specify, try to auto resume from the latest checkpoint '
        'in the work directory.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Distributed launchers expect LOCAL_RANK in the environment.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
args = parse_args()
# load config
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
# cfg = replace_cfg_vals(cfg)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# Determine whether the custom metainfo fields are all lowercase
is_metainfo_lower(cfg)
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
| 3,969
| 33.224138
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/misc/download_dataset.py
|
import argparse
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tarfile import TarFile
from zipfile import ZipFile
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Download datasets for training')
parser.add_argument(
'--dataset-name', type=str, help='dataset name', default='coco2017')
parser.add_argument(
'--save-dir',
type=str,
help='the dir to save dataset',
default='data/coco')
parser.add_argument(
'--unzip',
action='store_true',
help='whether unzip dataset or not, zipped files will be saved')
parser.add_argument(
'--delete',
action='store_true',
help='delete the download zipped files')
parser.add_argument(
'--threads', type=int, help='number of threading', default=4)
args = parser.parse_args()
return args
def download(url, dir, unzip=True, delete=False, threads=1):
def download_one(url, dir):
f = dir / Path(url).name
if Path(url).is_file():
Path(url).rename(f)
elif not f.exists():
print(f'Downloading {url} to {f}')
torch.hub.download_url_to_file(url, f, progress=True)
if unzip and f.suffix in ('.zip', '.tar'):
print(f'Unzipping {f.name}')
if f.suffix == '.zip':
ZipFile(f).extractall(path=dir)
elif f.suffix == '.tar':
TarFile(f).extractall(path=dir)
if delete:
f.unlink()
print(f'Delete {f}')
dir = Path(dir)
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))
pool.close()
pool.join()
else:
for u in [url] if isinstance(url, (str, Path)) else url:
download_one(u, dir)
def main():
args = parse_args()
path = Path(args.save_dir)
if not path.exists():
path.mkdir(parents=True, exist_ok=True)
data2url = dict(
# TODO: Support for downloading Panoptic Segmentation of COCO
coco2017=[
'http://images.cocodataset.org/zips/train2017.zip',
'http://images.cocodataset.org/zips/val2017.zip',
'http://images.cocodataset.org/zips/test2017.zip',
'http://images.cocodataset.org/annotations/' +
'annotations_trainval2017.zip'
],
lvis=[
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa
],
voc2007=[
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', # noqa
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', # noqa
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', # noqa
],
voc2012=[
'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar', # noqa
],
balloon=[
# src link: https://github.com/matterport/Mask_RCNN/releases/download/v2.1/balloon_dataset.zip # noqa
'https://download.openmmlab.com/mmyolo/data/balloon_dataset.zip'
],
cat=[
'https://download.openmmlab.com/mmyolo/data/cat_dataset.zip' # noqa
],
)
url = data2url.get(args.dataset_name, None)
if url is None:
print('Only support COCO, VOC, balloon, cat and LVIS now!')
return
download(
url,
dir=path,
unzip=args.unzip,
delete=args.delete,
threads=args.threads)
if __name__ == '__main__':
main()
| 3,814
| 32.761062
| 113
|
py
|
mmyolo
|
mmyolo-main/tools/misc/print_config.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from mmdet.utils import replace_cfg_vals, update_data_root
from mmengine import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--save-path',
default=None,
help='save path of whole config, suffixed with .py, .json or .yml')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if args.save_path is not None:
save_path = args.save_path
suffix = os.path.splitext(save_path)[-1]
assert suffix in ['.py', '.json', '.yml']
if not os.path.exists(os.path.split(save_path)[0]):
os.makedirs(os.path.split(save_path)[0])
cfg.dump(save_path)
print(f'Config saving at {save_path}')
if __name__ == '__main__':
main()
| 1,796
| 28.95
| 78
|
py
|
mmyolo
|
mmyolo-main/tools/misc/publish_model.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
if 'message_hub' in checkpoint:
del checkpoint['message_hub']
if 'ema_state_dict' in checkpoint:
del checkpoint['ema_state_dict']
for key in list(checkpoint['state_dict']):
if key.startswith('data_preprocessor'):
checkpoint['state_dict'].pop(key)
elif 'priors_base_sizes' in key:
checkpoint['state_dict'].pop(key)
elif 'grid_offset' in key:
checkpoint['state_dict'].pop(key)
elif 'prior_inds' in key:
checkpoint['state_dict'].pop(key)
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
final_file = out_file_name + f'-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| 1,744
| 29.086207
| 78
|
py
|
mmyolo
|
mmyolo-main/tools/misc/extract_subcoco.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Extracting subsets from coco2017 dataset.
This script is mainly used to debug and verify the correctness of the
program quickly.
The root folder format must be in the following format:
├── root
│ ├── annotations
│ ├── train2017
│ ├── val2017
│ ├── test2017
Currently, only support COCO2017. In the future will support user-defined
datasets of standard coco JSON format.
Example:
python tools/misc/extract_subcoco.py ${ROOT} ${OUT_DIR} --num-img ${NUM_IMG}
"""
import argparse
import os.path as osp
import shutil
import mmengine
import numpy as np
from pycocotools.coco import COCO
# TODO: Currently only supports coco2017
def _process_data(args,
in_dataset_type: str,
out_dataset_type: str,
year: str = '2017'):
assert in_dataset_type in ('train', 'val')
assert out_dataset_type in ('train', 'val')
int_ann_file_name = f'annotations/instances_{in_dataset_type}{year}.json'
out_ann_file_name = f'annotations/instances_{out_dataset_type}{year}.json'
ann_path = osp.join(args.root, int_ann_file_name)
json_data = mmengine.load(ann_path)
new_json_data = {
'info': json_data['info'],
'licenses': json_data['licenses'],
'categories': json_data['categories'],
'images': [],
'annotations': []
}
area_dict = {
'small': [0., 32 * 32],
'medium': [32 * 32, 96 * 96],
'large': [96 * 96, float('inf')]
}
coco = COCO(ann_path)
# filter annotations by category ids and area range
areaRng = area_dict[args.area_size] if args.area_size else []
catIds = coco.getCatIds(args.classes) if args.classes else []
ann_ids = coco.getAnnIds(catIds=catIds, areaRng=areaRng)
ann_info = coco.loadAnns(ann_ids)
# get image ids by anns set
filter_img_ids = {ann['image_id'] for ann in ann_info}
filter_img = coco.loadImgs(filter_img_ids)
# shuffle
np.random.shuffle(filter_img)
num_img = args.num_img if args.num_img > 0 else len(filter_img)
if num_img > len(filter_img):
print(
f'num_img is too big, will be set to {len(filter_img)}, '
'because of not enough image after filter by classes and area_size'
)
num_img = len(filter_img)
progress_bar = mmengine.ProgressBar(num_img)
for i in range(num_img):
file_name = filter_img[i]['file_name']
image_path = osp.join(args.root, in_dataset_type + year, file_name)
ann_ids = coco.getAnnIds(
imgIds=[filter_img[i]['id']], catIds=catIds, areaRng=areaRng)
img_ann_info = coco.loadAnns(ann_ids)
new_json_data['images'].append(filter_img[i])
new_json_data['annotations'].extend(img_ann_info)
shutil.copy(image_path, osp.join(args.out_dir,
out_dataset_type + year))
progress_bar.update()
mmengine.dump(new_json_data, osp.join(args.out_dir, out_ann_file_name))
def _make_dirs(out_dir):
mmengine.mkdir_or_exist(out_dir)
mmengine.mkdir_or_exist(osp.join(out_dir, 'annotations'))
mmengine.mkdir_or_exist(osp.join(out_dir, 'train2017'))
mmengine.mkdir_or_exist(osp.join(out_dir, 'val2017'))
def parse_args():
parser = argparse.ArgumentParser(description='Extract coco subset')
parser.add_argument('root', help='root path')
parser.add_argument(
'out_dir', type=str, help='directory where subset coco will be saved.')
parser.add_argument(
'--num-img',
default=50,
type=int,
help='num of extract image, -1 means all images')
parser.add_argument(
'--area-size',
choices=['small', 'medium', 'large'],
help='filter ground-truth info by area size')
parser.add_argument(
'--classes', nargs='+', help='filter ground-truth by class name')
parser.add_argument(
'--use-training-set',
action='store_true',
help='Whether to use the training set when extract the training set. '
'The training subset is extracted from the validation set by '
'default which can speed up.')
parser.add_argument('--seed', default=-1, type=int, help='seed')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out_dir != args.root, \
'The file will be overwritten in place, ' \
'so the same folder is not allowed !'
seed = int(args.seed)
if seed != -1:
print(f'Set the global seed: {seed}')
np.random.seed(int(args.seed))
_make_dirs(args.out_dir)
print('====Start processing train dataset====')
if args.use_training_set:
_process_data(args, 'train', 'train')
else:
_process_data(args, 'val', 'train')
print('\n====Start processing val dataset====')
_process_data(args, 'val', 'val')
print(f'\n Result save to {args.out_dir}')
if __name__ == '__main__':
main()
| 5,005
| 30.093168
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/misc/coco_split.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
import random
from pathlib import Path
import numpy as np
from pycocotools.coco import COCO
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--json', type=str, required=True, help='COCO json label path')
parser.add_argument(
'--out-dir', type=str, required=True, help='output path')
parser.add_argument(
'--ratios',
nargs='+',
type=float,
help='ratio for sub dataset, if set 2 number then will generate '
'trainval + test (eg. "0.8 0.1 0.1" or "2 1 1"), if set 3 number '
'then will generate train + val + test (eg. "0.85 0.15" or "2 1")')
parser.add_argument(
'--shuffle',
action='store_true',
help='Whether to display in disorder')
parser.add_argument('--seed', default=-1, type=int, help='seed')
args = parser.parse_args()
return args
def split_coco_dataset(coco_json_path: str, save_dir: str, ratios: list,
shuffle: bool, seed: int):
if not Path(coco_json_path).exists():
raise FileNotFoundError(f'Can not not found {coco_json_path}')
if not Path(save_dir).exists():
Path(save_dir).mkdir(parents=True)
# ratio normalize
ratios = np.array(ratios) / np.array(ratios).sum()
if len(ratios) == 2:
ratio_train, ratio_test = ratios
ratio_val = 0
train_type = 'trainval'
elif len(ratios) == 3:
ratio_train, ratio_val, ratio_test = ratios
train_type = 'train'
else:
raise ValueError('ratios must set 2 or 3 group!')
# Read coco info
coco = COCO(coco_json_path)
coco_image_ids = coco.getImgIds()
# gen image number of each dataset
val_image_num = int(len(coco_image_ids) * ratio_val)
test_image_num = int(len(coco_image_ids) * ratio_test)
train_image_num = len(coco_image_ids) - val_image_num - test_image_num
print('Split info: ====== \n'
f'Train ratio = {ratio_train}, number = {train_image_num}\n'
f'Val ratio = {ratio_val}, number = {val_image_num}\n'
f'Test ratio = {ratio_test}, number = {test_image_num}')
seed = int(seed)
if seed != -1:
print(f'Set the global seed: {seed}')
np.random.seed(seed)
if shuffle:
print('shuffle dataset.')
random.shuffle(coco_image_ids)
# split each dataset
train_image_ids = coco_image_ids[:train_image_num]
if val_image_num != 0:
val_image_ids = coco_image_ids[train_image_num:train_image_num +
val_image_num]
else:
val_image_ids = None
test_image_ids = coco_image_ids[train_image_num + val_image_num:]
# Save new json
categories = coco.loadCats(coco.getCatIds())
for img_id_list in [train_image_ids, val_image_ids, test_image_ids]:
if img_id_list is None:
continue
# Gen new json
img_dict = {
'images': coco.loadImgs(ids=img_id_list),
'categories': categories,
'annotations': coco.loadAnns(coco.getAnnIds(imgIds=img_id_list))
}
# save json
if img_id_list == train_image_ids:
json_file_path = Path(save_dir, f'{train_type}.json')
elif img_id_list == val_image_ids:
json_file_path = Path(save_dir, 'val.json')
elif img_id_list == test_image_ids:
json_file_path = Path(save_dir, 'test.json')
else:
raise ValueError('img_id_list ERROR!')
print(f'Saving json to {json_file_path}')
with open(json_file_path, 'w') as f_json:
json.dump(img_dict, f_json, ensure_ascii=False, indent=2)
print('All done!')
def main():
args = parse_args()
split_coco_dataset(args.json, args.out_dir, args.ratios, args.shuffle,
args.seed)
if __name__ == '__main__':
main()
| 3,963
| 31.227642
| 76
|
py
|
mmyolo
|
mmyolo-main/tools/model_converters/yolov6_to_mmyolo.py
|
import argparse
from collections import OrderedDict
import torch
def convert(src, dst):
import sys
sys.path.append('yolov6')
try:
ckpt = torch.load(src, map_location=torch.device('cpu'))
except ModuleNotFoundError:
raise RuntimeError(
'This script must be placed under the meituan/YOLOv6 repo,'
' because loading the official pretrained model need'
' some python files to build model.')
# The saved model is the model before reparameterization
model = ckpt['ema' if ckpt.get('ema') else 'model'].float()
new_state_dict = OrderedDict()
for k, v in model.state_dict().items():
name = k
if 'detect' in k:
if 'proj' in k:
continue
name = k.replace('detect', 'bbox_head.head_module')
if k.find('anchors') >= 0 or k.find('anchor_grid') >= 0:
continue
if 'ERBlock_2' in k:
name = k.replace('ERBlock_2', 'stage1.0')
if '.cv' in k:
name = name.replace('.cv', '.conv')
if '.m.' in k:
name = name.replace('.m.', '.block.')
elif 'ERBlock_3' in k:
name = k.replace('ERBlock_3', 'stage2.0')
if '.cv' in k:
name = name.replace('.cv', '.conv')
if '.m.' in k:
name = name.replace('.m.', '.block.')
elif 'ERBlock_4' in k:
name = k.replace('ERBlock_4', 'stage3.0')
if '.cv' in k:
name = name.replace('.cv', '.conv')
if '.m.' in k:
name = name.replace('.m.', '.block.')
elif 'ERBlock_5' in k:
name = k.replace('ERBlock_5', 'stage4.0')
if '.cv' in k:
name = name.replace('.cv', '.conv')
if '.m.' in k:
name = name.replace('.m.', '.block.')
if 'stage4.0.2' in name:
name = name.replace('stage4.0.2', 'stage4.1')
name = name.replace('cv', 'conv')
elif 'reduce_layer0' in k:
name = k.replace('reduce_layer0', 'reduce_layers.2')
elif 'Rep_p4' in k:
name = k.replace('Rep_p4', 'top_down_layers.0.0')
if '.cv' in k:
name = name.replace('.cv', '.conv')
if '.m.' in k:
name = name.replace('.m.', '.block.')
elif 'reduce_layer1' in k:
name = k.replace('reduce_layer1', 'top_down_layers.0.1')
if '.cv' in k:
name = name.replace('.cv', '.conv')
if '.m.' in k:
name = name.replace('.m.', '.block.')
elif 'Rep_p3' in k:
name = k.replace('Rep_p3', 'top_down_layers.1')
if '.cv' in k:
name = name.replace('.cv', '.conv')
if '.m.' in k:
name = name.replace('.m.', '.block.')
elif 'upsample0' in k:
name = k.replace('upsample0.upsample_transpose',
'upsample_layers.0')
elif 'upsample1' in k:
name = k.replace('upsample1.upsample_transpose',
'upsample_layers.1')
elif 'Rep_n3' in k:
name = k.replace('Rep_n3', 'bottom_up_layers.0')
if '.cv' in k:
name = name.replace('.cv', '.conv')
if '.m.' in k:
name = name.replace('.m.', '.block.')
elif 'Rep_n4' in k:
name = k.replace('Rep_n4', 'bottom_up_layers.1')
if '.cv' in k:
name = name.replace('.cv', '.conv')
if '.m.' in k:
name = name.replace('.m.', '.block.')
elif 'downsample2' in k:
name = k.replace('downsample2', 'downsample_layers.0')
elif 'downsample1' in k:
name = k.replace('downsample1', 'downsample_layers.1')
new_state_dict[name] = v
data = {'state_dict': new_state_dict}
torch.save(data, dst)
# Note: This script must be placed under the yolov6 repo to run.
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument(
'--src', default='yolov6s.pt', help='src yolov6 model path')
parser.add_argument('--dst', default='mmyolov6.pt', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
| 4,403
| 36.965517
| 73
|
py
|
mmyolo
|
mmyolo-main/tools/model_converters/yolox_to_mmyolo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
neck_dict = {
'backbone.lateral_conv0': 'neck.reduce_layers.2',
'backbone.C3_p4.conv': 'neck.top_down_layers.0.0.cv',
'backbone.C3_p4.m.0.': 'neck.top_down_layers.0.0.m.0.',
'backbone.reduce_conv1': 'neck.top_down_layers.0.1',
'backbone.C3_p3.conv': 'neck.top_down_layers.1.cv',
'backbone.C3_p3.m.0.': 'neck.top_down_layers.1.m.0.',
'backbone.bu_conv2': 'neck.downsample_layers.0',
'backbone.C3_n3.conv': 'neck.bottom_up_layers.0.cv',
'backbone.C3_n3.m.0.': 'neck.bottom_up_layers.0.m.0.',
'backbone.bu_conv1': 'neck.downsample_layers.1',
'backbone.C3_n4.conv': 'neck.bottom_up_layers.1.cv',
'backbone.C3_n4.m.0.': 'neck.bottom_up_layers.1.m.0.',
}
def convert_stem(model_key, model_weight, state_dict, converted_names):
new_key = model_key[9:]
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_backbone(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('backbone.dark', 'stage')
num = int(new_key[14]) - 1
new_key = new_key[:14] + str(num) + new_key[15:]
if '.m.' in model_key:
new_key = new_key.replace('.m.', '.blocks.')
elif not new_key[16] == '0' and 'stage4.1' not in new_key:
new_key = new_key.replace('conv1', 'main_conv')
new_key = new_key.replace('conv2', 'short_conv')
new_key = new_key.replace('conv3', 'final_conv')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_neck(model_key, model_weight, state_dict, converted_names):
for old, new in neck_dict.items():
if old in model_key:
new_key = model_key.replace(old, new)
if '.m.' in model_key:
new_key = new_key.replace('.m.', '.blocks.')
elif '.C' in model_key:
new_key = new_key.replace('cv1', 'main_conv')
new_key = new_key.replace('cv2', 'short_conv')
new_key = new_key.replace('cv3', 'final_conv')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
if 'stem' in model_key:
new_key = model_key.replace('head.stem', 'neck.out_layer')
elif 'cls_convs' in model_key:
new_key = model_key.replace(
'head.cls_convs', 'bbox_head.head_module.multi_level_cls_convs')
elif 'reg_convs' in model_key:
new_key = model_key.replace(
'head.reg_convs', 'bbox_head.head_module.multi_level_reg_convs')
elif 'preds' in model_key:
new_key = model_key.replace('head.',
'bbox_head.head_module.multi_level_conv_')
new_key = new_key.replace('_preds', '')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert(src, dst):
"""Convert keys in detectron pretrained YOLOX models to mmyolo style."""
blobs = torch.load(src)['model']
state_dict = OrderedDict()
converted_names = set()
for key, weight in blobs.items():
if 'backbone.stem' in key:
convert_stem(key, weight, state_dict, converted_names)
elif 'backbone.backbone' in key:
convert_backbone(key, weight, state_dict, converted_names)
elif 'backbone.neck' not in key and 'head' not in key:
convert_neck(key, weight, state_dict, converted_names)
elif 'head' in key:
convert_head(key, weight, state_dict, converted_names)
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument(
'--src', default='yolox_s.pth', help='src yolox model path')
parser.add_argument('--dst', default='mmyoloxs.pt', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
| 4,218
| 37.009009
| 78
|
py
|
mmyolo
|
mmyolo-main/tools/model_converters/yolov8_to_mmyolo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
convert_dict_s = {
# backbone
'model.0': 'backbone.stem',
'model.1': 'backbone.stage1.0',
'model.2': 'backbone.stage1.1',
'model.3': 'backbone.stage2.0',
'model.4': 'backbone.stage2.1',
'model.5': 'backbone.stage3.0',
'model.6': 'backbone.stage3.1',
'model.7': 'backbone.stage4.0',
'model.8': 'backbone.stage4.1',
'model.9': 'backbone.stage4.2',
# neck
'model.12': 'neck.top_down_layers.0',
'model.15': 'neck.top_down_layers.1',
'model.16': 'neck.downsample_layers.0',
'model.18': 'neck.bottom_up_layers.0',
'model.19': 'neck.downsample_layers.1',
'model.21': 'neck.bottom_up_layers.1',
# Detector
'model.22': 'bbox_head.head_module',
}
def convert(src, dst):
"""Convert keys in pretrained YOLOv8 models to mmyolo style."""
convert_dict = convert_dict_s
try:
yolov8_model = torch.load(src)['model']
blobs = yolov8_model.state_dict()
except ModuleNotFoundError:
raise RuntimeError(
'This script must be placed under the ultralytics repo,'
' because loading the official pretrained model need'
' `model.py` to build model.'
'Also need to install hydra-core>=1.2.0 and thop>=0.1.1')
state_dict = OrderedDict()
for key, weight in blobs.items():
num, module = key.split('.')[1:3]
prefix = f'model.{num}'
new_key = key.replace(prefix, convert_dict[prefix])
if '.m.' in new_key:
new_key = new_key.replace('.m.', '.blocks.')
new_key = new_key.replace('.cv', '.conv')
elif 'bbox_head.head_module' in new_key:
new_key = new_key.replace('.cv2', '.reg_preds')
new_key = new_key.replace('.cv3', '.cls_preds')
elif 'backbone.stage4.2' in new_key:
new_key = new_key.replace('.cv', '.conv')
else:
new_key = new_key.replace('.cv1', '.main_conv')
new_key = new_key.replace('.cv2', '.final_conv')
if 'bbox_head.head_module.dfl.conv.weight' == new_key:
print('Drop "bbox_head.head_module.dfl.conv.weight", '
'because it is useless')
continue
state_dict[new_key] = weight
print(f'Convert {key} to {new_key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
# Note: This script must be placed under the YOLOv8 repo to run.
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument(
'--src', default='yolov8s.pt', help='src YOLOv8 model path')
parser.add_argument('--dst', default='mmyolov8s.pth', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
| 2,937
| 31.644444
| 75
|
py
|
mmyolo
|
mmyolo-main/tools/model_converters/rtmdet_to_mmyolo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert(src, dst):
"""Convert keys in pretrained RTMDet models to MMYOLO style."""
blobs = torch.load(src)['state_dict']
state_dict = OrderedDict()
for key, weight in blobs.items():
if 'neck.reduce_layers.0' in key:
new_key = key.replace('.0', '.2')
state_dict[new_key] = weight
elif 'neck.reduce_layers.1' in key:
new_key = key.replace('reduce_layers.1', 'top_down_layers.0.1')
state_dict[new_key] = weight
elif 'neck.top_down_blocks.0' in key:
new_key = key.replace('down_blocks', 'down_layers.0')
state_dict[new_key] = weight
elif 'neck.top_down_blocks.1' in key:
new_key = key.replace('down_blocks', 'down_layers')
state_dict[new_key] = weight
elif 'downsamples' in key:
new_key = key.replace('downsamples', 'downsample_layers')
state_dict[new_key] = weight
elif 'bottom_up_blocks' in key:
new_key = key.replace('bottom_up_blocks', 'bottom_up_layers')
state_dict[new_key] = weight
elif 'out_convs' in key:
new_key = key.replace('out_convs', 'out_layers')
state_dict[new_key] = weight
elif 'bbox_head' in key:
new_key = key.replace('bbox_head', 'bbox_head.head_module')
state_dict[new_key] = weight
elif 'data_preprocessor' in key:
continue
else:
new_key = key
state_dict[new_key] = weight
print(f'Convert {key} to {new_key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
checkpoint['meta'] = blobs.get('meta')
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src rtm model path')
parser.add_argument('dst', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
| 2,142
| 33.564516
| 75
|
py
|
mmyolo
|
mmyolo-main/tools/model_converters/ppyoloe_to_mmyolo.py
|
import argparse
import pickle
from collections import OrderedDict
import torch
def convert_bn(k: str):
name = k.replace('._mean',
'.running_mean').replace('._variance', '.running_var')
return name
def convert_repvgg(k: str):
if '.conv2.conv1.' in k:
name = k.replace('.conv2.conv1.', '.conv2.rbr_dense.')
return name
elif '.conv2.conv2.' in k:
name = k.replace('.conv2.conv2.', '.conv2.rbr_1x1.')
return name
else:
return k
def convert(src: str, dst: str, imagenet_pretrain: bool = False):
with open(src, 'rb') as f:
model = pickle.load(f)
new_state_dict = OrderedDict()
if imagenet_pretrain:
for k, v in model.items():
if '@@' in k:
continue
if 'stem.' in k:
# backbone.stem.conv1.conv.weight
# -> backbone.stem.0.conv.weight
org_ind = k.split('.')[1][-1]
new_ind = str(int(org_ind) - 1)
name = k.replace('stem.conv%s.' % org_ind,
'stem.%s.' % new_ind)
else:
# backbone.stages.1.conv2.bn._variance
# -> backbone.stage2.0.conv2.bn.running_var
org_stage_ind = k.split('.')[1]
new_stage_ind = str(int(org_stage_ind) + 1)
name = k.replace('stages.%s.' % org_stage_ind,
'stage%s.0.' % new_stage_ind)
name = convert_repvgg(name)
if '.attn.' in k:
name = name.replace('.attn.fc.', '.attn.fc.conv.')
name = convert_bn(name)
name = 'backbone.' + name
new_state_dict[name] = torch.from_numpy(v)
else:
for k, v in model.items():
name = k
if k.startswith('backbone.'):
if '.stem.' in k:
# backbone.stem.conv1.conv.weight
# -> backbone.stem.0.conv.weight
org_ind = k.split('.')[2][-1]
new_ind = str(int(org_ind) - 1)
name = k.replace('.stem.conv%s.' % org_ind,
'.stem.%s.' % new_ind)
else:
# backbone.stages.1.conv2.bn._variance
# -> backbone.stage2.0.conv2.bn.running_var
org_stage_ind = k.split('.')[2]
new_stage_ind = str(int(org_stage_ind) + 1)
name = k.replace('.stages.%s.' % org_stage_ind,
'.stage%s.0.' % new_stage_ind)
name = convert_repvgg(name)
if '.attn.' in k:
name = name.replace('.attn.fc.', '.attn.fc.conv.')
name = convert_bn(name)
elif k.startswith('neck.'):
# fpn_stages
if k.startswith('neck.fpn_stages.'):
# neck.fpn_stages.0.0.conv1.conv.weight
# -> neck.reduce_layers.2.0.conv1.conv.weight
if k.startswith('neck.fpn_stages.0.0.'):
name = k.replace('neck.fpn_stages.0.0.',
'neck.reduce_layers.2.0.')
if '.spp.' in name:
name = name.replace('.spp.conv.', '.spp.conv2.')
# neck.fpn_stages.1.0.conv1.conv.weight
# -> neck.top_down_layers.0.0.conv1.conv.weight
elif k.startswith('neck.fpn_stages.1.0.'):
name = k.replace('neck.fpn_stages.1.0.',
'neck.top_down_layers.0.0.')
elif k.startswith('neck.fpn_stages.2.0.'):
name = k.replace('neck.fpn_stages.2.0.',
'neck.top_down_layers.1.0.')
else:
raise NotImplementedError('Not implemented.')
name = name.replace('.0.convs.', '.0.blocks.')
elif k.startswith('neck.fpn_routes.'):
# neck.fpn_routes.0.conv.weight
# -> neck.upsample_layers.0.0.conv.weight
index = k.split('.')[2]
name = 'neck.upsample_layers.' + index + '.0.' + '.'.join(
k.split('.')[-2:])
name = name.replace('.0.convs.', '.0.blocks.')
elif k.startswith('neck.pan_stages.'):
# neck.pan_stages.0.0.conv1.conv.weight
# -> neck.bottom_up_layers.1.0.conv1.conv.weight
ind = k.split('.')[2]
name = k.replace(
'neck.pan_stages.' + ind, 'neck.bottom_up_layers.' +
('0' if ind == '1' else '1'))
name = name.replace('.0.convs.', '.0.blocks.')
elif k.startswith('neck.pan_routes.'):
# neck.pan_routes.0.conv.weight
# -> neck.downsample_layers.0.conv.weight
ind = k.split('.')[2]
name = k.replace(
'neck.pan_routes.' + ind, 'neck.downsample_layers.' +
('0' if ind == '1' else '1'))
name = name.replace('.0.convs.', '.0.blocks.')
else:
raise NotImplementedError('Not implement.')
name = convert_repvgg(name)
name = convert_bn(name)
elif k.startswith('yolo_head.'):
if ('anchor_points' in k) or ('stride_tensor' in k):
continue
if 'proj_conv' in k:
name = k.replace('yolo_head.proj_conv.',
'bbox_head.head_module.proj_conv.')
else:
for org_key, rep_key in [
[
'yolo_head.stem_cls.',
'bbox_head.head_module.cls_stems.'
],
[
'yolo_head.stem_reg.',
'bbox_head.head_module.reg_stems.'
],
[
'yolo_head.pred_cls.',
'bbox_head.head_module.cls_preds.'
],
[
'yolo_head.pred_reg.',
'bbox_head.head_module.reg_preds.'
]
]:
name = name.replace(org_key, rep_key)
name = name.split('.')
ind = name[3]
name[3] = str(2 - int(ind))
name = '.'.join(name)
name = convert_bn(name)
else:
continue
new_state_dict[name] = torch.from_numpy(v)
data = {'state_dict': new_state_dict}
torch.save(data, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument(
'--src',
default='ppyoloe_plus_crn_s_80e_coco.pdparams',
help='src ppyoloe model path')
parser.add_argument(
'--dst', default='mmppyoloe_plus_s.pt', help='save path')
parser.add_argument(
'--imagenet-pretrain',
action='store_true',
default=False,
help='Load model pretrained on imagenet dataset which only '
'have weight for backbone.')
args = parser.parse_args()
convert(args.src, args.dst, args.imagenet_pretrain)
if __name__ == '__main__':
main()
| 7,738
| 40.832432
| 78
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.